PacketForwarderCollector.py
# LoRaWAN Security Framework - PacketForwarderCollector
# Copyright (c) 2019 IOActive Inc. All rights reserved.
import sys, argparse, datetime, json, os, re, time, logging, threading, socket, traceback
import auditing.datacollectors.utils.PhyParser as phy_parser
from auditing.datacollectors.utils.PacketPersistence import save
if os.environ.get("ENVIRONMENT") == "DEV":
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
# This dict keeps track of the location reported by each gateway
gateways_location = {}
def init_packet_writter_message():
packet_writter_message = dict()
packet_writter_message['packet'] = None
packet_writter_message['messages'] = list()
return packet_writter_message
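# Illustrative note (not part of the original module): once populated by listener(),
# the writer message is a plain dict shaped roughly like
#   {'packet': {<parsed PHY payload fields>},
#    'messages': [{'topic': None, 'message': '<raw UDP payload>', 'data_collector_id': <id>}]}
# before it is handed to save() and reset.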
class PacketForwarderCollector:
def __init__(self, data_collector_id, organization_id, port):
self.data_collector_id = data_collector_id
self.organization_id = organization_id
self.port = port
self.stop_thread=True
# The data sent to the MQTT queue, to be written by the packet writer. It must have at least one MQ message
self.packet_writter_message = init_packet_writter_message()
def connect(self):
self.stop_thread=False
# Launch listener() in a thread
self.listener = threading.Thread(target=listener, args=(self,))
self.listener.daemon = True
self.listener.start()
def disconnect(self):
self.stop_thread= True
self.listener.join()
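# Background note (assumption based on the parsing below, following the Semtech
# packet_forwarder UDP protocol): uplink PUSH_DATA datagrams carry a 12-byte header
# (version, 2-byte token, identifier, 8-byte gateway EUI) followed by a JSON object,
# while PULL_RESP-style datagrams carry only a 4-byte header (version, token,
# identifier) before the JSON. listener() tells the two apart by checking whether
# byte 4 already starts the JSON object ('{').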
def listener(client):
udp_listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_listener.bind(('', client.port))
while True:
if client.stop_thread:
break
payload, source_address = udp_listener.recvfrom(65565)
skip_packet=False
if len(payload)>4:
try:
if chr(payload[4]) == '{':
udp_message= json.loads(payload[4:].decode("utf-8") )
header= payload[0:4]
else:
udp_message= json.loads(payload[12:].decode("utf-8") )
header= payload[0:12]
except Exception as e:
logging.debug('Skipping packet: {0}'.format(payload))
skip_packet= True
else:
logging.debug('Skipping packet: {0}'.format(payload))
skip_packet= True
try:
if not skip_packet:
standardPacket={}
if "stat" in udp_message:
pkt = udp_message.get("stat")
location = {}
if 'lati' in pkt:
location['latitude'] = pkt.get('lati')
if 'long' in pkt:
location['longitude'] = pkt.get('long')
if 'alti' in pkt:
location['altitude'] = pkt.get('alti')
if len(location) > 0:
gateway= get_gateway_id(header)
gateways_location[gateway]=location
if "rxpk" in udp_message or "txpk" in udp_message:
pkt = udp_message.get("rxpk")[0] if "rxpk" in udp_message else udp_message.get("txpk")
standardPacket = phy_parser.setPHYPayload(pkt.get('data'))
standardPacket['chan'] = pkt.get('chan', None)
standardPacket['stat'] = pkt.get('stat', None)
standardPacket['lsnr'] = pkt.get('lsnr', None)
standardPacket['rssi'] = pkt.get('rssi', None)
standardPacket['tmst'] = pkt.get('tmst', None)
standardPacket['rfch'] = pkt.get('rfch', None)
standardPacket['freq'] = pkt.get('freq', None)
standardPacket['modu'] = pkt.get('modu', None)
standardPacket['datr'] = json.dumps(parse_datr(pkt.get('datr', None)))
standardPacket['codr'] = pkt.get('codr', None)
standardPacket['size'] = pkt.get('size', None)
standardPacket['data'] = pkt.get('data', None)
gateway= get_gateway_id(header)
if gateway:
standardPacket['gateway'] = gateway
if gateway in gateways_location:
standardPacket['latitude']= gateways_location[gateway]['latitude']
standardPacket['longitude']= gateways_location[gateway]['longitude']
standardPacket['altitude']= gateways_location[gateway]['altitude']
standardPacket['date'] = datetime.datetime.now().__str__()
standardPacket['data_collector_id'] = client.data_collector_id
standardPacket['organization_id'] = client.organization_id
client.packet_writter_message['packet']= standardPacket
logging.debug('Message received: {0} \n{1}'.format(payload, json.dumps(standardPacket)))
# Save this message and topic into the MQ
client.packet_writter_message['messages'].append(
{
'topic':None,
'message':payload.decode('latin1').replace('\x00',''),
'data_collector_id': client.data_collector_id
}
)
# Save the packet
save(client.packet_writter_message, client.data_collector_id)
# Reset packet_writter_message
client.packet_writter_message = init_packet_writter_message()
except Exception as e:
logging.error("Error creating Packet in PacketForwarderCollector: {0}. Message: {1}".format(e,payload))
traceback.print_exc(file=sys.stdout)
def get_gateway_id(header):
gw = None
if len(header) > 4:
gw = ""
for pos in range(4,12):
gw+= "{:02x}".format(header[pos])
return gw
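# Illustrative example (hypothetical header bytes, not taken from the original code):
# >>> header = bytes([2, 0x12, 0x34, 0x00, 0xAA, 0x55, 0x5A, 0x00, 0x00, 0x00, 0x10, 0x2C])
# >>> get_gateway_id(header)
# 'aa555a000000102c'
# The gateway EUI occupies bytes 4 to 11 of the 12-byte PUSH_DATA header.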
def parse_datr(encoded_datr):
datr = {}
search = re.search('SF(.*)BW(.*)', encoded_datr)
if search:
datr["spread_factor"] = search.group(1)
datr["bandwidth"] = search.group(2)
return datr
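# Illustrative example (hypothetical datarate string):
# >>> parse_datr('SF7BW125')
# {'spread_factor': '7', 'bandwidth': '125'}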
if __name__ == '__main__':
from auditing.db.Models import DataCollector, DataCollectorType, Organization, commit, rollback
print ("\n*****************************************************")
print ("LoRaWAN Security Framework - %s"%(sys.argv[0]))
print ("Copyright (c) 2019 IOActive Inc. All rights reserved.")
print ("*****************************************************\n")
parser = argparse.ArgumentParser(description='This script receives UDP packets from the UDP proxy in the gateway packet_forwarder format and persists them.')
requiredGroup = parser.add_argument_group('Required arguments')
requiredGroup.add_argument("-n", "--name",
help= "Unique string identifier of the Data Collector. eg. --name semtech_collector",
required = True)
requiredGroup.add_argument('-p','--port',
help='Port on which to listen for UDP packets. eg. --port 1702.',
type=int)
parser.add_argument('--collector-id',
type=int,
help = 'The ID of the DataCollector. This ID will be associated with the packets saved into the DB. eg. --collector-id 1')
parser.add_argument('--organization-id',
help = 'The ID of the Organization. This ID will be associated with the packets saved into the DB. eg. --organization-id 1',
type=int,
default= None)
options = parser.parse_args()
# Get the organization
if options.organization_id:
organization_obj = Organization.find_one(options.organization_id)
if organization_obj is None:
print("Organization doesn't exist. Please provide a valid ID")
exit(0)
else:
organization_quant = Organization.count()
if organization_quant > 1:
print("There are more than one organizations in the DB. Provide the Organization DB explicitly.")
elif organization_quant == 1:
organization_obj = Organization.find_one()
else:
organization_obj = Organization(name = "Auto-generated Organization")
organization_obj.save()
# Get the data collector
collector_obj = None
if options.collector_id:
collector_obj = DataCollector.find_one(options.collector_id)
if collector_obj is None:
print("DataCollector doesn't exist. Please provide a valid ID")
exit(0)
else:
if options.port:
collector_type_obj = DataCollectorType.find_one_by_type("forwarder_collector")
if collector_type_obj is None:
collector_type_obj= DataCollectorType(
type = "forwarder_collector",
name= "forwarder_collector")
collector_type_obj.save()
collector_obj= DataCollector.find_one_by_name_and_dctype_id(collector_type_obj.id, options.name)
if collector_obj is None:
collector_obj= DataCollector(
data_collector_type_id= collector_type_obj.id,
name= options.name,
organization_id = organization_obj.id,
ip='N/A',
port= str(options.port))
collector_obj.save()
else:
print('A UDP port must be provided when no collector ID is given.')
exit(0)
connector = PacketForwarderCollector(
data_collector_id = collector_obj.id,
organization_id = collector_obj.organization_id,
port = int(collector_obj.port))
connector.connect()
while(True):
time.sleep(5)
try:
commit()
logging.debug('Commit done!')
except KeyboardInterrupt as ki:
connector.disconnect()
commit()
exit(0)
except Exception as exc:
logging.error('Error at commit: {0}'.format(exc))
logging.info('Rolling back the session')
rollback()
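# Example invocation (argument values are placeholders; see the argparse help above):
#   python PacketForwarderCollector.py --name semtech_collector --port 1702
#   python PacketForwarderCollector.py --name semtech_collector --port 1702 --collector-id 1 --organization-id 1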
ui_code.py
# Copyright (C) 2022 Louis Vottero louis.vot@gmail.com All rights reserved.
from __future__ import absolute_import
import subprocess
import re
import threading
from .. import qt_ui, qt
from .. import util_file
from .. import util
from . import ui_data
from . import process
in_maya = False
if util.is_in_maya():
from ..maya_lib import core
in_maya = True
class CodeProcessWidget(qt_ui.DirectoryWidget):
"""
The main widget for code editing.
"""
code_text_size_changed = qt.create_signal(object)
def __init__(self):
self._process_inst = None
super(CodeProcessWidget, self).__init__()
self.sizes = self.splitter.sizes()
def resizeEvent(self, event):
if self.restrain_move:
if not self.skip_move:
self._close_splitter()
return super(CodeProcessWidget, self).resizeEvent(event)
def _build_widgets(self):
self.splitter = qt.QSplitter()
self.main_layout.addWidget(self.splitter)
self.code_widget = CodeWidget()
self.script_widget = ScriptWidget()
self.code_widget.collapse.connect(self._close_splitter)
self.script_widget.script_open.connect(self._code_change)
self.script_widget.script_open_external.connect(self._open_external)
self.script_widget.script_focus.connect(self._script_focus)
self.script_widget.script_rename.connect(self._script_rename)
self.script_widget.script_remove.connect(self._script_remove)
self.script_widget.script_duplicate.connect(self._script_duplicate)
self.script_widget.script_added.connect(self._script_added)
self.code_text_size_changed.connect(self.script_widget.script_text_size_change)
self.script_widget.script_text_size_change.connect(self._code_size_changed)
self.splitter.addWidget(self.script_widget)
self.splitter.addWidget(self.code_widget)
self.restrain_move = True
self.skip_move = False
width = self.splitter.width()
self.splitter.moveSplitter(width, 1)
self.splitter.splitterMoved.connect(self._splitter_moved)
self.settings = None
def _code_size_changed(self, value):
self.code_text_size_changed.connect(self.code_widget.code_edit.code_text_size_changed)
def _splitter_moved(self, pos, index):
if self.restrain_move:
if not self.skip_move:
self.skip_move = True
width = self.splitter.width()
self.splitter.moveSplitter(width,1)
return
if self.skip_move:
self.skip_move = False
return
self.sizes = self.splitter.sizes()
def _define_main_layout(self):
return qt.QVBoxLayout()
def _close_splitter(self):
if not self.code_widget.code_edit.has_tabs():
self.restrain_move = True
width = self.splitter.width()
self.splitter.moveSplitter(width,1)
self.code_widget.set_code_path(None)
def _script_focus(self, code_path):
if code_path:
name = code_path + '.py'
self.code_widget.code_edit.goto_tab(name)
self.code_widget.code_edit.goto_floating_tab(name)
def _code_change(self, code, open_in_window = False, open_in_external = False):
if not code:
self._close_splitter()
return
if not open_in_window and not open_in_external:
if self.restrain_move == True:
self.restrain_move = False
width = self.splitter.width()
section = int(width / 2.5)
self.splitter.setSizes([section, section])
else:
self.splitter.setSizes([1,1])
process_tool = process.Process()
process_tool.set_directory(self.directory)
code_name = util_file.remove_extension(code)
code_file = process_tool.get_code_file(code_name)
if not open_in_external:
self.code_widget.set_code_path(code_file, open_in_window, name = code)
if open_in_external:
self._open_external(code)
if not open_in_window and not open_in_external:
if self.sizes[1] != 0:
self.splitter.setSizes(self.sizes)
def _open_external(self, code):
if not code:
return
process_tool = process.Process()
process_tool.set_directory(self.directory)
code_file = process_tool.get_code_file(code)
external_editor = self.settings.get('external_editor')
if not util.is_linux():
external_editor = util_file.fix_slashes(external_editor)
if external_editor:
p = subprocess.Popen([external_editor, code_file])
if not external_editor:
util_file.open_browser(code_file)
def _script_rename(self, old_name, new_name):
process_data = process.Process()
process_data.set_directory(self.directory)
code_folder = process_data.get_code_path()
old_path = util_file.join_path(code_folder, old_name)
old_path = util_file.join_path(old_path, '%s.py' % util_file.get_basename(old_name))
new_path = util_file.join_path(code_folder, new_name)
new_path = util_file.join_path(new_path, '%s.py' % util_file.get_basename(new_name))
new_file_name = new_name + '.py'
old_file_name = old_name + '.py'
self.code_widget.code_edit.rename_tab(old_path, new_path, old_file_name, new_file_name)
def _script_remove(self, filepath):
process_instance = process.Process()
process_instance.set_directory(self.directory)
code_name = process_instance.get_code_name_from_path(filepath)
code_name = code_name + '.py'
self.code_widget.code_edit.close_tab(code_name)
if not self.code_widget.code_edit.has_tabs():
self._close_splitter()
def _script_duplicate(self):
pass
#if self.code_widget.code_edit.has_tabs():
# self.code_widget.code_edit.close_tabs()
# self._close_splitter()
def _script_added(self, item):
if self.code_widget.code_edit.has_tabs():
code_folder = self.script_widget._get_current_code(item)
self._code_change(code_folder, open_in_window = False, open_in_external = False)
#if self.code_widget.code_edit.has_tabs():
# self.code_widget.code_edit.close_tabs()
# self._close_splitter()
def set_directory(self, directory, sync_code = False):
super(CodeProcessWidget, self).set_directory(directory)
self.script_widget.set_directory(directory, sync_code)
process_path = util.get_env('VETALA_CURRENT_PROCESS')
if process_path and directory:
process_inst = process.Process()
process_inst.set_directory(process_path)
self._process_inst = process_inst
self.code_widget.set_process(process_inst)
self.script_widget.set_process_inst(self._process_inst)
self._close_splitter()
def set_code_directory(self, directory):
self.code_directory = directory
def reset_process_script_state(self):
self.script_widget.reset_process_script_state()
def set_process_script_state(self, directory, state):
self.script_widget.set_process_script_state(directory, state)
def set_process_script_log(self, directory, log):
self.script_widget.set_process_script_log(directory, log)
def refresh_manifest(self):
self.script_widget.code_manifest_tree.refresh()
def set_external_code_library(self, code_directory):
self.script_widget.set_external_code_library(code_directory)
def set_settings(self, settings):
self.settings = settings
def save_code(self):
self.code_widget.code_edit.save_tabs()
def close_widgets(self, close_windows = False):
self.script_widget.code_manifest_tree.clearSelection()
#this line was causing Maya 2014 to crash.
#self.code_widget.code_edit.clear()
self.code_widget.code_edit.close_tabs()
self.set_directory(None, sync_code = False)
if close_windows:
self.code_widget.code_edit.close_windows()
self.script_widget.code_manifest_tree.break_index = None
self.script_widget.code_manifest_tree.break_item = None
self.script_widget.code_manifest_tree.start_index = None
self.script_widget.code_manifest_tree.start_item = None
class CodeWidget(qt_ui.BasicWidget):
collapse = qt_ui.create_signal()
def __init__(self, parent= None):
self._process_inst = None
super(CodeWidget, self).__init__(parent)
policy = self.sizePolicy()
policy.setHorizontalPolicy(policy.Maximum)
policy.setHorizontalStretch(2)
self.setSizePolicy(policy)
self.directory = None
def _build_widgets(self):
self.code_edit = qt_ui.CodeEditTabs()
completer = CodeCompleter
completer.process_inst = self._process_inst
self.completer = completer
self.code_edit.set_completer(completer)
self.code_edit.hide()
self.code_edit.tabChanged.connect(self._tab_changed)
self.code_edit.no_tabs.connect(self._collapse)
self.save_file = ui_data.ScriptFileWidget()
self.code_edit.save.connect( self._code_saved )
self.code_edit.multi_save.connect(self._multi_save)
self.main_layout.addWidget(self.code_edit, stretch = 1)
self.main_layout.addWidget(self.save_file, stretch = 0)
self.alt_layout = qt.QVBoxLayout()
self.save_file.hide()
def _tab_changed(self, widget):
if not widget:
return
if widget.filepath:
filepath = util_file.get_dirname(widget.filepath)
if util_file.is_dir(filepath):
self.save_file.set_directory(filepath)
self.save_file.set_text_widget(widget.text_edit)
if not util_file.is_dir(filepath):
self.save_file.hide()
def _collapse(self):
self.collapse.emit()
def _load_file_text(self, path, open_in_window):
process_data = process.Process()
process_data.set_directory(path)
name = process_data.get_code_name_from_path(path)
self.completer.name = name
name = name + '.py'
if not open_in_window:
tab = self.code_edit.add_tab(path, name)
if open_in_window:
floating_tab = self.code_edit.add_floating_tab(path, name)
def _code_saved(self, code_edit_widget):
if not code_edit_widget:
return
filepath = util_file.get_dirname(code_edit_widget.filepath)
self.save_file.set_directory(filepath)
self.save_file.set_text_widget(code_edit_widget)
self.save_file.save_widget._save(parent = code_edit_widget)
def _multi_save(self, widgets, note = None):
widgets = util.convert_to_sequence(widgets)
if not widgets:
return
#comment = qt_ui.get_comment(self, '- %s -\nScripts not saved.\nSave scripts?' % note)
#if comment == None:
#return
comment = 'auto save'
for widget in widgets:
self.save_file.set_text_widget(widget)
folder_path = util_file.get_dirname(widget.filepath)
util.show('Auto save %s' % folder_path)
self.save_file.set_directory(folder_path)
self.save_file.save_widget._save(comment)
def set_code_path(self, path, open_in_window = False, name = None, load_file = True):
if not path:
self.save_file.hide()
self.code_edit.hide()
return
folder_path = util_file.get_dirname(path)
self.directory = folder_path
self.save_file.set_directory(folder_path)
if path:
self.save_file.show()
self.code_edit.show()
if load_file:
self._load_file_text(path, open_in_window)
def set_process(self, process_inst):
self._process_inst = process_inst
self.code_edit.set_process(self._process_inst)
self.completer.process_inst = self._process_inst
class CodeCompleter(qt_ui.PythonCompleter):
def __init__(self):
super(CodeCompleter, self).__init__()
def keyPressEvent(self):
return
def _insert_completion(self, completion_string):
super(CodeCompleter, self)._insert_completion(completion_string)
#this stops maya from entering edit mode in the outliner, if something is selected
if util.is_in_maya():
import maya.cmds as cmds
cmds.setFocus('modelPanel1')
#cmds.select(cl = True)
def _format_live_function(self, function_instance):
"""
This was being used to get the functions of an instance for code completion.
It was being used to get functions from Process class but has been replaced with
util_file.get_ast_class_sub_functions
could still be useful in the future.
"""
function_name = None
if hasattr(function_instance, 'im_func'):
args = function_instance.im_func.func_code.co_varnames
count = function_instance.im_func.func_code.co_argcount
args_name = ''
if count:
if args:
args = args[:count]
if args[0] == 'self':
args = args[1:]
args_name = ','.join(args)
function_name = '%s(%s)' % (function_instance.im_func.func_name, args_name)
return function_name
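# Note (added for clarity): im_func, func_code and func_name are Python 2 attributes of
# bound methods; under Python 3 the equivalents are __func__, __code__ and __name__,
# so this helper only produces a signature when running on Python 2.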
def custom_clear_cache(self, text):
if text.find('put') == -1:
self._put_list = []
def custom_import_load(self, assign_map, module_name, text):
text = str(text)
if module_name == 'put':
if hasattr(self, 'name') and hasattr(self, 'process_inst'):
check_name = self.name + '/' + util_file.get_basename(self.name)
scripts = self.process_inst.get_manifest_scripts(basename = False, fast_with_less_checks = True)
found = {}
inc = 0
threads = []
for script in scripts:
if script[:-3].endswith(check_name):
break
thread = threading.Thread(target = get_puts_in_file, args = (script,found))
threads.append(thread)
thread.start()
inc += 1
for thread in threads:
thread.join()
put_value = get_put(text)
if put_value:
for value in put_value:
found[value] = None
keys = list(found.keys())
keys.sort()
return keys
if module_name == 'process':
if assign_map:
if module_name in assign_map:
return []
process_file = process.__file__
if process_file.endswith('.pyc'):
process_file = process_file[:-4] + '.py'
functions, _ = util_file.get_ast_class_sub_functions(process_file, 'Process')
return functions
if module_name == 'cmds' or module_name == 'mc':
if assign_map:
if module_name in assign_map:
return []
if util.is_in_maya():
import maya.cmds as cmds
functions = dir(cmds)
return functions
if module_name == 'pm' or module_name == 'pymel':
if assign_map:
if module_name in assign_map:
return []
if util.is_in_maya():
import pymel.all as pymel
functions = dir(pymel)
return functions
def get_put(text):
puts = []
find = re.findall(r'\s*(put.)([a-zA-Z0-9_]*)(?=.*[=])', text)
if find:
for f in find:
puts.append(f[1])
return puts
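# Illustrative example (hypothetical script text):
# >>> get_put('put.temp = 5')
# ['temp']
# Only names that are followed by an '=' later on the same line are collected.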
def get_puts_in_file(filepath, accum_dict = {}):
check_text = util_file.get_file_text(filepath)
put_value = get_put(check_text)
if put_value:
for value in put_value:
accum_dict[value] = None
return accum_dict
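# Note (added): accum_dict defaults to a mutable dict that Python shares between calls
# which omit the argument; callers in this module (custom_import_load) always pass an
# explicit dict, so each scan starts from a fresh accumulator.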
class ScriptWidget(qt_ui.DirectoryWidget):
script_open = qt_ui.create_signal(object, object, object)
script_open_external = qt_ui.create_signal(object)
script_focus = qt_ui.create_signal(object)
script_rename = qt_ui.create_signal(object, object)
script_remove = qt_ui.create_signal(object)
script_duplicate = qt_ui.create_signal()
script_text_size_change = qt.create_signal(object)
script_added = qt_ui.create_signal(object)
def __init__(self):
self._process_inst = None
super(ScriptWidget, self).__init__()
policy = self.sizePolicy()
policy.setHorizontalPolicy(policy.Minimum)
policy.setHorizontalStretch(0)
self.setSizePolicy(policy)
self.external_code_library = None
def _define_main_layout(self):
return qt.QVBoxLayout()
def _build_widgets(self):
self._create_history_widget()
self.code_manifest_tree = CodeManifestTree()
buttons_layout = qt.QHBoxLayout()
self.code_manifest_tree.item_renamed.connect(self._rename)
self.code_manifest_tree.script_open.connect(self._script_open)
self.code_manifest_tree.script_open_external.connect(self._script_open_external)
self.code_manifest_tree.script_focus.connect(self._script_focus)
self.code_manifest_tree.item_removed.connect(self._remove_code)
self.code_manifest_tree.item_duplicated.connect(self._duplicate)
self.code_manifest_tree.item_added.connect(self._item_added)
self.edit_mode_button = qt.QPushButton('Edit')
self.edit_mode_button.setCheckable(True)
self.edit_mode_button.setMaximumWidth(100)
self.edit_mode_button.setMaximumHeight(20)
self.edit_mode_button.setMaximumWidth(40)
self.edit_mode_button.toggled.connect(self._edit_click)
btm_layout = qt.QHBoxLayout()
btm_layout.addWidget(self.history_widget)
btm_layout.addWidget(self.edit_mode_button, alignment= qt.QtCore.Qt.AlignRight)
self.main_layout.addWidget(self.code_manifest_tree)
self.main_layout.addLayout(btm_layout)
self.main_layout.addLayout(buttons_layout)
def _edit_click(self, bool_value):
self.code_manifest_tree.setDragEnabled(bool_value)
self.code_manifest_tree.setAcceptDrops(bool_value)
self.code_manifest_tree.setDropIndicatorShown(bool_value)
def _create_history_widget(self):
history_widget = qt_ui.CompactHistoryWidget()
history_widget.set_auto_accept(True)
history_widget.back_socket.connect(self._set_current_manifest_history)
history_widget.forward_socket.connect(self._set_current_manifest_history)
history_widget.load_default_socket.connect(self._load_manifest_default)
history_widget.accept_socket.connect(self._accept_changes)
self.history_widget = history_widget
if self._process_inst:
version_history = self._process_inst.get_option_history()
self.history_widget.set_history(version_history)
return history_widget
def _accept_changes(self):
self.code_manifest_tree.update_manifest_file()
#self.code_manifest_tree.refresh(sync = True)
def _set_current_manifest_history(self, version_file):
if not self.history_widget:
return
if version_file == 'current':
self.code_manifest_tree.refresh()
return
if version_file:
scripts, states = self._process_inst.get_manifest(version_file)
self.code_manifest_tree.refresh(False, [scripts,states])
def _load_manifest_default(self, default_version_file):
if not self.history_widget:
return
if default_version_file:
scripts, states = self._process_inst.get_manifest(default_version_file)
self.code_manifest_tree.refresh(False, [scripts,states])
def _script_open(self, item, open_in_window, open_external = False):
if self.code_manifest_tree.handle_selection_change:
code_folder = self._get_current_code(item)
self.script_open.emit(code_folder, open_in_window, open_external)
def _script_open_external(self):
if self.code_manifest_tree.handle_selection_change:
code_folder = self._get_current_code()
self.script_open_external.emit(code_folder)
def _script_focus(self):
if self.code_manifest_tree.handle_selection_change:
code_folder = self._get_current_code()
self.script_focus.emit(code_folder)
def _get_current_code(self, item = None):
if not item:
item = self.code_manifest_tree.selectedItems()
if item:
item = item[0]
if not item:
return
name = util_file.get_basename_no_extension(item.get_text())
path = self.code_manifest_tree._get_item_path(item)
if path:
name = util_file.join_path(path, name)
return name
def _run_code(self):
self.code_manifest_tree.run_current_item(self.external_code_library)
def _create_code(self):
process_tool = process.Process()
process_tool.set_directory(self.directory)
code_path = process_tool.create_code('code', 'script.python', inc_name = True)
name = util_file.get_basename(code_path)
item = self.code_manifest_tree._add_item(name, False)
self.code_manifest_tree.scrollToItem(item)
def _create_import_code(self):
process_tool = process.Process()
process_tool.set_directory(self.directory)
folders = process_tool.get_data_folders()
picked = qt_ui.get_pick(folders, 'Pick the data to import.', self)
process_tool.create_code('import_%s' % picked, import_data = picked)
self.code_manifest_tree._add_item('import_%s.py' % picked, False)
def _remove_code(self, filepath):
self.script_remove.emit(filepath)
def _rename(self, old_name, new_name):
self.script_rename.emit(old_name, new_name)
def _duplicate(self):
self.script_duplicate.emit()
def _item_added(self, item):
self.script_added.emit(item)
def set_directory(self, directory, sync_code = False):
super(ScriptWidget, self).set_directory(directory)
if not sync_code:
if self.directory == self.last_directory:
return
process_tool = process.Process()
process_tool.set_directory(directory)
self.code_manifest_tree.process = process_tool
self.code_manifest_tree.set_directory(directory)
def set_process_inst(self, process_inst):
self._process_inst = process_inst
if self._process_inst:
version_history = self._process_inst.get_manifest_history()
self.history_widget.set_history(version_history)
def reset_process_script_state(self):
self.code_manifest_tree.reset_process_script_state()
def set_process_script_state(self, directory, state):
self.code_manifest_tree.set_process_script_state(directory, state)
def set_process_script_log(self, directory, log):
self.code_manifest_tree.set_process_script_log(directory, log)
def set_external_code_library(self, code_directory):
self.external_code_library = code_directory
class CodeManifestTree(qt_ui.FileTreeWidget):
item_renamed = qt_ui.create_signal(object, object)
script_open = qt_ui.create_signal(object, object, object)
script_open_external = qt_ui.create_signal()
script_focus = qt_ui.create_signal()
item_removed = qt_ui.create_signal(object)
item_duplicated = qt_ui.create_signal()
item_added = qt_ui.create_signal(object)
def __init__(self):
super(CodeManifestTree, self).__init__()
self.process = None
self.title_text_index = 0
self.setSortingEnabled(False)
self.setAlternatingRowColors(True)
self.edit_state = False
self.setSelectionMode(self.ExtendedSelection)
self.setDragDropMode(self.InternalMove)
self.setDragEnabled(False)
self.setAcceptDrops(False)
self.setDropIndicatorShown(False)
self.setAutoScroll(True)
self.setDefaultDropAction(qt.QtCore.Qt.MoveAction)
self.invisibleRootItem().setFlags(qt.QtCore.Qt.ItemIsDropEnabled)
self.dragged_item = None
self.handle_selection_change = True
self.setContextMenuPolicy(qt.QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._item_menu)
self.future_rename = False
self.new_actions = []
self.edit_actions = []
self._create_context_menu()
header = self.header()
self.expand_collapse = qt.QPushButton('Collapse')
self.checkbox = qt.QCheckBox(header)
self.checkbox.stateChanged.connect(self._set_all_checked)
self.update_checkbox = True
self.hierarchy = True
#new
self.dragged_item = None
self.shift_activate = False
self.allow_manifest_update = True
self.break_index = None
self.break_item = None
self.start_index = None
self.start_item = None
if util.is_in_maya():
directory = util_file.get_vetala_directory()
icon_on = util_file.join_path(directory, 'icons/box_on.png')
icon_off = util_file.join_path(directory, 'icons/box_off.png')
icon_folder = util_file.join_path(directory, 'icons/folder.png')
icon_folder_open = util_file.join_path(directory, 'icons/folder_open.png')
lines = 'QTreeWidget::indicator:unchecked {image: url(%s);}' % icon_off
lines += ' QTreeWidget::indicator:checked {image: url(%s);}' % icon_on
self.setStyleSheet( lines)
self.setWhatsThis('Manifest List\n'
'\n'
'This list helps create a recipe for building your rig.\n'
'Here all the scripts you create to build the rig can be organized and run.\n'
'Turn on edit mode on the bottom right of this widget to access drag and drop.\n'
'Double click on a script to open and edit it.\n'
'\n'
'Right Click menu\n\n'
'Hitting the Process button at the bottom of Vetala will run the whole recipe.\n'
'To run individual scripts or a group of scripts use the right click menu.\n'
'The right click menu can also set start and end points.\n')
def resizeEvent(self, event = None):
super(CodeManifestTree, self).resizeEvent(event)
self.checkbox.setGeometry(qt.QtCore.QRect(3, 2, 16, 17))
def mouseDoubleClickEvent(self, event):
item = None
items = self.selectedItems()
if items:
item = items[0]
if not item:
return
settings_file = util.get_env('VETALA_SETTINGS')
settings = util_file.SettingsFile()
settings.set_directory(settings_file)
double_click_option = settings.get('manifest_double_click')
if double_click_option:
if double_click_option == 'open tab':
self.script_open.emit(item, False, False)
if double_click_option == 'open new':
self.script_open.emit(item, True, False)
if double_click_option == 'open external':
self.script_open.emit(item, False, True)
return
self.script_open.emit(item, False, False)
def mousePressEvent(self, event):
self.handle_selection_change = True
item = self.itemAt(event.pos())
parent = self.invisibleRootItem()
if item:
if item.parent():
parent = item.parent()
self.drag_parent = parent
self.dragged_item = item
super(CodeManifestTree, self).mousePressEvent(event)
self.script_focus.emit()
def keyPressEvent(self, event):
if event.key() == qt.QtCore.Qt.Key_Shift:
self.shift_activate = True
def keyReleaseEvent(self, event):
if event.key() == qt.QtCore.Qt.Key_Shift:
self.shift_activate = False
def dropEvent(self, event):
is_dropped = self.is_item_dropped(event, strict = True)
if not self.hierarchy:
is_dropped = False
event.accept()
if not is_dropped:
self._insert_drop(event)
if is_dropped:
self._add_drop(event)
self._update_manifest()
def _get_item_path(self, item):
"""
get the path to an item from the highest level down. eg script/script/script
"""
parent = item.parent()
parent_path = ''
while parent:
parent_name = parent.text(0)
split_name = parent_name.split('.')
parent_name = split_name[0]
if parent_path:
parent_path = util_file.join_path(parent_name, parent_path)
if not parent_path:
parent_path = parent_name
parent = parent.parent()
return parent_path
def _get_item_path_name(self, item, keep_extension = False):
"""
get the script name with path, eg script/script/script.py
"""
name = item.text(0)
if not keep_extension:
name = util_file.remove_extension(name)
path = self._get_item_path(item)
if path:
name = util_file.join_path(path, name)
return name
def _get_item_path_full_name(self, item, keep_extension = False):
"""
get the script name with path, eg script/script/script.py
"""
name = item.text(0)
folder_name = util_file.remove_extension(name)
if not keep_extension:
name = folder_name
path = self._get_item_path(item)
if path:
path = util_file.join_path(path, folder_name)
if not path:
path = folder_name
name = util_file.join_path(path, name)
return name
def _get_entered_item(self, event):
position = event.pos()
entered_item = self.itemAt(position)
if not entered_item:
entered_item = self.invisibleRootItem()
return entered_item
def _get_item_by_name(self, name):
items = self._get_all_items()
for item in items:
check_name = item.text(0)
path = self._get_item_path(item)
if path:
check_name = util_file.join_path(path, check_name)
check_name = self._get_item_path_name(item, keep_extension=True)
if check_name == name:
return item
def _get_all_items(self):
item_count = self.topLevelItemCount()
items = []
for inc in range(0, item_count):
item = self.topLevelItem(inc)
ancestors = self._get_ancestors(item)
items.append(item)
if ancestors:
items += ancestors
return items
def _get_ancestors(self, item):
child_count = item.childCount()
items = []
for inc in range(0, child_count):
child = item.child(inc)
children = self._get_ancestors(child)
items.append(child)
if children:
items += children
return items
def _get_files(self, scripts = [], states = []):
process_tool = self.process
if not scripts:
scripts, states = process_tool.get_manifest()
if not scripts:
return
#this is slow
code_folders = process_tool.get_code_folders()
found_scripts = []
found_states = []
for inc in range(0, len(scripts)):
#and each increment is slow
name = util_file.remove_extension( scripts[inc] )
if not name in code_folders:
continue
code_path = process_tool.get_code_file(name)
if not code_path or not util_file.exists(code_path):
continue
found_scripts.append(scripts[inc])
found_states.append(states[inc])
return [found_scripts, found_states]
def _check_has_parent(self, item, list_of_items):
has_parent = False
for other_item in list_of_items:
current_index = self.indexFromItem(item, 0)
other_index = self.indexFromItem(other_item, 0)
if current_index == other_index:
continue
current_path = self._get_item_path(item)
other_path = self._get_item_path(other_item)
if current_path == other_path:
continue
if current_path.startswith(other_path):
has_parent = True
break
return has_parent
def _insert_drop(self, event):
entered_item = self._get_entered_item(event)
entered_parent = entered_item.parent()
if not entered_parent:
entered_parent = self.invisibleRootItem()
from_list = event.source()
insert_inc = 0
remove_items = []
moved_items = []
has_parent_dict = {}
selected_items = from_list.selectedItems()
for item in selected_items:
has_parent_dict[item.text(0)] = self._check_has_parent(item, selected_items)
for item in selected_items:
if has_parent_dict[item.text(0)]:
continue
children = item.takeChildren()
filename = item.get_text()
state = item.get_state()
new_item = self._create_item(filename, state)
for child in children:
new_item.addChild(child)
child.set_state(-1)
parent = item.parent()
if not parent:
parent = self.invisibleRootItem()
remove_items.append([item, parent])
insert_row = self.indexFromItem(entered_item, column=0).row()
if self.dropIndicatorPosition() == self.BelowItem:
insert_row += 1
insert_row = insert_row + insert_inc
if not entered_parent:
if insert_row == -1:
insert_row = self.topLevelItemCount()
if not entered_item:
self.insertTopLevelItem(insert_row, new_item)
else:
if entered_item.text(0) == parent.text(0):
entered_item.insertChild(entered_item.childCount()-1, new_item)
else:
self.insertTopLevelItem(insert_row, new_item)
if entered_parent:
entered_parent.insertChild(insert_row, new_item)
insert_inc += 1
entered_parent_name = None
if entered_parent:
entered_parent_name = entered_parent.text(0)
if entered_parent_name != parent.text(0):
old_name = self._get_item_path_name(item)
new_name = self._get_item_path_name(new_item)
moved_items.append([old_name, new_name, new_item])
for item in remove_items:
item[1].removeChild(item[0])
for moved_item in moved_items:
old_name, new_name, item = moved_item
self._move_item(old_name, new_name, item)
def _add_drop(self, event):
entered_item = self._get_entered_item(event)
from_list = event.source()
remove_items = []
moved_items = []
has_parent_dict = {}
selected_items = from_list.selectedItems()
for item in selected_items:
has_parent_dict[item.text(0)] = self._check_has_parent(item, selected_items)
for item in selected_items:
if has_parent_dict[item.text(0)]:
continue
parent = item.parent()
if not parent:
parent = self.invisibleRootItem()
remove_items.append([item, parent])
children = item.takeChildren()
name = item.get_text()
state = item.get_state()
entered_item.setExpanded(True)
new_item = self._create_item(name, state)
for child in children:
child.set_state(-1)
new_item.addChild(child)
entered_item.addChild(new_item)
entered_item.setExpanded(True)
old_name = self._get_item_path_name(item)
new_name = self._get_item_path_name(new_item)
moved_items.append([old_name, new_name, new_item])
for item in remove_items:
item[1].removeChild(item[0])
for moved_item in moved_items:
old_name, new_name, item = moved_item
self._move_item(old_name, new_name, item)
def _move_item(self, old_name, new_name, item):
after_name = self._handle_item_reparent(old_name, new_name)
basename = util_file.get_basename(after_name)
item.set_text(basename + '.py')
self.item_renamed.emit(old_name, after_name)
def _handle_item_reparent(self, old_name, new_name):
if old_name == new_name:
return old_name
process_tool = process.Process()
process_tool.set_directory(self.directory)
new_name = process_tool.move_code(old_name, new_name)
return new_name
def _item_collapsed(self, item):
if self.shift_activate:
child_count = item.childCount()
for inc in range(0, child_count):
children = self._get_ancestors(item.child(inc))
item.child(inc).setExpanded(False)
for child in children:
child.setExpanded(False)
def _item_expanded(self, item):
if self.shift_activate:
child_count = item.childCount()
for inc in range(0, child_count):
children = self._get_ancestors(item.child(inc))
item.child(inc).setExpanded(True)
for child in children:
child.setExpanded(True)
def _set_all_checked(self, int):
if not self.update_checkbox:
return
if int == 2:
state = qt.QtCore.Qt.Checked
if int == 0:
state = qt.QtCore.Qt.Unchecked
value = qt_ui.get_permission('This will activate/deactivate all code. Perhaps consider saving your manifest before continuing.\n\n\n Continue?', self, title = 'Warning: Activate/Deactivate all code')
if not value:
self.update_checkbox = False
if int == 0:
self.checkbox.setCheckState(qt.QtCore.Qt.Checked)
if int == 2:
self.checkbox.setCheckState(qt.QtCore.Qt.Unchecked)
self.update_checkbox = True
return
for iterator in qt.QTreeWidgetItemIterator(self):
item = iterator.value()
if item:
item.setCheckState(0, state)
def _create_context_menu(self):
self.context_menu = qt.QMenu()
new_python = self.context_menu.addAction('New Python Code')
new_data_import = self.context_menu.addAction('New Data Import')
self.new_actions = [new_python, new_data_import]
self.context_menu.addSeparator()
self.run_action = self.context_menu.addAction('Run')
self.run_group_action = self.context_menu.addAction('Run Group')
self.context_menu.addSeparator()
rename_action = self.context_menu.addAction(self.tr('Rename'))
duplicate_action = self.context_menu.addAction('Duplicate')
self.delete_action = self.context_menu.addAction('Delete')
self.context_menu.addSeparator()
log_window = self.context_menu.addAction('Show Last Log')
new_window_action = self.context_menu.addAction('Open In New Window')
external_window_action = self.context_menu.addAction('Open In External')
browse_action = self.context_menu.addAction('Browse')
refresh_action = self.context_menu.addAction('Refresh')
self.context_menu.addSeparator()
start_action = self.context_menu.addAction('Set Startpoint')
self.cancel_start_action = self.context_menu.addAction('Cancel Startpoint')
self.context_menu.addSeparator()
break_action = self.context_menu.addAction('Set Breakpoint')
self.cancel_break_action = self.context_menu.addAction('Cancel Breakpoint')
self.context_menu.addSeparator()
self.cancel_points_action = self.context_menu.addAction('Cancel Start/Breakpoint')
self.edit_actions = [self.run_action, self.run_group_action, rename_action, duplicate_action, self.delete_action]
new_python.triggered.connect(self.create_python_code)
new_data_import.triggered.connect(self.create_import_code)
self.run_action.triggered.connect(self.run_current_item)
self.run_group_action.triggered.connect(self.run_current_group)
start_action.triggered.connect(self.set_startpoint)
self.cancel_start_action.triggered.connect(self.cancel_startpoint)
break_action.triggered.connect(self.set_breakpoint)
self.cancel_break_action.triggered.connect(self.cancel_breakpoint)
self.cancel_points_action.triggered.connect(self.cancel_points)
rename_action.triggered.connect(self._activate_rename)
duplicate_action.triggered.connect(self._duplicate_current_item)
self.delete_action.triggered.connect(self.remove_current_item)
log_window.triggered.connect(self._open_log_window)
new_window_action.triggered.connect(self._open_in_new_window)
external_window_action.triggered.connect(self._open_in_external)
browse_action.triggered.connect(self._browse_to_code)
refresh_action.triggered.connect(self._refresh_action)
def _item_menu(self, position):
items = self.selectedItems()
item = None
if items:
item = items[0]
if item:
self._edit_actions_visible(True)
if not item:
self._edit_actions_visible(False)
if len(items) > 1:
self._edit_actions_visible(False)
self.run_action.setVisible(True)
self.delete_action.setVisible(True)
self.context_menu.exec_(self.viewport().mapToGlobal(position))
def _edit_actions_visible(self, bool_value):
for action in self.edit_actions:
action.setVisible(bool_value)
def _refresh_action(self):
self.refresh(sync = True)
def _activate_rename(self):
items = self.selectedItems()
if not items:
return
item = items[0]
self.old_name = str(item.get_text())
new_name = qt_ui.get_new_name('New Name', self, self.old_name)
if new_name == self.old_name:
return
if not new_name:
return
if new_name == 'manifest' or new_name == 'manifest.py':
qt_ui.warning('Manifest is reserved. Name your script something else.', self)
return
self._rename_item(item, new_name)
def _open_log_window(self):
text_window = qt_ui.BasicWindow(self)
items = self.selectedItems()
item = items[0]
log = item.log
if not log:
qt_ui.message('No log. Please process first', self)
return
text_window.setWindowTitle('%s log' % item.get_text())
log_text = qt.QPlainTextEdit()
log_text.setReadOnly(True)
log_text.setPlainText(log)
log_text.setLineWrapMode(log_text.NoWrap)
log_text.setMinimumHeight(300)
log_text.setMinimumWidth(600)
text_window.main_layout.addWidget(log_text)
text_window.show()
def _open_in_new_window(self):
items = self.selectedItems()
item = items[0]
self.script_open.emit(item, True, False)
def _open_in_external(self):
self.script_open_external.emit()
def _browse_to_code(self):
items = self.selectedItems()
process_tool = process.Process()
process_tool.set_directory(self.directory)
if items:
item = items[0]
code_name = self._get_item_path_name(item)
code_path = process_tool.get_code_folder(code_name)
util_file.open_browser(code_path)
if not items:
code_path = process_tool.get_code_path()
util_file.open_browser(code_path)
def _define_header(self):
return [' Manifest']
def _name_clash(self, name):
process_tool = process.Process()
process_tool.set_directory(self.directory)
folders = process_tool.get_code_folders()
for folder in folders:
other_name = folder
if name == other_name:
return True
return False
def _rename_item(self, item, new_name):
new_name = str(new_name)
test_name = util_file.remove_extension(new_name)
if new_name and not test_name:
new_name = '_' + new_name[1:]
new_name = util_file.remove_extension(new_name)
path = self._get_item_path(item)
if path:
new_name = util_file.join_path(path, new_name)
old_name = self.old_name
old_name = util_file.remove_extension(old_name)
if path:
old_name = util_file.join_path(path, old_name)
inc = util.get_last_number(new_name)
if inc == None:
inc = 0
while self._name_clash(new_name):
inc += 1
if not util.get_trailing_number(new_name):
new_name = new_name + '1'
continue
new_name = util.replace_last_number(new_name, str(inc))
if inc >= 1000:
return
if old_name == new_name:
return
process_tool = process.Process()
process_tool.set_directory(self.directory)
file_name = process_tool.rename_code(old_name, new_name)
new_file_name = util_file.remove_extension(file_name)
filepath = process_tool.get_code_file(new_file_name)
basename = util_file.get_basename(filepath)
item.set_text(basename)
self.item_renamed.emit(old_name, new_name)
self._update_manifest()
def _define_item(self):
return ManifestItem()
def _setup_item(self, item, state):
if not state:
item.setCheckState(0, qt.QtCore.Qt.Unchecked)
if state:
item.setCheckState(0, qt.QtCore.Qt.Checked)
if not self.hierarchy:
#dont remove this comment. You can make an item not be drop enabled by giving it every flag except drop enabled.
item.setFlags(qt.QtCore.Qt.ItemIsSelectable|qt.QtCore.Qt.ItemIsEditable|qt.QtCore.Qt.ItemIsEnabled|qt.QtCore.Qt.ItemIsDragEnabled|qt.QtCore.Qt.ItemIsUserCheckable)
if self.hierarchy:
#this allows for dropping
item.setFlags(qt.QtCore.Qt.ItemIsSelectable|qt.QtCore.Qt.ItemIsEnabled|qt.QtCore.Qt.ItemIsDragEnabled|qt.QtCore.Qt.ItemIsUserCheckable|qt.QtCore.Qt.ItemIsDropEnabled)
#setData on the item is an issue: without this flag the manifest would be rewritten every time the program changes an item's check state.
#handle_manifest avoids updating the manifest while the state is being set programmatically.
if hasattr(item, 'handle_manifest'):
item.handle_manifest = True
def _create_item(self, filename, state = False):
item = self._define_item()
item.set_text(filename)
self._setup_item(item, state)
return item
def _add_item(self, filename, state, parent = None, update_manifest = True, skip_emit = False):
if filename:
if filename.count('/') > 0:
basename = util_file.get_basename(filename)
item = super(CodeManifestTree, self)._add_item(basename, parent = False)
if filename.count('/') == 0:
item = super(CodeManifestTree,self)._add_item(filename, parent)
self._setup_item(item, state)
if update_manifest:
self._update_manifest()
if not skip_emit:
self.item_added.emit(item)
return item
def _add_items(self, files, item = None):
scripts, states = files
script_count = len(scripts)
found_false = False
order_scripts = {}
order_of_scripts = []
parents = {}
for inc in range(0, script_count):
script_full = scripts[inc]
script_name = script_full.split('.')[0]
slash_count = script_name.count('/')
if not slash_count in order_scripts:
order_scripts[slash_count] = []
order_of_scripts.append(slash_count)
parents[script_name] = None
order_scripts[slash_count].append([script_name, script_full, states[inc]])
ordered_scripts = []
for count in order_of_scripts:
ordered_scripts += order_scripts[count]
built_parents = {}
for inc in range(0, script_count):
script_name,script_full, state = ordered_scripts[inc]
basename = util_file.get_basename(script_full)
item = self._add_item('...temp...', state, parent = False, update_manifest = False, skip_emit=True)
if script_name in parents:
built_parents[script_name] = item
dirname = util_file.get_dirname(script_full)
if dirname in built_parents:
current_parent = built_parents[dirname]
current_parent.addChild(item)
item.set_text(basename)
if not dirname:
self.addTopLevelItem(item)
item.set_text(basename)
if not state:
found_false = True
self.update_checkbox = False
if not found_false:
self.checkbox.setChecked(True)
if found_false:
self.checkbox.setChecked(False)
self.update_checkbox = True
def _reparent_item(self, name, item, parent_item):
current_parent = item.parent()
if not current_parent:
current_parent = self.invisibleRootItem()
if current_parent and parent_item:
old_name = self._get_item_path_name(item)
parent_path = self._get_item_path_name(parent_item)
new_name = util_file.join_path(parent_path, name)
current_parent.removeChild(item)
parent_item.addChild(item)
old_name = util_file.remove_extension(old_name)
new_name = util_file.remove_extension(new_name)
self._move_item(old_name, new_name, item)
def _get_current_manifest(self):
scripts = []
states = []
#Could not use the item iterator because it updates setData, which updates the manifest,
#which causes the manifest to be updated too much.
#it = qt.QTreeWidgetItemIterator(self)
#while it:
#item = it.value()
#items.append(item)
items = self._get_all_items()
for item in items:
name = item.get_text()
path = self._get_item_path(item)
if path:
name = util_file.join_path(path, name)
state = item.checkState(0)
if state == 0:
state = False
if state == 2:
state = True
scripts.append(name)
states.append(state)
return scripts, states
def _update_manifest(self):
if not self.allow_manifest_update:
return
scripts, states = self.get_current_manifest()
process_tool = self.process
process_tool.set_manifest(scripts, states)
def _run_item(self, item, process_tool, run_children = False):
self.scrollToItem(item)
item.set_state(2)
item.setExpanded(True)
background = item.background(0)
orig_background = background
color = qt.QColor(1,0,0)
background.setColor(color)
item.setBackground(0, background)
name = self._get_item_path_name(item)
code_file = process_tool.get_code_file(name)
if in_maya:
import maya.cmds as cmds
cmds.select(cl = True)
core.auto_focus_view()
util.start_temp_log()
status = process_tool.run_script(code_file, False, return_status = True)
log = util.get_last_temp_log()#util.get_env('VETALA_LAST_TEMP_LOG')
item.set_log(log)
if status == 'Success':
item.set_state(1)
if not status == 'Success':
item.set_state(0)
if log.find('Warning!') > -1:
item.set_state(3)
item.setBackground(0, orig_background)
if status == 'Success':
if run_children:
self._run_children(item, process_tool, recursive = True)
def _run_children(self, item, process_tool, recursive = True):
child_count = item.childCount()
if not child_count:
return
item.setExpanded(True)
if child_count:
for inc in range(0, child_count):
child_item = item.child(inc)
child_item.set_state(-1)
for inc in range(0, child_count):
child_item = item.child(inc)
check_state = child_item.checkState(0)
if check_state == qt.QtCore.Qt.Unchecked:
continue
self._run_item(child_item, process_tool, run_children=recursive)
def _duplicate_current_item(self):
self.setFocus(qt.QtCore.Qt.ActiveWindowFocusReason)
items = self.selectedItems()
item = items[0]
name = self._get_item_path_name(item)
process_tool = process.Process()
process_tool.set_directory(self.directory)
filepath = process_tool.get_code_file(name)
parent_item = item.parent()
code_path = process_tool.create_code(name, 'script.python', inc_name = True)
file_lines = util_file.get_file_lines(filepath)
util_file.write_lines(code_path, file_lines, append = False)
name = util_file.get_basename(code_path)
item = self._add_item(name, False, parent = parent_item)
item.setCheckState(0, qt.QtCore.Qt.Checked)
self.item_duplicated.emit()
self._activate_rename()
self.scrollToItem(item)
self.setItemSelected(item, True)
self.setCurrentItem(item)
return item
def _custom_refresh(self, scripts, states):
files = self._get_files(scripts, states)
if not files:
self.clear()
return
self._load_files(files)
self.refreshed.emit()
def sync_manifest(self):
process_tool = process.Process()
process_tool.set_directory(self.directory)
process_tool.sync_manifest()
def refresh(self, sync = False, scripts_and_states = []):
if self.break_item:
break_item_path = self._get_item_path_name(self.break_item, keep_extension=True)
if self.start_item:
start_item_path = self._get_item_path_name(self.start_item, keep_extension=True)
if sync:
self.sync_manifest()
self.allow_manifest_update = False
if not scripts_and_states:
super(CodeManifestTree, self).refresh()
if scripts_and_states:
self._custom_refresh(scripts_and_states[0], scripts_and_states[1])
self.allow_manifest_update = True
if self.start_item:
item = self._get_item_by_name(start_item_path)
if item:
self.set_startpoint(item)
if self.break_item:
item = self._get_item_by_name(break_item_path)
if item:
self.set_breakpoint(item)
def update_manifest_file(self):
self._update_manifest()
def get_current_item_file(self):
items = self.selectedItems()
if not items:
return
item = items[0]
name = self._get_item_path_full_name(item, keep_extension=True)
path = util_file.join_path(self.directory, name)
return path
def get_current_manifest(self):
scripts = []
states = []
#Could not use the item iterator because it updates setData, which updates the manifest,
#which causes the manifest to be updated too much.
#it = qt.QTreeWidgetItemIterator(self)
#while it:
#item = it.value()
#items.append(item)
items = self._get_all_items()
for item in items:
name = item.get_text()
path = self._get_item_path(item)
if path:
name = util_file.join_path(path, name)
state = item.checkState(0)
if state == 0:
state = False
if state == 2:
state = True
scripts.append(name)
states.append(state)
return scripts, states
def set_directory(self, directory, refresh = True):
self.directory = directory
if refresh:
self.refresh(refresh)
def reset_process_script_state(self):
items = self._get_all_items()
for item in items:
item.set_state(-1)
def set_process_script_state(self, directory, state):
script_name = directory
item = self._get_item_by_name(script_name)
if not item:
return
if state > -1:
self.scrollToItem(item)
item.set_state(state)
def set_process_script_log(self, directory, log):
script_name = directory
item = self._get_item_by_name(script_name)
if not item:
return
item.set_log(log)
def is_process_script_breakpoint(self, directory):
item = self._get_item_by_name(directory)
model_index = self.indexFromItem(item)
index = model_index.internalId()
if index == self.break_index:
return True
return False
def has_startpoint(self):
if self.start_index != None:
return True
return False
def is_process_script_startpoint(self, directory):
item = self._get_item_by_name(directory)
model_index = self.indexFromItem(item)
index = model_index.internalId()
if index == self.start_index:
return True
return False
def create_python_code(self):
process_tool = process.Process()
process_tool.set_directory(self.directory)
items = self.selectedItems()
code = 'code'
parent = None
if items:
parent = items[0]
path = self._get_item_path_name(parent, keep_extension = False)
if path:
code = path + '/' + code
code_path = process_tool.create_code(code, 'script.python', inc_name = True)
name = util_file.get_basename(code_path)
item = self._add_item(name, False, parent = parent)
item.setCheckState(0, qt.QtCore.Qt.Checked)
#self._reparent_item('code', item, parent_item)
self.scrollToItem(item)
self.setItemSelected(item, True)
self.setCurrentItem(item)
self._update_manifest()
self._activate_rename()
def create_import_code(self):
process_tool = process.Process()
process_tool.set_directory(self.directory)
folders = process_tool.get_data_folders()
picked = qt_ui.get_pick(folders, 'Pick the data to import.', self)
if not picked:
return
parent_item = None
items = self.selectedItems()
if items:
parent_item = items[0]
code_path = process_tool.create_code('import_%s' % picked, 'script.python', import_data = picked, inc_name = True)
name = util_file.get_basename(code_path)
item = self._add_item(name, False)
item.setCheckState(0, qt.QtCore.Qt.Checked)
self._reparent_item('import_%s' % picked, item, parent_item)
self.scrollToItem(item)
self.setItemSelected(item, True)
self.setCurrentItem(item)
self._update_manifest()
def run_current_item(self, external_code_library = None, group_only = False):
util.set_env('VETALA RUN', True)
util.set_env('VETALA STOP', False)
process_tool = self.process
scripts, states = process_tool.get_manifest()
items = self.selectedItems()
if len(items) > 1:
if util.is_in_maya():
value = qt_ui.get_permission('Start a new scene?', self)
if value:
core.start_new_scene()
if value == None:
return
watch = util.StopWatch()
watch.start(feedback = False)
for item in items:
item.set_state(-1)
if external_code_library:
process_tool.set_external_code_library(external_code_library)
inc = 0
last_name = items[-1].text(0)
last_path = self._get_item_path( items[-1] )
if last_path:
last_name = util_file.join_path(last_path, last_name)
set_end_states = False
for inc in range(0, len(scripts)):
if util.get_env('VETALA RUN') == 'True':
if util.get_env('VETALA STOP') == 'True':
break
if set_end_states:
item = self._get_item_by_name(scripts[inc])
if item:
item.set_state(-1)
item_count = len(items)
for item in items:
name = item.text(0)
path = self._get_item_path(item)
if path:
name = util_file.join_path(path, name)
if name == scripts[inc]:
run_children = False
if group_only:
run_children = True
self._run_item(item, process_tool, run_children)
if group_only:
break
if name == last_name:
set_end_states = True
util.set_env('VETALA RUN', False)
util.set_env('VETALA STOP', False)
minutes, seconds = watch.stop()
if minutes:
util.show('Processes run in %s minutes and %s seconds.' % (minutes, seconds))
else:
util.show('Processes run in %s seconds.' % seconds)
def run_current_group(self):
self.run_current_item(group_only = True)
def remove_current_item(self):
items = self.selectedItems()
delete_state = False
if len(items) > 1:
delete_state = qt_ui.get_permission('Delete selected codes?')
if not delete_state:
return
for item in items:
name = self._get_item_path_name(item)
if len(items) == 1:
delete_state = qt_ui.get_permission('Delete %s?' % name)
process_tool = process.Process()
process_tool.set_directory(self.directory)
filepath = process_tool.get_code_file(name)
if delete_state:
index = self.indexFromItem(item)
parent = item.parent()
if parent:
parent.removeChild(item)
if not parent:
self.takeTopLevelItem(index.row())
process_tool.delete_code(name)
self._update_manifest()
self.item_removed.emit(filepath)
def set_breakpoint(self, item = None):
self.cancel_breakpoint()
if not item:
items = self.selectedItems()
if not items:
return
item = items[0]
self.clearSelection()
item_index = self.indexFromItem(item)
if item_index.internalId() == self.start_index:
self.cancel_startpoint()
self.break_index = item_index.internalId()
self.break_item = item
if util.is_in_maya():
brush = qt.QBrush( qt.QColor(70,0,0))
if not util.is_in_maya():
brush = qt.QBrush( qt.QColor(240,230,230))
item.setBackground(0, brush)
def set_startpoint(self, item = None):
self.cancel_startpoint()
if not item:
items = self.selectedItems()
if not items:
return
item = items[0]
self.clearSelection()
item_index = self.indexFromItem(item)
if item_index.internalId() == self.break_index:
self.cancel_breakpoint()
self.start_index = item_index.internalId()
self.start_item = item
if util.is_in_maya():
brush = qt.QBrush( qt.QColor(0,70,20))
if not util.is_in_maya():
brush = qt.QBrush( qt.QColor(230,240,230))
item.setBackground(0, brush)
def cancel_breakpoint(self):
if self.break_item:
try:
self.break_item.setBackground(0, qt.QBrush())
except:
pass
self.break_index = None
self.break_item = None
self.repaint()
def cancel_startpoint(self):
if self.start_item:
try:
self.start_item.setBackground(0, qt.QBrush())
except:
pass
self.start_index = None
self.start_item = None
self.repaint()
def cancel_points(self):
self.cancel_startpoint()
self.cancel_breakpoint()
def set_process_data(self, process_runtime_dictionary, put_class):
self.process.runtime_values = process_runtime_dictionary
self.process._put = put_class
class ManifestItem(qt_ui.TreeWidgetItem):
def __init__(self):
self.handle_manifest = False
super(ManifestItem, self).__init__()
self.setSizeHint(0, qt.QtCore.QSize(10, 20))
maya_version = util.get_maya_version()
if maya_version > 2015 or maya_version == 0:
self.status_icon = self._circle_fill_icon(0, 0, 0)
if maya_version < 2016 and maya_version != 0:
self.status_icon = self._radial_fill_icon(0, 0, 0)
self.setCheckState(0, qt.QtCore.Qt.Unchecked)
self.run_state = -1
self.log = ''
def _square_fill_icon(self, r,g,b):
alpha = 1
if r == 0 and g == 0 and b == 0:
alpha = 0
pixmap = qt.QPixmap(20, 20)
pixmap.fill(qt.QColor.fromRgbF(r, g, b, alpha))
painter = qt.QPainter(pixmap)
painter.fillRect(0, 0, 100, 100, qt.QColor.fromRgbF(r, g, b, alpha))
painter.end()
icon = qt.QIcon(pixmap)
self.setIcon(0, icon)
def _circle_fill_icon(self, r,g,b):
alpha = 1
if r == 0 and g == 0 and b == 0:
alpha = 0
pixmap = qt.QPixmap(20, 20)
pixmap.fill(qt.QtCore.Qt.transparent)
#pixmap.fill(qt.QColor.fromRgbF(r, g, b, alpha))
painter = qt.QPainter(pixmap)
painter.setBrush(qt.QColor.fromRgbF(r, g, b, alpha))
painter.setPen(qt.QtCore.Qt.NoPen)
painter.drawEllipse(0,0,20,20)
#painter.fillRect(0, 0, 100, 100, qt.QColor.fromRgbF(r, g, b, alpha))
painter.end()
icon = qt.QIcon(pixmap)
self.setIcon(0, icon)
def _radial_fill_icon(self, r,g,b):
alpha = 1
if r == 0 and g == 0 and b == 0:
alpha = 0
pixmap = qt.QPixmap(20, 20)
pixmap.fill(qt.QtCore.Qt.transparent)
gradient = qt.QRadialGradient(10, 10, 10)
gradient.setColorAt(0, qt.QColor.fromRgbF(r, g, b, alpha))
gradient.setColorAt(1, qt.QColor.fromRgbF(0, 0, 0, 0))
painter = qt.QPainter(pixmap)
painter.fillRect(0, 0, 100, 100, gradient)
painter.end()
icon = qt.QIcon(pixmap)
self.setIcon(0, icon)
def setData(self, column, role, value):
super(ManifestItem, self).setData(column, role, value)
check_state = qt.QtCore.Qt.Unchecked
if value == 2:
check_state = qt.QtCore.Qt.Checked
if role == qt.QtCore.Qt.CheckStateRole:
if self.handle_manifest:
tree = self.treeWidget()
tree._update_manifest()
if tree.shift_activate:
child_count = self.childCount()
for inc in range(0, child_count):
child = self.child(inc)
child.setCheckState(column, check_state)
children = tree._get_ancestors(child)
for child in children:
child.setCheckState(column, check_state)
def set_state(self, state):
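# Run-state codes drive the status icon color below (meaning inferred from the colors used):
# -1 neutral/not run, 0 red, 1 green, 2 yellow, 3 olive.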
maya_version = util.get_maya_version()
if maya_version < 2016 and maya_version != 0:
if state == 0:
self._radial_fill_icon(1.0, 0.0, 0.0)
if state == 1:
self._radial_fill_icon(0.0, 1.0, 0.0)
if state == -1:
self._radial_fill_icon(0.6, 0.6, 0.6)
if state == 2:
self._radial_fill_icon(1.0, 1.0, 0.0)
if state == 3:
self._radial_fill_icon(.65, .7, 0.225)
if maya_version > 2015 or maya_version == 0:
if state == 0:
self._circle_fill_icon(1.0, 0.0, 0.0)
if state == 1:
self._circle_fill_icon(0.0, 1.0, 0.0)
if state == -1:
self._circle_fill_icon(0, 0, 0)
#self._square_fill_icon(0.6, 0.6, 0.6)
if state == 2:
self._circle_fill_icon(1.0, 1.0, 0.0)
if state == 3:
self._circle_fill_icon(.65, .7, .225)
self.run_state = state
def get_run_state(self):
return self.run_state
def get_state(self):
return self.checkState(0)
def set_text(self, text):
text = ' ' + text
super(ManifestItem, self).setText(0, text)
def get_text(self):
text_value = super(ManifestItem, self).text(0)
return str(text_value).strip()
def text(self, index):
return self.get_text()
def setText(self, index, text):
return self.set_text(text)
def set_log(self, log):
self.log = log
|
context.py
|
"""
Overview:
Utilities for building context variables on thread level.
This is useful when implementing a with-block-based syntax.
For example:
>>> from contextlib import contextmanager
>>> from hbutils.reflection import context
>>>
>>> # developer's view
... @contextmanager
... def use_mul(): # set 'mul' to `True` in its with-block
... with context().vars(mul=True):
... yield
>>>
>>> def calc(a, b): # logic of `calc` will be changed when 'mul' is given
... if context().get('mul', None):
... return a * b
... else:
... return a + b
>>>
>>> # user's view (magic-liked, isn't it?)
... print(calc(3, 5)) # 3 + 5
8
>>> with use_mul():
... print(calc(3, 5)) # changed to 3 * 5
15
>>> print(calc(3, 5)) # back to 3 + 5, again :)
8
"""
import collections.abc
from contextlib import contextmanager
from functools import wraps
from multiprocessing import current_process
from threading import current_thread
from typing import Tuple, TypeVar, Iterator, Mapping, Optional
__all__ = [
'context', 'cwrap',
]
def _get_pid() -> int:
return current_process().pid
def _get_tid() -> int:
return current_thread().ident
def _get_context_id() -> Tuple[int, int]:
return _get_pid(), _get_tid()
_global_contexts = {}
_KeyType = TypeVar('_KeyType', bound=str)
_ValueType = TypeVar('_ValueType')
class ContextVars(collections.abc.Mapping):
"""
Overview:
Context variable management.
.. note::
This class is inherited from :class:`collections.abc.Mapping`.
Main features of mapping object (such as ``__getitem__``, ``__len__``, ``__iter__``) are supported.
See `Collections Abstract Base Classes \
<https://docs.python.org/3/library/collections.abc.html#collections-abstract-base-classes>`_.
.. warning::
This object should be a singleton at thread level.
Constructing it manually is not recommended.
"""
def __init__(self, **kwargs):
"""
Constructor of :class:`ContextVars`.
:param kwargs: Initial context values.
"""
self._vars = dict(kwargs)
@contextmanager
def _with_vars(self, params: Mapping[_KeyType, _ValueType], clear: bool = False):
# initialize new values
_origin = dict(self._vars)
self._vars.update(params)
if clear:
for key in list(self._vars.keys()):
if key not in params:
del self._vars[key]
try:
yield
finally:
# de-initialize, recover changed values
for k in set(_origin.keys()) | set(self._vars.keys()):
if k not in _origin:
del self._vars[k]
else:
self._vars[k] = _origin[k]
@contextmanager
def vars(self, **kwargs):
"""
Add variables into the context for the scope of a ``with`` block.
:param kwargs: Additional context variables.
Examples::
>>> from hbutils.reflection import context
>>>
>>> def var_detect():
... if context().get('var', None):
... print(f'Var detected, its value is {context()["var"]}.')
... else:
... print('Var not detected.')
>>>
>>> var_detect()
Var not detected.
>>> with context().vars(var=1):
... var_detect()
Var detected, its value is 1.
>>> var_detect()
Var not detected.
.. note::
See :func:`context`.
"""
with self._with_vars(kwargs, clear=False):
yield
@contextmanager
def inherit(self, context_: 'ContextVars'):
"""
Inherit variables from the given context.
:param context_: :class:`ContextVars` object to inherit from.
.. note::
After :meth:`inherit` is used, **the original variables which are not present in the given ``context_`` \
will be removed**. This differs from :meth:`vars`, so take care.
"""
with self._with_vars(context_._vars, clear=True):
yield
def __getitem__(self, key: _KeyType):
return self._vars[key]
def __len__(self) -> int:
return len(self._vars)
def __iter__(self) -> Iterator[_KeyType]:
return self._vars.__iter__()
def context() -> ContextVars:
"""
Overview:
Get context object in this thread.
:return: Context object in this thread.
.. note::
This result is unique on one thread.
"""
_context_id = _get_context_id()
if _context_id not in _global_contexts:
_context = ContextVars()
_global_contexts[_context_id] = _context
return _global_contexts[_context_id]
def cwrap(func, *, context_: Optional[ContextVars] = None, **vars_):
"""
Overview:
Context wrap for functions.
:param func: Original function to wrap.
:param context_: Context for inheriting. Default is ``None`` which means :func:`context`'s result will be used.
:param vars_: External variables after inherit context.
.. note::
:func:`cwrap` is important when you need to pass the current context into a thread.
And **it is compatible on all platforms**.
For example:
>>> from threading import Thread
>>> from hbutils.reflection import context, cwrap
>>>
>>> def var_detect():
... if context().get('var', None):
... print(f'Var detected, its value is {context()["var"]}.')
... else:
... print('Var not detected.')
>>>
>>> with context().vars(var=1): # no inherit, vars will be lost in thread
... t = Thread(target=var_detect)
... t.start()
... t.join()
Var not detected.
>>> with context().vars(var=1): # with inherit, vars will be kept in thread
... t = Thread(target=cwrap(var_detect))
... t.start()
... t.join()
Var detected, its value is 1.
.. warning::
:func:`cwrap` **is not compatible on Windows or with Python 3.8+ on macOS** when creating a **new process**.
Please pass in direct arguments by ``args`` argument of :class:`Process`.
If you insist on using :func:`context` feature, you need to pass the context object into the sub process.
For example:
>>> from contextlib import contextmanager
>>> from multiprocessing import Process
>>> from hbutils.reflection import context
>>>
>>> # developer's view
... @contextmanager
... def use_mul(): # set 'mul' to `True` in its with-block
... with context().vars(mul=True):
... yield
>>>
>>> def calc(a, b): # logic of `calc` will be changed when 'mul' is given
... if context().get('mul', None):
... print(a * b)
... else:
... print(a + b)
>>>
>>> def _calc(a, b, ctx=None):
... with context().inherit(ctx or context()):
... return calc(a, b)
>>>
>>> # user's view
... if __name__ == '__main__':
... calc(3, 5) # 3 + 5
... with use_mul():
... p = Process(target=_calc, args=(3, 5, context()))
... p.start()
... p.join()
... calc(3, 5) # back to 3 + 5, again :)
8
15
8
"""
context_ = context_ or context()
@wraps(func)
def _new_func(*args, **kwargs):
with context().inherit(context_):
with context().vars(**vars_):
return func(*args, **kwargs)
return _new_func
|
tello.py
|
# coding=utf-8
import cv2
import time
import socket
import logging
import threading
from threading import Thread
from drone.djitellopy.decorators import accepts
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf
"""
# Send and receive commands, client socket
UDP_IP = '192.168.10.1'
UDP_PORT = 8888
RESPONSE_TIMEOUT = 5 # in seconds
TIME_BTW_COMMANDS = 0.5 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.5 # in seconds
last_received_command = time.time()
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('%(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# use logging.getLogger('djitellopy').setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
STATE_UDP_PORT = 8890
# VideoCapture object
cap = None
background_frame_read = None
stream_on = False
def __init__(self, host='192.168.10.1', port=8889, client_socket=None, enable_exceptions=True, retry_count=3):
self.address = (host, port)
self.response = None
self.response_state = None # to attain the response of the states
self.stream_on = False
self.enable_exceptions = enable_exceptions
self.retry_count = retry_count
if client_socket:
self.clientSocket = client_socket
else:
self.clientSocket = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.clientSocket.bind(('', self.UDP_PORT)) # For UDP response (receiving data)
self.stateSocket = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self.stateSocket.bind(('', self.STATE_UDP_PORT))# for accessing the states of Tello
# Run Tello UDP receiver in the background
thread1 = threading.Thread(target=self.run_udp_receiver, args=())
# Run state receiver in the background
thread2 = threading.Thread(target=self.get_states, args=())
thread1.daemon = True
thread2.daemon = True
thread1.start()
thread2.start()
def run_udp_receiver(self):
"""Setup drone UDP receiver. This method listens for responses of Tello. Must be run from a background thread
in order to not block the main thread."""
while True:
try:
self.response, _ = self.clientSocket.recvfrom(1024) # buffer size is 1024 bytes
except Exception as e:
self.LOGGER.error(e)
break
def get_states(self):
"""This runs on background to recieve the state of Tello"""
while True:
try:
self.response_state, _ = self.stateSocket.recvfrom(128)
except Exception as e:
self.LOGGER.error(e)
break
def get_current_state_all(self):
"""Call this function to attain the states of Tello"""
if self.response_state == 'ok':
return False
else:
return self.response_state.decode('ASCII')
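# The raw state string is a series of "key:value;" pairs (SDK 1.3 field order), e.g.
# "pitch:0;roll:0;yaw:0;vgx:0;vgy:0;vgz:0;templ:..;temph:..;tof:..;h:..;bat:..;baro:..;time:..;agx:..;agy:..;agz:..;"
# The getters below replace ';' with ':' and split, so pitch sits at index 1, roll at 3,
# yaw at 5, vgx/vgy/vgz at 7/9/11, h at 19, bat at 21, and agx/agy/agz at 27/29/31.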
def get_pitch(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[1])
except:
print("Exception in pitch occured")
return 0
def get_roll(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[3])
except:
print("Exception in roll occured")
return 0
def get_yaw(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[5])
except:
print("Exception in yaw occured")
return 0
def get_vgx(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[7])
except:
print("Exception in velocity in x occured")
return 0
def get_vgy(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[9])
except:
print("Exception in velocity in y occured")
return 0
def get_vgz(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[11])
except:
print("Exception in velocity in z occured")
return 0
def get_agx(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[27])
except:
print("Exception in acceleration in x")
return 0
def get_agy(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[29])
except:
print("Exception in acceleration in y")
return 0
def get_agz(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[31])
except:
print("Exception in acceleration in z")
return 0
def get_h(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[19])
except:
print("Exception in height")
return 0
def get_bat(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';',':')
response = response.split(':')
try:
return float(response[21])
except:
print("Exception in battery")
return 50
def get_udp_video_address(self):
return 'udp://@' + self.VS_UDP_IP + ':' + str(self.VS_UDP_PORT) # + '?overrun_nonfatal=1&fifo_size=5000'
def get_video_capture(self):
"""Get the VideoCapture object from the camera drone
Returns:
VideoCapture
"""
if self.cap is None:
self.cap = cv2.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
self.cap.open(self.get_udp_video_address())
return self.cap
def get_frame_read(self):
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()
return self.background_frame_read
def stop_video_capture(self):
return self.streamoff()
@accepts(command=str)
def send_command_with_return(self, command):
"""Send command to Tello and wait for its response.
Return:
bool: True for successful, False for unsuccessful
"""
# Commands sent in quick succession are dropped by the drone, so wait at least self.TIME_BTW_COMMANDS seconds between commands
diff = time.time() * 1000 - self.last_received_command
if diff < self.TIME_BTW_COMMANDS * 1000:
time.sleep((self.TIME_BTW_COMMANDS * 1000 - diff) / 1000.0)
self.LOGGER.info('Send command: ' + command)
timestamp = int(time.time() * 1000)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
while self.response is None:
if (time.time() * 1000) - timestamp > self.RESPONSE_TIMEOUT * 1000:
self.LOGGER.warning('Timeout exceed on command ' + command)
return False
response = self.response.decode('utf-8').rstrip("\r\n")
self.LOGGER.info('Response: ' + response)
self.response = None
self.last_received_command = time.time() * 1000
return response
@accepts(command=str)
def send_command_without_return(self, command):
"""Send command to Tello without expecting a response. Use this method when you want to send a command
continuously
- go x y z speed: Tello fly to x y z in speed (cm/s)
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
- curve x1 y1 z1 x2 y2 z2 speed: Tello fly a curve defined by the current and two given coordinates with
speed (cm/s). If the arc radius is not within the range of 0.5-10 meters, it responds false.
x/y/z can't all be between -20 and 20 at the same time.
x1, x2: 20-500
y1, y2: 20-500
z1, z2: 20-500
speed: 10-60
- rc a b c d: Send RC control via four channels.
a: left/right (-100~100)
b: forward/backward (-100~100)
c: up/down (-100~100)
d: yaw (-100~100)
"""
self.LOGGER.info('Send command (no expect response): ' + command)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
@accepts(command=str)
def send_control_command(self, command):
"""Send control command to Tello and wait for its response. Possible control commands:
- command: entry SDK mode
- takeoff: Tello auto takeoff
- land: Tello auto land
- streamon: Set video stream on
- streamoff: Set video stream off
- emergency: Stop all motors immediately
- up x: Tello fly up with distance x cm. x: 20-500
- down x: Tello fly down with distance x cm. x: 20-500
- left x: Tello fly left with distance x cm. x: 20-500
- right x: Tello fly right with distance x cm. x: 20-500
- forward x: Tello fly forward with distance x cm. x: 20-500
- back x: Tello fly back with distance x cm. x: 20-500
- cw x: Tello rotate x degrees clockwise. x: 1-3600
- ccw x: Tello rotate x degrees counter-clockwise. x: 1-3600
- flip x: Tello fly flip x
l (left)
r (right)
f (forward)
b (back)
- speed x: set speed to x cm/s. x: 10-100
- wifi ssid pass: Set Wi-Fi with SSID password
Return:
bool: True for successful, False for unsuccessful
"""
for i in range(0, self.retry_count):
response = self.send_command_with_return(command)
if response == 'OK' or response == 'ok':
return True
return self.return_error_on_send_command(command, response, self.enable_exceptions)
@accepts(command=str)
def send_read_command(self, command):
"""Send set command to Tello and wait for its response. Possible set commands:
- speed?: get current speed (cm/s): x: 1-100
- battery?: get current battery percentage: x: 0-100
- time?: get current fly time (s): time
- height?: get height (cm): x: 0-3000
- temp?: get temperature (°C): x: 0-90
- attitude?: get IMU attitude data: pitch roll yaw
- baro?: get barometer value (m): x
- tof?: get distance value from TOF (cm): x: 30-1000
- wifi?: get Wi-Fi SNR: snr
Return:
int/float/str: the queried value on success, False for unsuccessful
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
pass
if ('error' not in response) and ('ERROR' not in response) and ('False' not in response):
if response.isdigit():
return int(response)
else:
try:
return float(response) # isdigit() is False when the number is a float(barometer)
except ValueError:
return response
else:
return self.return_error_on_send_command(command, response, self.enable_exceptions)
@classmethod
def return_error_on_send_command(cl, command, response, enable_exceptions):
"""Returns False and print an informative result code to show unsuccessful response"""
msg = 'Command ' + command + ' was unsuccessful. Message: ' + str(response)
if enable_exceptions:
raise Exception(msg)
else:
cl.LOGGER.error(msg)
return False
def connect(self):
"""Entry SDK mode
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("command")
def takeoff(self):
"""Tello auto takeoff
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("takeoff")
def land(self):
"""Tello auto land
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("land")
def streamon(self):
"""Set video stream on. If the response is 'Unknown command' means you have to update the Tello firmware. That
can be done through the Tello app.
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamon")
if result is True:
self.stream_on = True
return result
def streamoff(self):
"""Set video stream off
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamoff")
if result is True:
self.stream_on = False
return result
def emergency(self):
"""Stop all motors immediately
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("emergency")
@accepts(direction=str, x=int)
def move(self, direction, x):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command(direction + ' ' + str(x))
@accepts(x=int)
def move_up(self, x):
"""Tello fly up with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("up", x)
@accepts(x=int)
def move_down(self, x):
"""Tello fly down with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("down", x)
@accepts(x=int)
def move_left(self, x):
"""Tello fly left with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("left", x)
@accepts(x=int)
def move_right(self, x):
"""Tello fly right with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("right", x)
@accepts(x=int)
def move_forward(self, x):
"""Tello fly forward with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("forward", x)
@accepts(x=int)
def move_back(self, x):
"""Tello fly back with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("back", x)
@accepts(x=int)
def rotate_clockwise(self, x):
"""Tello rotate x degree clockwise.
Arguments:
x: 1-360
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("cw " + str(x))
@accepts(x=int)
def rotate_counter_clockwise(self, x):
"""Tello rotate x degree counter-clockwise.
Arguments:
x: 1-360
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("ccw " + str(x))
@accepts(x=str)
def flip(self, direction):
"""Tello fly flip.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("flip " + direction)
def flip_left(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("l")
def flip_right(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("r")
def flip_forward(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("f")
def flip_back(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("b")
@accepts(x=int, y=int, z=int, speed=int)
def go_xyz_speed(self, x, y, z, speed):
"""Tello fly to x y z in speed (cm/s)
Arguments:
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return('go %s %s %s %s' % (x, y, z, speed))
@accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int)
def curve_xyz_speed(self, x1, y1, z1, x2, y2, z2, speed):
"""Tello fly a curve defined by the current and two given coordinates with speed (cm/s).
- If the arc radius is not within the range of 0.5-10 meters, it responses false.
- x/y/z can’t be between -20 – 20 at the same time.
Arguments:
x1: 20-500
x2: 20-500
y1: 20-500
y2: 20-500
z1: 20-500
z2: 20-500
speed: 10-60
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return('curve %s %s %s %s %s %s %s' % (x1, y1, z1, x2, y2, z2, speed))
@accepts(x=int, y=int, z=int, speed=int, mid=int)
def go_xyz_speed_mid(self, x, y, z, speed, mid):
"""Tello fly to x y z in speed (cm/s) relative to mission pad iwth id mid
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('go %s %s %s %s m%s' % (x, y, z, speed, mid))
@accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int, mid=int)
def curve_xyz_speed_mid(self, x1, y1, z1, x2, y2, z2, speed, mid):
"""Tello fly to x2 y2 z2 over x1 y1 z1 in speed (cm/s) relative to mission pad with id mid
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('curve %s %s %s %s %s %s %s m%s' % (x1, y1, z1, x2, y2, z2, speed, mid))
@accepts(x=int, y=int, z=int, speed=int, yaw=int, mid1=int, mid2=int)
def go_xyz_speed_yaw_mid(self, x, y, z, speed, yaw, mid1, mid2):
"""Tello fly to x y z in speed (cm/s) relative to mid1
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('jump %s %s %s %s %s m%s m%s' % (x, y, z, speed, yaw, mid1, mid2))
def enable_mission_pads(self):
return self.send_control_command("mon")
def disable_mission_pads(self):
return self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
return self.send_control_command("mdirection " + str(x))
@accepts(x=int)
def set_speed(self, x):
"""Set speed to x cm/s.
Arguments:
x: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("speed " + str(x))
last_rc_control_sent = 0
@accepts(left_right_velocity=int, forward_backward_velocity=int, up_down_velocity=int, yaw_velocity=int)
def send_rc_control(self, left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
Returns:
bool: True for successful, False for unsuccessful
"""
if int(time.time() * 1000) - self.last_rc_control_sent < self.TIME_BTW_RC_CONTROL_COMMANDS * 1000:
pass
else:
self.last_rc_control_sent = int(time.time() * 1000)
return self.send_command_without_return('rc %s %s %s %s' % (left_right_velocity, forward_backward_velocity,
up_down_velocity, yaw_velocity))
def set_wifi_credentials(self, ssid, password):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('wifi %s %s' % (ssid, password))
def connect_to_wifi(self, ssid, password):
"""Connects to the Wi-Fi with SSID and password.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('ap %s %s' % (ssid, password))
def get_speed(self):
"""Get current speed (cm/s)
Returns:
False: Unsuccessful
int: 1-100
"""
return self.send_read_command('speed?')
def get_battery(self):
"""Get current battery percentage
Returns:
False: Unsuccessful
int: 0-100
"""
return self.send_read_command('battery?')
def get_flight_time(self):
"""Get current fly time (s)
Returns:
False: Unsuccessful
int: Seconds elapsed during flight.
"""
return self.send_read_command('time?')
def get_height(self):
"""Get height (cm)
Returns:
False: Unsuccessful
int: 0-3000
"""
return self.send_read_command('height?')
def get_temperature(self):
"""Get temperature (°C)
Returns:
False: Unsuccessful
int: 0-90
"""
return self.send_read_command('temp?')
def get_attitude(self):
"""Get IMU attitude data
Returns:
False: Unsuccessful
int: pitch roll yaw
"""
r = self.send_read_command('attitude?').replace(';', ':').split(':')
return dict(zip(r[::2], [int(i) for i in r[1::2]])) # {'pitch': xxx, 'roll': xxx, 'yaw': xxx}
def get_barometer(self):
"""Get barometer value (m)
Returns:
False: Unsuccessful
int: 0-100
"""
return self.send_read_command('baro?')
def get_distance_tof(self):
"""Get distance value from TOF (cm)
Returns:
False: Unsuccessful
int: 30-1000
"""
return self.send_read_command('tof?')
def get_wifi(self):
"""Get Wi-Fi SNR
Returns:
False: Unsuccessful
str: snr
"""
return self.send_read_command('wifi?')
def get_sdk_version(self):
"""Get SDK Version
Returns:
False: Unsuccessful
str: SDK Version
"""
return self.send_read_command('sdk?')
def get_serial_number(self):
"""Get Serial Number
Returns:
False: Unsuccessful
str: Serial Number
"""
return self.send_read_command('sn?')
def end(self):
"""Call this method when you want to end the tello object"""
if self.stream_on:
self.streamoff()
if self.background_frame_read is not None:
self.background_frame_read.stop()
if self.cap is not None:
self.cap.release()
def __del__(self):
self.end()
class BackgroundFrameRead:
"""
This class reads frames from a VideoCapture in the background. Then just call backgroundFrameRead.frame to get the
most recent frame.
"""
def __init__(self, tello, address):
tello.cap = cv2.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
self.grabbed, self.frame = self.cap.read()
self.stopped = False
def start(self):
Thread(target=self.update_frame, args=()).start()
return self
def update_frame(self):
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
(self.grabbed, self.frame) = self.cap.read()
def stop(self):
self.stopped = True
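# Minimal usage sketch (assumes a Tello reachable over its default Wi-Fi network; kept as
# comments so importing this module never triggers flight commands). All methods used
# below are defined in this file:
#   tello = Tello()
#   tello.connect()      # enter SDK mode
#   tello.takeoff()
#   tello.move_up(50)
#   tello.land()
#   tello.end()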
|
multiple_dag_devices.py
|
import sys
import os
sys.path.append(os.getcwd())
import pyschedcl as fw
import logging
import argparse
import json
#import sys
import time
import datetime
#import plotly.plotly as py
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import networkx as nx
import csv
import random
import time
import threading
import numpy as np
# logging.basicConfig(level=logging.CRITICAL)
# fw.logging.basicConfig(level=logging.CRITICAL)
# logging.basicConfig(level=logging.DEBUG)
# fw.logging.basicConfig(level=logging.DEBUG)
def parse_arg(args=None):
parser = argparse.ArgumentParser(description='Schedule a set of independent OpenCL task directed acyclic graphs on CPU-GPU heterogeneous multicores')
parser.add_argument('-f', '--file',
help='Input task file containing list of <json filename, partition class, dataset> tuples',
default ='dag_info/dag_transformer/')
parser.add_argument('-ng', '--nGPU',
help='Number of GPUs',
default='1')
parser.add_argument('-nc', '--nCPU',
help='Number of CPUs',
default='1')
parser.add_argument('-l', '--log',
help='Flag for turning on LOG',
action="store_true")
parser.add_argument('-g', '--graph',
help='Flag for plotting GANTT chart for execution',
action="store_true")
parser.add_argument('-df', '--dump_output_file',
help='Flag for dumping output file for a kernel',
action="store_true")
parser.add_argument('-t', '--task',
help='reduce everything to a single task',
action="store_true",
default=False)
parser.add_argument('-ag', '--all_gpu',
help='if --task/-t flag is enabled all kernels are moved to gpu if -ag is on, else cpu',
action="store_true",
default=False)
parser.add_argument('-rc','--recreate_dag',
help = 'recreate the dag json file from dag.graph specification',
action = "store_true",
default=False
)
parser.add_argument('-nchk','--num_chunks',
help = 'number of chunks to split the kernels',
default='1'
)
parser.add_argument('-ce','--check_error',
help = 'print error for some kernels',
action = "store_true",
default = False)
parser.add_argument('-thd','--use_thread',
help = "Use threaded scheduler",
action = "store_true",
default = False)
parser.add_argument('-fdp','--full_dump_path',
help = "Specify Full Dump Path for profiling results",
default='None')
return parser.parse_args(args)
def all_done():
for dag in all_dags:
if not dag.finished():
return False
return True
def random_selector(Q,start_sched):
fw.frontier_Q_lock.acquire()
while ((not Q) and (not all_done())):
fw.frontier_Q_lock.wait()
if all_done():
total_time_in_multiple_dag_devices = time.time()-start_sched
print("\t \t Total Time measured by the scheduler - ",total_time_in_multiple_dag_devices)
fw.frontier_Q_lock.release()
return -1
#fw.frontier_Q_lock.release()
task = Q[0]
del Q[0]
fw.frontier_Q_lock.release()
return task
if __name__ == '__main__':
args = parse_arg(sys.argv[1:])
file_prefix = args.file
if args.recreate_dag:
fw.create_dag("./database/info",file_prefix + "dag.graph",file_prefix + "t1.json", partition=10)
# fw.create_dag("./dag_info/dag_3_gemm/info","./dag_info/dag_3_gemm/dag.graph","./dag_info/dag_3_gemm/t1.json"\
# ,partition=10)
num_chunks = int(args.num_chunks)
fw.just_for_testing_num_chunks = num_chunks
info_file = args.file
print(info_file)
cmd_qs, ctxs, gpus, cpus = fw.host_initialize(int(args.nGPU), int(args.nCPU),use_mul_queues = True)
#Dags_folder = list()
all_dags = [] #list of all the DAGs
finished_task_Dag = dict()
deleted_task_dag = list()
all_dags_jsons = [join(info_file,f) for f in listdir(info_file)] #list of json files - each json file corresponds to a single DAG
gantt_label = [(info_file + f) for f in listdir(info_file)]
gantt = 0
# count = 0
# count1 = 0
# task_dag_id = 0
frontier_Q = fw.frontier_Q
for i,dag_json_file in enumerate(all_dags_jsons):
if dag_json_file.endswith('json'):
logging.debug("main : Reading json file "+ dag_json_file)
with open(dag_json_file,"r") as f:
info = json.loads(f.read())
logging.debug("main : prepraing task dag number "+str(i))
all_dags.append(fw.TaskDAG(info,dag_number = i ,dataset = 1024,map_all=args.task,all_map_to_gpu=args.all_gpu,\
gpus=gpus,cpus=cpus,ctxs=ctxs,cmd_qs=cmd_qs,use_predefined_mapping=True)) #create dag for info file (ex :- dag_test1/t1.json)
logging.debug("main : prepared task dag number "+str(i)+"\n\n")
fw.frontier_Q_lock.acquire()
frontier_Q.extend(all_dags[-1].free_tasks)
for task in all_dags[-1].free_tasks:
task.has_enqueued = True
fw.frontier_Q_lock.release()
logging.debug("printing initial frontier_Q tasks\n\n")
# for i,task in enumerate(frontier_Q):
# logging.debug("task number "+str(i+1)+ " "+ task.id)
# logging.debug("it's free kernels "+str([k.id for k in task.free_kernels]))
# logging.debug("it's all kernels "+str([k.id for k in task.kernels]))
# logging.debug("it's dag id "+str(task.task_dag_object.id))
# logging.debug("it's optm device is "+str(task.optm_device))
#sys.exit(-1)
start_sched = time.time()
while True:
logging.debug("before selection length of frontier_Q : "+str(len(frontier_Q)))
next_task = random_selector(frontier_Q,start_sched)
if next_task == -1:
logging.debug("all dags are finished ")
break
logging.debug("task selected "+str(next_task.id))
optm_device = next_task.optm_device
if int(optm_device) == 10:
optm_device = "gpu"
logging.debug("gpu selected")
elif int(optm_device) == 0:
optm_device = "cpu"
logging.debug("cpu selected")
else:
raise
logging.debug("after selection length of frontier_Q : "+str(len(frontier_Q))+"\n")
fw.rqLock.acquire()
while not (len(fw.ready_queue[optm_device]) > 0):
fw.rqLock.wait()
#now device is free and time to schedule task
logging.debug("current ready queue "+str(fw.ready_queue[optm_device]))
#print(list(fw.ready_queue[optm_device]))
next_task.allocate_devices(list(fw.ready_queue[optm_device]))
logging.debug(str(fw.ready_queue[optm_device]))
fw.rqLock.release()
if args.use_thread:
dispatch_thread = threading.Thread(target=next_task.dispatch_all,args=())
dispatch_thread.start()
else:
next_task.dispatch_all(gpus, cpus, ctxs, cmd_qs)
#next_task.dispatch_all(gpus, cpus, ctxs,cmd_qs)
#fw.rqLock.acquire()
#logging.debug("Number of threads running are "+str(threading.active_count()))
logging.debug("Profiling the execution")
ref = {'cpu' : None, 'gpu' : None}
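# 'ref' tracks the earliest device-side timestamp observed per device; it is used below
# as the time origin when printing event times relative to the device start.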
for dag in all_dags:
for kernel_id,kernel in dag.kernels.items():
#print "\t Kernel ",kernel.name
dev = kernel.task_object.device
for ev in kernel.write_events:
ev.wait()
start_time = ev.profile.START *1e-9
end_time = ev.profile.END *1e-9
if not ref[dev]:
ref[dev] = start_time
else:
ref[dev] = min(ref[dev],start_time)
for ev in kernel.nd_range_event:
ev.wait()
start_time = ev.profile.START*1e-9
if not ref[dev]:
ref[dev] = start_time
else:
ref[dev] = min(ref[dev],start_time)
for ev in kernel.read_events:
ev.wait()
start_time = ev.profile.START *1e-9
end_time = ev.profile.END *1e-9
if not ref[dev]:
ref[dev] = start_time
else:
ref[dev] = min(ref[dev],start_time)
host_st = None
host_en = None
timestamps = {}
for dag in all_dags:
print("dag number : ",dag.id)
for kernel_id,kernel in dag.kernels.items():
timestamps[kernel.name+str(kernel_id)] = {}
dev = kernel.task_object.device
fin = ref[dev]
kernel_timestamps = timestamps[kernel.name+str(kernel_id)]
print("\t Kernel ",kernel.name, " ",kernel.id, " ",dev)
kernel_timestamps["write"] = {"host_queued_start":kernel.host_events[0].write_start,\
"host_queued_end":kernel.host_events[0].write_end,"device_queued":-1,"device_start":-1,"device_end":-1}
kernel_timestamps["device"] = dev
kernel_timestamps["cmdq"] = kernel.dag_device
st = None
for ev in kernel.write_events:
ev.wait()
queue_time = ev.profile.QUEUED*1e-9
start_time = ev.profile.START *1e-9
end_time = ev.profile.END *1e-9
if kernel_timestamps["write"]["device_queued"] == -1:
kernel_timestamps["write"]["device_queued"] = queue_time
else:
kernel_timestamps["write"]["device_queued"] = min(queue_time,kernel_timestamps["write"]["device_queued"])
if kernel_timestamps["write"]["device_start"] == -1:
kernel_timestamps["write"]["device_start"] = start_time
else:
kernel_timestamps["write"]["device_start"] = min(start_time,kernel_timestamps["write"]["device_start"])
if kernel_timestamps["write"]["device_end"] == -1:
kernel_timestamps["write"]["device_end"] = end_time
else:
kernel_timestamps["write"]["device_end"] = max(end_time,kernel_timestamps["write"]["device_end"])
print("\t\t Write event | Start time ",start_time-ref[dev], " | End time ", end_time-ref[dev])
#kernel_timestamps["write"].append([start_time-ref[dev],end_time-ref[dev]])
if st == None:
st = start_time
else:
st = min(st,start_time)
fin = max(fin,end_time)
kernel_timestamps["nd_range"] = {"device_start":-1,"device_end":-1}
# ev = kernel.nd_range_event
for ev in kernel.nd_range_event:
ev.wait()
start_time = ev.profile.START*1e-9
end_time = ev.profile.END*1e-9
print("\t\t ND range | Start time ",start_time-ref[dev], " | End time ", end_time-ref[dev])
#kernel_timestamps["nd_range"].append([start_time-ref[dev],end_time-ref[dev]])
if st==None:
st = start_time
else:
st = min(st,start_time)
fin = max(fin,end_time)
if kernel_timestamps["nd_range"]["device_start"] == -1:
kernel_timestamps["nd_range"]["device_start"] = start_time
else:
kernel_timestamps["nd_range"]["device_start"] = min(start_time,kernel_timestamps["nd_range"]["device_start"])
if kernel_timestamps["nd_range"]["device_end"] == -1:
kernel_timestamps["nd_range"]["device_end"] = end_time
else:
kernel_timestamps["nd_range"]["device_end"] = max(end_time,kernel_timestamps["nd_range"]["device_end"])
kernel_timestamps["read"] = {"device_start":-1,"device_end":-1}
for ev in kernel.read_events:
ev.wait()
start_time = ev.profile.START*1e-9
end_time = ev.profile.END*1e-9
print("\t\t Read event | Start time ",start_time-ref[dev], " | End time ", end_time-ref[dev])
#kernel_timestamps["read"].append([start_time-ref[dev],end_time-ref[dev]])
if st==None:
st = start_time
else:
st = min(st,start_time)
fin = max(fin,end_time)
if kernel_timestamps["read"]["device_start"] == -1:
kernel_timestamps["read"]["device_start"] = start_time
else:
kernel_timestamps["read"]["device_start"] = min(start_time,kernel_timestamps["read"]["device_start"])
if kernel_timestamps["read"]["device_end"] == -1:
kernel_timestamps["read"]["device_end"] = end_time
else:
kernel_timestamps["read"]["device_end"] = max(end_time,kernel_timestamps["read"]["device_end"])
err = 0
if args.check_error:
if kernel.name.endswith("copy"):
if kernel.name[:4] == "coal":
m = kernel.global_work_size[1]
n = kernel.global_work_size[0]
else:
m = kernel.global_work_size[0]
n = kernel.global_work_size[1]
inp = kernel.data["input"][0]
out = kernel.data["output"][0]
inp = inp.reshape(m,n)
out = out.reshape(m,n)
err = np.mean((out-inp)**2)
if kernel.name.endswith("transpose"):
if kernel.name[:4] == "coal":
m = kernel.global_work_size[1]
n = kernel.global_work_size[0]
else:
m = kernel.global_work_size[0]
n = kernel.global_work_size[1]
inp = kernel.data["input"][0]
out = kernel.data["output"][0]
inp = inp.reshape(m,n)
out = out.reshape(n,m)
err = np.mean((out-inp.T)**2)
if "gemm" in kernel.name.lower():
m = kernel.symbolic_variables["m1"]
p = kernel.symbolic_variables["p1"]
n = kernel.symbolic_variables["n1"]
inp1 = kernel.data["input"][0]
inp2 = kernel.data["input"][1]
bias = kernel.data["input"][2]
out = kernel.data["output"][0]
inp1 = inp1.reshape(m,p)
inp2 = inp2.reshape(p,n)
out = out.reshape(m,n)
err = np.mean((inp1.dot(inp2)+bias-out)**2)
#print "\t \t "+str(kernel_timestamps["write"]["host_queued"]-kernel_timestamps["write"]["device_queued"])
# print "\t \t Time taken(measured by device times stamps)", fin-st
# if kernel.host_events[0].read_end and kernel.host_events[0].write_start:
# host_total_time = kernel.host_events[0].read_end-kernel.host_events[0].write_start
# print "\t \t Time Taken (measured by host time stamps) : ",host_total_time
#
#
#
# if host_en == None:
# host_en = kernel.host_events[0].read_end
# else:
# host_en = max(host_en,kernel.host_events[0].read_end)
#
# if host_st == None:
# host_st = kernel.host_events[0].write_start
# else:
# host_st = min(host_st,kernel.host_events[0].write_start)
#
#
# total_host_overhead = host_total_time - (fin-st)
# print "\t \t Measured Host overhead :",total_host_overhead
# print "\t \t Percentage overhead:",total_host_overhead*100/host_total_time
# if args.check_error:
# print "\t \t Error :- ",err
# print "\n"
#
#
# else:
# print "\t \t Host Profiling data not available, continuing..\n"
#
# en = host_en-host_st
#
# print "Total Time as measured by Host read callback threads is ",en
# #print "Total Time as measured by scheduler is ",total_time_in_multiple_dag_devices
#
# #print timestamps
print("\n")
#print json.dumps(timestamps,sort_keys=True,indent=2)
#print timestamps
if args.full_dump_path == "None":
if args.use_thread:
with open("./scheduling/dumps/thread.json","w") as f:
print("saving to thread.json")
s = json.dumps(timestamps)
f.write(s)
# json.dump(timestamps,f)
else:
with open("./scheduling/dumps/non_thread.json","w") as f:
print("saving to non_thread.json")
s = json.dumps(timestamps)
f.write(s)
# json.dump(timestamps,f)
else:
with open(args.full_dump_path,"w") as f:
print("saving to ",args.full_dump_path)
s = json.dumps(timestamps)
f.write(s)
# json.dump(timestamps,f)
time.sleep(2)
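# Example invocation (a sketch based on the argparse flags defined above; paths are assumptions):
#   python multiple_dag_devices.py -f dag_info/dag_transformer/ -ng 1 -nc 1 -nchk 2 -thd -fdp ./scheduling/dumps/run.json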
|
email.py
|
from threading import Thread
from apps.exts import mail
from flask_mail import Message
from flask import current_app, render_template
def async_send_mail(app,msg):
with app.app_context():
mail.send(message=msg)
# Wrapper function that sends an email
def send_mail(to,subject,template,**kwargs):
# Get the current application instance
app = current_app._get_current_object()
# Message object
msg = Message(subject=subject,recipients=[to],sender=app.config['MAIL_USERNAME'])
# HTML content shown when opened in a browser
msg.html = render_template(template+'.html',**kwargs)
# Plain-text content for mail clients
msg.body = render_template(template+'.txt',**kwargs)
# Create a thread to send the mail asynchronously
thr = Thread(target=async_send_mail,args=[app,msg])
thr.start()
return thr
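# Usage sketch (recipient, subject and template name are placeholders, not part of this module):
#   send_mail('user@example.com', 'Welcome', 'email/welcome', username='alice')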
|
master.py
|
# run queue, e.g. start / shutdown / balance
import zmq
import threading
import time
import sys
import os
from threading import Thread
from Queue import Queue
from apscheduler.scheduler import Scheduler
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'upscale', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from upscale.master import balancer
from upscale.utils.rpc import RemoteClient
#from upscale.utils.decorators import periodic_task, every, adecorator, Dec
#from upscale.utils.decorators import periodic_task, every, adecorator, Dec
from upscale import log as logging
LOG = logging.getLogger('upscale.master')
class Tasks(RemoteClient):
pass
class Worker(RemoteClient):
pass
def queue(f):
""" decorator function that will add function to queue instead of executing them directly """
def wrapper(*args, **kwargs):
q.put((f, args, kwargs))
return wrapper
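# Methods decorated with @queue are not executed directly: the call is pushed onto the
# module-level Queue `q` and picked up by the background `worker` thread started below.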
class Master(object):
def __init__(self):
self.scheduler = Scheduler()
self.scheduler.configure({'daemonic': True})
self.scheduler.add_interval_job(self._balance, seconds=60)
self.scheduler.start()
pass
def _balance(self):
def wrapper():
balancer.rebalance()
self.reload_all()
q.put((wrapper, [], {}))
# reconfigure haproxy
def reload_all(self):
from upscale.utils.common import get_hosts
for host in get_hosts():
print ("Reloading host {0}.".format(host.private_dns_name))
with Tasks("tcp://{0}:10000/".format(host.private_dns_name)) as h:
# should run async and wait for all results to finish
h.reload()
# start host
@queue
def start(self, namespace, application):
from upscale.master.balancer import get_containers
print namespace, application,
(hosts, containers) = get_containers()
# also weighted hosts, so one in static host, one on spot instance
min_host = None
for host in containers:
if (not min_host or len(containers[host])<len(containers[min_host])):
# check if it already contains project
min_host_applications = set([(b.split('_')[0], b.split('_')[1]) for b in containers[host] if len(b.split('_'))==3])
if ((namespace, application) in min_host_applications):
continue
min_host=host
if not min_host:
raise Exception('No host available')
print 'Starting on host {0}.'.format(min_host)
# start container on min host
# check minhost
with Worker("tcp://{0}:10000/".format(hosts[min_host])) as h:
#h.start(namespace, application).get(timeout=5)
print ('Starting new container')
h.start(namespace, application)
self.reload_all()
# health checks, does namespace, application exist
#enqueue(wrapper, )
return (True)
@queue
def destroy(self, namespace, website):
# get all containers for project and destroy them
from upscale.master.balancer import get_containers
print namespace, website,
(hosts, containers) = get_containers()
for host in containers:
for container in containers[host]:
pass
@queue
def upgrade(self, namespace, website):
# rolling upgrade, first start new instances with new version,
# then shutdown old ones
# get containers and host of old version
# start new containers with new version
# shutdown old versions
pass
def worker():
""" Worker runs a queue of operations on the upscale cluster. """
while True:
(func, args, kwargs) = q.get()
print func
try:
func(*args, **kwargs)
except Exception, e:
print e
logging.exception('Worker')
from upscale.utils.rpc import Server
import time
import sys
import traceback
from upscale.worker.worker import Worker
q = Queue()
t = Thread(target=worker)
t.daemon = True
t.start()
if __name__ == '__main__':
from upscale.worker import tasks
with Server("tcp://0.0.0.0:5867", {'Master': Master()}) as s:
s.run()
|
clean.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from sys import exit
from .initialize import Initialize
from ..libraries.tools import save_setting
from ..libraries.thread_progress import ThreadProgress
from ..libraries.I18n import I18n
_ = I18n
class Clean(Initialize):
def __init__(self):
super(Clean, self).__init__()
global _
_ = I18n().translate
self.nonblock_clean()
def start_cleaning(self):
"""Cleaning
Starts the cleaning command. This command cleans the binary files
in the .pioenvs folder (hidden on unix systems)
"""
if(not self.check_main_requirements()):
exit(0)
self.check_board_selected()
if(not self.board_id):
return
envs = self.get_envs_initialized()
if(envs and self.board_id not in envs):
self.derror("init_not_possible")
return
cmd = ['run', '-t', 'clean', '-e ', self.board_id]
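# Roughly equivalent to running: pio run -t clean -e <board_id>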
self.run_command(cmd)
self.dstop()
def nonblock_clean(self):
"""New Thread Execution
Starts a new thread to run the start_cleaning method
"""
from threading import Thread
thread = Thread(target=self.start_cleaning)
thread.start()
ThreadProgress(thread, _('processing'), '')
|
vpp_papi.py
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import logging
import collections
import struct
import json
import threading
import fnmatch
import weakref
import atexit
from . vpp_serializer import VPPType, VPPEnumType, VPPUnionType, BaseTypes
from . vpp_serializer import VPPMessage, vpp_get_type
from . vpp_format import VPPFormat
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
class VppEnumType(type):
def __getattr__(cls, name):
t = vpp_get_type(name)
return t.enum
# Python3
# class VppEnum(metaclass=VppEnumType):
# pass
class VppEnum:
__metaclass__ = VppEnumType
def vpp_atexit(vpp_weakref):
"""Clean up VPP connection on shutdown."""
vpp_instance = vpp_weakref()
if vpp_instance and vpp_instance.transport.connected:
vpp_instance.logger.debug('Cleaning up VPP on exit')
vpp_instance.disconnect()
def vpp_iterator(d):
if sys.version[0] == '2':
return d.iteritems()
else:
return d.items()
class VppApiDynamicMethodHolder(object):
pass
class FuncWrapper(object):
def __init__(self, func):
self._func = func
self.__name__ = func.__name__
def __call__(self, **kwargs):
return self._func(**kwargs)
class VPP():
"""VPP interface.
This class provides the APIs to VPP. The APIs are loaded
from the provided .api.json files and functions are created accordingly.
These functions are documented in the VPP .api files, as they
are dynamically created.
Additionally, VPP can send callback messages; this class
provides a means to register a callback function to receive
these messages in a background thread.
"""
def process_json_file(self, apidef_file):
api = json.load(apidef_file)
types = {}
for t in api['enums']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'enum', 'data': t}
for t in api['unions']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'union', 'data': t}
for t in api['types']:
t[0] = 'vl_api_' + t[0] + '_t'
types[t[0]] = {'type': 'type', 'data': t}
i = 0
while True:
unresolved = {}
for k, v in types.items():
t = v['data']
if not vpp_get_type(t[0]):
if v['type'] == 'enum':
try:
VPPEnumType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'union':
try:
VPPUnionType(t[0], t[1:])
except ValueError:
unresolved[k] = v
elif v['type'] == 'type':
try:
VPPType(t[0], t[1:])
except ValueError:
unresolved[k] = v
if len(unresolved) == 0:
break
if i > 3:
raise ValueError('Unresolved type definitions {}'
.format(unresolved))
types = unresolved
i += 1
for m in api['messages']:
try:
self.messages[m[0]] = VPPMessage(m[0], m[1:])
except NotImplementedError:
self.logger.error('Not implemented error for {}'.format(m[0]))
def __init__(self, apifiles=None, testmode=False, async_thread=True,
logger=None, loglevel=None,
read_timeout=5, use_socket=False,
server_address='/run/vpp-api.sock'):
"""Create a VPP API object.
apifiles is a list of files containing API
descriptions that will be loaded - methods will be
dynamically created reflecting these APIs. If not
provided this will load the API files from VPP's
default install location.
logger, if supplied, is the logging logger object to log to.
loglevel, if supplied, is the log level this logger is set
to report at (from the loglevels in the logging module).
"""
if logger is None:
logger = logging.getLogger(__name__)
if loglevel is not None:
logger.setLevel(loglevel)
self.logger = logger
self.messages = {}
self.id_names = []
self.id_msgdef = []
self.header = VPPType('header', [['u16', 'msgid'],
['u32', 'client_index']])
self.apifiles = []
self.event_callback = None
self.message_queue = queue.Queue()
self.read_timeout = read_timeout
self.async_thread = async_thread
if use_socket:
from . vpp_transport_socket import VppTransport
else:
from . vpp_transport_shmem import VppTransport
if not apifiles:
# Pick up API definitions from default directory
try:
apifiles = self.find_api_files()
except RuntimeError:
# In test mode we don't care that we can't find the API files
if testmode:
apifiles = []
else:
raise
for file in apifiles:
with open(file) as apidef_file:
self.process_json_file(apidef_file)
self.apifiles = apifiles
# Basic sanity check
if len(self.messages) == 0 and not testmode:
raise ValueError(1, 'Missing JSON message definitions')
self.transport = VppTransport(self, read_timeout=read_timeout,
server_address=server_address)
# Make sure we allow VPP to clean up the message rings.
atexit.register(vpp_atexit, weakref.ref(self))
class ContextId(object):
"""Thread-safe provider of unique context IDs."""
def __init__(self):
self.context = 0
self.lock = threading.Lock()
def __call__(self):
"""Get a new unique (or, at least, not recently used) context."""
with self.lock:
self.context += 1
return self.context
get_context = ContextId()
def get_type(self, name):
return vpp_get_type(name)
@classmethod
def find_api_dir(cls):
"""Attempt to find the best directory in which API definition
files may reside. If the value VPP_API_DIR exists in the environment
then it is first on the search list. If we're inside a recognized
location in a VPP source tree (src/scripts and src/vpp-api/python)
then entries from there to the likely locations in build-root are
added. Finally the location used by system packages is added.
:returns: A single directory name, or None if no such directory
could be found.
"""
dirs = []
if 'VPP_API_DIR' in os.environ:
dirs.append(os.environ['VPP_API_DIR'])
# perhaps we're in the 'src/scripts' or 'src/vpp-api/python' dir;
# in which case, plot a course to likely places in the src tree
import __main__ as main
if hasattr(main, '__file__'):
# get the path of the calling script
localdir = os.path.dirname(os.path.realpath(main.__file__))
else:
# use cwd if there is no calling script
localdir = os.getcwd()
localdir_s = localdir.split(os.path.sep)
def dmatch(dir):
"""Match dir against right-hand components of the script dir"""
d = dir.split('/') # param 'dir' assumes a / separator
length = len(d)
return len(localdir_s) > length and localdir_s[-length:] == d
def sdir(srcdir, variant):
"""Build a path from srcdir to the staged API files of
'variant' (typically '' or '_debug')"""
# Since 'core' and 'plugin' files are staged
# in separate directories, we target the parent dir.
return os.path.sep.join((
srcdir,
'build-root',
'install-vpp%s-native' % variant,
'vpp',
'share',
'vpp',
'api',
))
srcdir = None
if dmatch('src/scripts'):
srcdir = os.path.sep.join(localdir_s[:-2])
elif dmatch('src/vpp-api/python'):
srcdir = os.path.sep.join(localdir_s[:-3])
elif dmatch('test'):
# we're apparently running tests
srcdir = os.path.sep.join(localdir_s[:-1])
if srcdir:
# we're in the source tree, try both the debug and release
# variants.
dirs.append(sdir(srcdir, '_debug'))
dirs.append(sdir(srcdir, ''))
# Test for staged copies of the scripts
# For these, since we explicitly know if we're running a debug versus
# release variant, target only the relevant directory
if dmatch('build-root/install-vpp_debug-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, '_debug'))
if dmatch('build-root/install-vpp-native/vpp/bin'):
srcdir = os.path.sep.join(localdir_s[:-4])
dirs.append(sdir(srcdir, ''))
# finally, try the location system packages typically install into
dirs.append(os.path.sep.join(('', 'usr', 'share', 'vpp', 'api')))
        # check the directories for existence; first one wins
for dir in dirs:
if os.path.isdir(dir):
return dir
return None
@classmethod
def find_api_files(cls, api_dir=None, patterns='*'):
"""Find API definition files from the given directory tree with the
given pattern. If no directory is given then find_api_dir() is used
to locate one. If no pattern is given then all definition files found
in the directory tree are used.
:param api_dir: A directory tree in which to locate API definition
files; subdirectories are descended into.
If this is None then find_api_dir() is called to discover it.
:param patterns: A list of patterns to use in each visited directory
when looking for files.
This can be a list/tuple object or a comma-separated string of
            patterns. Each value in the list will have leading/trailing
whitespace stripped.
The pattern specifies the first part of the filename, '.api.json'
is appended.
The results are de-duplicated, thus overlapping patterns are fine.
If this is None it defaults to '*' meaning "all API files".
:returns: A list of file paths for the API files found.
"""
if api_dir is None:
api_dir = cls.find_api_dir()
if api_dir is None:
raise RuntimeError("api_dir cannot be located")
if isinstance(patterns, list) or isinstance(patterns, tuple):
patterns = [p.strip() + '.api.json' for p in patterns]
else:
patterns = [p.strip() + '.api.json' for p in patterns.split(",")]
api_files = []
for root, dirnames, files in os.walk(api_dir):
# iterate all given patterns and de-dup the result
files = set(sum([fnmatch.filter(files, p) for p in patterns], []))
for filename in files:
api_files.append(os.path.join(root, filename))
return api_files
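    # Illustrative example (not part of the original module): the module names
    # below are assumptions; any '<name>.api.json' file found under the API
    # directory tree can be selected this way.
    #
    #     apifiles = VPP.find_api_files(patterns='vpe, memclnt')
    #     vpp = VPP(apifiles=apifiles)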
@property
def api(self):
if not hasattr(self, "_api"):
raise Exception("Not connected, api definitions not available")
return self._api
def make_function(self, msg, i, multipart, do_async):
if (do_async):
def f(**kwargs):
return self._call_vpp_async(i, msg, **kwargs)
else:
def f(**kwargs):
return self._call_vpp(i, msg, multipart, **kwargs)
f.__name__ = str(msg.name)
f.__doc__ = ", ".join(["%s %s" %
(msg.fieldtypes[j], k)
for j, k in enumerate(msg.fields)])
return f
def _register_functions(self, do_async=False):
self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
self._api = VppApiDynamicMethodHolder()
for name, msg in vpp_iterator(self.messages):
n = name + '_' + msg.crc[2:]
i = self.transport.get_msg_index(n.encode())
if i > 0:
self.id_msgdef[i] = msg
self.id_names[i] = name
# TODO: Fix multipart (use services)
multipart = True if name.find('_dump') > 0 else False
f = self.make_function(msg, i, multipart, do_async)
setattr(self._api, name, FuncWrapper(f))
else:
self.logger.debug(
'No such message type or failed CRC checksum: %s', n)
def connect_internal(self, name, msg_handler, chroot_prefix, rx_qlen,
do_async):
pfx = chroot_prefix.encode() if chroot_prefix else None
rv = self.transport.connect(name.encode(), pfx, msg_handler, rx_qlen)
if rv != 0:
raise IOError(2, 'Connect failed')
self.vpp_dictionary_maxid = self.transport.msg_table_max_index()
self._register_functions(do_async=do_async)
# Initialise control ping
crc = self.messages['control_ping'].crc
self.control_ping_index = self.transport.get_msg_index(
('control_ping' + '_' + crc[2:]).encode())
self.control_ping_msgdef = self.messages['control_ping']
if self.async_thread:
self.event_thread = threading.Thread(
target=self.thread_msg_handler)
self.event_thread.daemon = True
self.event_thread.start()
return rv
def connect(self, name, chroot_prefix=None, do_async=False, rx_qlen=32):
"""Attach to VPP.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
do_async - if true, messages are sent without waiting for a reply
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
msg_handler = self.transport.get_callback(do_async)
return self.connect_internal(name, msg_handler, chroot_prefix, rx_qlen,
do_async)
def connect_sync(self, name, chroot_prefix=None, rx_qlen=32):
"""Attach to VPP in synchronous mode. Application must poll for events.
name - the name of the client.
chroot_prefix - if VPP is chroot'ed, the prefix of the jail
rx_qlen - the length of the VPP message receive queue between
client and server.
"""
return self.connect_internal(name, None, chroot_prefix, rx_qlen,
do_async=False)
def disconnect(self):
"""Detach from VPP."""
rv = self.transport.disconnect()
self.message_queue.put("terminate event thread")
return rv
def msg_handler_sync(self, msg):
"""Process an incoming message from VPP in sync mode.
The message may be a reply or it may be an async notification.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
# If we have a context, then use the context to find any
# request waiting for a reply
context = 0
if hasattr(r, 'context') and r.context > 0:
context = r.context
if context == 0:
# No context -> async notification that we feed to the callback
self.message_queue.put_nowait(r)
else:
raise IOError(2, 'RPC reply message received in event handler')
def decode_incoming_msg(self, msg):
if not msg:
self.logger.warning('vpp_api.read failed')
return
(i, ci), size = self.header.unpack(msg, 0)
if self.id_names[i] == 'rx_thread_exit':
return
#
        # Decode the message and return it as a named tuple.
#
msgobj = self.id_msgdef[i]
if not msgobj:
raise IOError(2, 'Reply message undefined')
r, size = msgobj.unpack(msg)
return r
def msg_handler_async(self, msg):
"""Process a message from VPP in async mode.
In async mode, all messages are returned to the callback.
"""
r = self.decode_incoming_msg(msg)
if r is None:
return
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
def _control_ping(self, context):
"""Send a ping command."""
self._call_vpp_async(self.control_ping_index,
self.control_ping_msgdef,
context=context)
def validate_args(self, msg, kwargs):
d = set(kwargs.keys()) - set(msg.field_by_name.keys())
if d:
raise ValueError('Invalid argument {} to {}'
.format(list(d), msg.name))
def _call_vpp(self, i, msg, multipart, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
multipart - True if the message returns multiple
messages in return.
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
The return value is the message or message array containing
the response. It will raise an IOError exception if there was
no response within the timeout window.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
kwargs['_vl_msg_id'] = i
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
pass
self.validate_args(msg, kwargs)
b = msg.pack(kwargs)
self.transport.suspend()
self.transport.write(b)
if multipart:
# Send a ping after the request - we use its response
# to detect that we have seen all results.
self._control_ping(context)
# Block until we get a reply.
rl = []
while (True):
msg = self.transport.read()
if not msg:
raise IOError(2, 'VPP API client: read failed')
r = self.decode_incoming_msg(msg)
msgname = type(r).__name__
if context not in r or r.context == 0 or context != r.context:
# Message being queued
self.message_queue.put_nowait(r)
continue
if not multipart:
rl = r
break
if msgname == 'control_ping_reply':
break
rl.append(r)
self.transport.resume()
return rl
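    # Illustrative note: '_dump' messages are treated as multipart, so a call
    # such as vpp.api.sw_interface_dump() (message name assumed to be present
    # in the loaded .api.json files) returns a list of replies collected until
    # the trailing control_ping_reply arrives.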
def _call_vpp_async(self, i, msg, **kwargs):
"""Given a message, send the message and await a reply.
msgdef - the message packing definition
i - the message type index
context - context number - chosen at random if not
supplied.
The remainder of the kwargs are the arguments to the API call.
"""
if 'context' not in kwargs:
context = self.get_context()
kwargs['context'] = context
else:
context = kwargs['context']
try:
if self.transport.socket_index:
kwargs['client_index'] = self.transport.socket_index
except AttributeError:
kwargs['client_index'] = 0
kwargs['_vl_msg_id'] = i
b = msg.pack(kwargs)
self.transport.write(b)
def register_event_callback(self, callback):
"""Register a callback for async messages.
This will be called for async notifications in sync mode,
and all messages in async mode. In sync mode, replies to
requests will not come here.
callback is a fn(msg_type_name, msg_type) that will be
called when a message comes in. While this function is
executing, note that (a) you are in a background thread and
may wish to use threading.Lock to protect your datastructures,
and (b) message processing from VPP will stop (so if you take
a long while about it you may provoke reply timeouts or cause
VPP to fill the RX buffer). Passing None will disable the
callback.
"""
self.event_callback = callback
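    # Illustrative callback sketch (names assumed): it runs on the background
    # event thread, so guard any data shared with the main thread with a lock.
    #
    #     def on_vpp_event(msg_type_name, msg):
    #         print('async notification:', msg_type_name)
    #
    #     vpp.register_event_callback(on_vpp_event)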
def thread_msg_handler(self):
"""Python thread calling the user registered message handler.
This is to emulate the old style event callback scheme. Modern
clients should provide their own thread to poll the event
queue.
"""
while True:
r = self.message_queue.get()
if r == "terminate event thread":
break
msgname = type(r).__name__
if self.event_callback:
self.event_callback(msgname, r)
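# Hedged usage sketch (illustrative, not part of the upstream module). It
# assumes a running VPP instance reachable over the default transport and
# that the loaded .api.json files define the 'show_version' message.
if __name__ == '__main__':
    vpp = VPP()                     # load API definitions from the default path
    vpp.connect('example-client')   # attach to VPP
    reply = vpp.api.show_version()  # dynamically generated API method
    print(reply.version)
    vpp.disconnect()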
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
video.py
|
#!/usr/bin/python3
# part of https://github.com/WolfgangFahl/play-chess-with-a-webcam
import cv2
import numpy as np
import math
from time import strftime
from pcwawc.environment import Environment
from pcwawc.fpscheck import FPSCheck
from imutils import perspective
import argparse
from threading import Thread
import os
import sys
import threading
class Video:
""" Video handling e.g. recording/writing """
@staticmethod
def getVideo():
video=Video()
video.headless=Environment.inContinuousIntegration()
return video
# construct me with no parameters
def __init__(self,title="frame"):
self.title=title
self.cap = None
self.frames = 0
self.ispaused = False
# current Frame
self.frame = None
self.processedFrame=None
self.maxFrames=sys.maxsize
        # still image as video feature for jpg
self.autoPause=False
self.fpsCheck = None
self.debug=False
self.headless=False
pass
# check whether s is an int
@staticmethod
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
@staticmethod
def title(device):
if not Video.is_int(device):
deviceTitle=os.path.basename(device)
else:
deviceTitle="camera %s" % (device)
return deviceTitle
# return if video is paused
def paused(self):
return self.ispaused
# pause the video
def pause(self, ispaused):
self.ispaused = ispaused
# capture from the given device
def capture(self, device):
if Video.is_int(device):
self.device = int(device)
else:
self.device = device
self.open(device)
if device.endswith(".jpg"):
self.maxFrames=1
self.autoPause=True
self.setup(cv2.VideoCapture(self.device))
def setup(self, cap):
""" setup the capturing from the given device """
self.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps = int(cap.get(cv2.CAP_PROP_FPS))
self.cap = cap
self.fpsCheck = FPSCheck()
self.fpsCheck.start()
def checkFilePath(self, filePath, raiseException=True):
ok = os.path.exists(filePath)
if raiseException and not ok:
raise Exception("file %s does not exist" % (filePath))
return ok
# capture from the given video filePath
def open(self, filePath):
self.checkFilePath(filePath)
self.setup(cv2.VideoCapture(filePath))
def showImage(self, image, title:str, keyCheck:bool=True, keyWait:int=5):
'''
show the image with the given title
Args:
image: the image to show
title(str): the title of the image
            keyCheck(bool): wait for a key stroke before continuing?
            keyWait(int): maximum number of milliseconds to wait for a key stroke
'''
        if threading.current_thread() is not threading.main_thread():
if self.debug:
print ("can't show image %s since not on mainthread" % (title))
return True
if self.headless:
return True
cv2.imshow(title, image)
if keyCheck:
return not cv2.waitKey(keyWait) & 0xFF == ord('q')
else:
return True
def showAndWriteImage(self,image,title,path="/tmp/",imageFormat=".jpg",keyCheck=True,keyWait=5):
result=self.showImage(image, title, keyCheck, keyWait)
if image is not None:
cv2.imshow(title,image)
cv2.imwrite(path+title+imageFormat,image)
return result
# encode the image
def imencode(self, frame, imgformat=".jpg"):
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(imgformat, frame)
return flag, encodedImage
# return a video frame as a jpg image
def readJpgImage(self, show=False, postProcess=None):
ret, frame, quitWanted = self.readFrame(show, postProcess)
encodedImage = None
# ensure the frame was read
if ret:
(flag, encodedImage) = self.imencode(frame)
# ensure the frame was successfully encoded
if not flag:
ret = False
return ret, encodedImage, quitWanted
# return a video frame as a numpy array
def readFrame(self, show=False, postProcess=None):
# when pausing repeat previous frame
if self.ispaused:
# simply return the current frame again
ret = self.frame is not None
else:
ret, self.frame = self.cap.read()
quitWanted = False
if ret == True:
if not self.ispaused:
self.frames = self.frames + 1
if self.frames>=self.maxFrames and self.autoPause:
self.ispaused=True
self.fpsCheck.update()
            if postProcess is not None:
try:
self.processedFrame= postProcess(self.frame)
except BaseException as e:
# @TODO log exception
print ("processing error "+str(e))
self.processedFrame=self.frame
else:
self.processedFrame=self.frame
if show:
quitWanted = not self.showImage(self.frame, self.title)
return ret, self.processedFrame, quitWanted
# play the given capture
def play(self):
while(self.cap.isOpened()):
ret, frame, quitWanted = self.readFrame(True)
if ret == True:
if quitWanted:
break
if frame is None:
# TODO decide whether to log a warning here
pass
else:
break
self.close()
def fileTimeStamp(self):
return self.timeStamp(separator='_', timeseparator='')
def timeStamp(self, separator=' ', timeseparator=':'):
return strftime("%Y-%m-%d" + separator + "%H" + timeseparator + "%M" + timeseparator + "%S")
def close(self):
if self.cap is not None:
self.cap.release()
cv2.destroyAllWindows()
def checkCap(self):
if self.cap is None:
raise "Capture is not initialized"
# get a still image
def still(self, prefix, imgformat="jpg", close=True, printHints=True, show=False, postProcess=None):
filename = "%s%s.%s" % (prefix, self.fileTimeStamp(), imgformat)
        return self.still2File(filename, format=imgformat, close=close, printHints=printHints, show=show, postProcess=postProcess)
# get a still image
def still2File(self, filename, format="jpg", close=True, printHints=True, show=False, postProcess=None):
self.checkCap()
ret = False
frame = None
if (self.cap.isOpened()):
ret, frame, quitWanted = self.readFrame(show, postProcess)
if ret == True:
if printHints:
print("capture %s with %dx%d" % (
filename, self.width, self.height))
self.writeImage(frame,filename)
if close:
self.close()
return ret, frame
# read an image
def readImage(self, filePath):
self.checkFilePath(filePath)
image = cv2.imread(filePath, 1)
return image
def writeImage(self,image,filepath):
cv2.imwrite(filepath, image)
def prepareRecording(self,filename,width,height,fps=None):
self.checkCap()
if fps is None:
fps=self.fps
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(filename, fourcc, fps,
(width, height))
return out
# record the capture to a file with the given prefix using a timestamp
def record(self, prefix, printHints=True, fps=None):
filename = "%s%s.avi" % (prefix, self.timeStamp())
out=self.prepareRecording(filename,self.width,self.height,fps)
if printHints:
print("recording %s with %dx%d at %d fps press q to stop recording" % (
filename, self.width, self.height, self.fps))
while(self.cap.isOpened()):
ret, frame, quitWanted = self.readFrame(True)
if ret == True:
# flip the frame
# frame = cv2.flip(frame,0)
if quitWanted:
break
# write the frame
out.write(frame)
else:
break
# Release everything if job is finished
self.close()
out.release()
cv2.destroyAllWindows()
if printHints:
print("finished")
# https://stackoverflow.com/a/22921648/1497139
def createBlank(self, width, height, rgb_color=(0, 0, 0)):
"""Create new image(numpy array) filled with certain color in RGB"""
# Create black blank image
image = self.getEmptyImage4WidthAndHeight(width, height, 3)
# Since OpenCV uses BGR, convert the color first
color = tuple(reversed(rgb_color))
# Fill image with color
image[:] = color
return image
def getEmptyImage4WidthAndHeight(self,w,h,channels):
""" get an empty image with the given width height and channels"""
emptyImage = np.zeros((h,w,channels), np.uint8)
return emptyImage
def getEmptyImage(self,image,channels=1):
""" prepare a trapezoid/polygon mask to focus on the square chess field seen as a trapezoid"""
h, w = image.shape[:2]
emptyImage=self.getEmptyImage4WidthAndHeight(w, h, channels)
return emptyImage
def maskImage(self,image,mask):
""" return the masked image that filters with the given mask"""
masked=cv2.bitwise_and(image,image,mask=mask)
return masked
# was: http://www.robindavid.fr/opencv-tutorial/chapter5-line-edge-and-contours-detection.html
# is: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# https://docs.opencv.org/3.4/d9/db0/tutorial_hough_lines.html
def houghTransform(self, image):
"""Performs an Hough Transform to given image.
Returns: lines"""
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
return lines
def houghTransformP(self, image):
"""Performs a probabilistic Hough Transform to given image.
Returns: lines"""
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
h, w = image.shape[:2]
minLineLength = h / 16
maxLineGap = h / 24
        # pass the length/gap limits as keyword arguments so they are not
        # mistaken for the optional 'lines' output parameter of HoughLinesP
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                                minLineLength=minLineLength, maxLineGap=maxLineGap)
return lines
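    # Illustrative note: unlike houghTransform(), the probabilistic variant
    # returns finite segments as (x1, y1, x2, y2) endpoints, so they can be
    # drawn directly instead of via the rho/theta based drawLines():
    #
    #     for segment in video.houghTransformP(image):
    #         x1, y1, x2, y2 = segment[0]
    #         cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)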
def drawTrapezoid(self, image, points, color):
""" loop over the given points and draw them on the image """
if points is None:
return
prev = None
        # if there are exactly four points then close the loop
if len(points)==4:
points.append(points[0])
for (x, y) in points:
cv2.circle(image, (x, y), 10, color, -1)
if prev is not None:
cv2.line(image, (x, y), prev, color, 3, cv2.LINE_AA)
prev = (x, y)
def drawCircle(self, image, center, radius=10, color=(0, 255, 0), thickness=1):
cv2.circle(image, center, radius, color=color, thickness=thickness)
def drawRectangle(self, image, pt1, pt2, color=(0, 255, 0), thickness=1):
cv2.rectangle(image, pt1, pt2, color, thickness)
def drawPolygon(self,image,polygon,color):
""" draw the given polygon onto the given image with the given color"""
cv2.fillConvexPoly(image,polygon,color)
# https://docs.opencv.org/4.1.2/d9/db0/tutorial_hough_lines.html
def drawLines(self, image, lines):
height, width = image.shape[:2]
for i in range(0, len(lines)):
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + width * (-b)), int(y0 + height * (a)))
pt2 = (int(x0 - width * (-b)), int(y0 - height * (a)))
cv2.line(image, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
def rotate(self, image, angle, center=None, scale=1.0):
# grab the dimensions of the image
(h, w) = image.shape[:2]
# if the center is None, initialize it as the center of
# the image
if center is None:
center = (w // 2, h // 2)
# perform the rotation (clockwise)
M = cv2.getRotationMatrix2D(center, -angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
# return the rotated image
return rotated
def warp(self, image, pts, squared=True):
"""apply the four point transform to obtain a birds eye view of the given image """
warped = perspective.four_point_transform(image, pts)
if squared:
height, width = warped.shape[:2]
side = min(width, height)
warped = cv2.resize(warped, (side, side))
return warped
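    # Illustrative usage (the corner coordinates are made up): pts holds the
    # four board corners in image coordinates; four_point_transform orders
    # them itself, so the order they are listed in does not matter.
    #
    #     pts = np.array([(110, 80), (520, 90), (540, 470), (95, 460)], dtype="float32")
    #     birdsEye = video.warp(image, pts)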
def as2x2(self,row1col1,row1col2,row2col1,row2col2,downScale=2):
height, width = row1col1.shape[:2]
image1,image2,image3,image4=row1col1,row1col2,row2col1,row2col2
if downScale>1:
image1=cv2.resize(image1,(width//downScale,height//downScale))
image2=cv2.resize(image2,(width//downScale,height//downScale))
image3=cv2.resize(image3,(width//downScale,height//downScale))
image4=cv2.resize(image4,(width//downScale,height//downScale))
combined1=np.concatenate((image1,image2),axis=0)
combined2=np.concatenate((image3,image4),axis=0)
combined=np.concatenate((combined1,combined2),axis=1)
return combined
@staticmethod
def getSubRect(image, rect):
x, y, w, h = rect
return image[y:y + h, x:x + w]
# get the intensity sum of a hsv image
def sumIntensity(self, image):
h, s, v = cv2.split(image)
height, width = image.shape[:2]
sumResult = np.sum(v)
return sumResult
    # add a timestamp to the given frame; default fontScale is 1.0
def addTimeStamp(self, frame, withFrames=True, withFPS=True, fontBGRColor=(0, 255, 0), fontScale=1.0, font=cv2.FONT_HERSHEY_SIMPLEX, lineThickness=1):
if frame is not None:
height, width = frame.shape[:2]
# grab the current time stamp and draw it on the frame
now = self.timeStamp()
if withFrames:
now = now + " %d" % (self.frames)
if withFPS and self.fpsCheck is not None:
now = now + "@%.0f fps" % (self.fpsCheck.fps())
fontFactor = width / 960
text_width, text_height = cv2.getTextSize(
now, font, fontScale * fontFactor, lineThickness)[0]
# https://stackoverflow.com/a/34273603/1497139
# frame = frame.copy()
self.drawText(frame, now, (width - int(text_width * 1.1), int(text_height * 1.2)),
font, fontScale * fontFactor, fontBGRColor, lineThickness)
return frame
def drawCenteredText(self,frame,text,x,y,fontBGRColor=(0, 255, 0), fontScale=1.0, font=cv2.FONT_HERSHEY_SIMPLEX, lineThickness=1):
height, width = frame.shape[:2]
fontFactor=width/960
text_width, text_height = cv2.getTextSize(
text, font, fontScale * fontFactor, lineThickness)[0]
self.drawText(frame,text,(x-text_width//2,y+text_height//2),font,fontScale*fontFactor,fontBGRColor,lineThickness)
def drawText(self,frame,text,bottomLeftCornerOfText, font, fontScale,fontBGRColor,lineThickness):
cv2.putText(frame,text, bottomLeftCornerOfText, font, fontScale,fontBGRColor,lineThickness)
# see https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class VideoStream(object):
"""run videograbbing in separate stream"""
def __init__(self, video, show=False, postProcess=None, name='VideoStream'):
self.video = video
self.show = show
self.quit = False
self.frame = None
# initialize the thread name
self.name = name
self.postProcess = postProcess
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
            ret, frame, quitWanted = self.video.readFrame(self.show, self.postProcess)
if quitWanted:
return
if ret:
self.frame = frame
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Video')
parser.add_argument('--record',
action='store_true',
help="record a video from the given input")
parser.add_argument('--still',
action='store_true',
help="record a still image from the given input")
parser.add_argument('--input',
type=int,
default=0,
help="Manually set the input device.")
args = parser.parse_args()
# record a video from the first capture device
video = Video()
video.capture(args.input)
if args.record:
video.record("chessVideo")
if args.still:
video.still("chessImage")
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Gthpcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gthpcoind shutdown."""
from test_framework.test_framework import GthpcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(GthpcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
road_speed_limiter.py
|
import json
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import interp, clip
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
CAMERA_SPEED_FACTOR = 1.05
class Port:
BROADCAST_PORT = 2899
RECEIVE_PORT = 843
LOCATION_PORT = 2911
class RoadLimitSpeedServer:
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.setDaemon(True)
broadcast.start()
#gps = Thread(target=self.gps_thread, args=[])
#gps.setDaemon(True)
#gps.start()
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps([
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
])
address = (self.remote_addr[0], Port.LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
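      # 0x8919 is SIOCGIFBRDADDR on Linux: ask the kernel for the IPv4
      # broadcast address of the 'wlan0' interface via ioctl on this socket.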
ip = fcntl.ioctl(
s.fileno(),
0x8919,
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, Port.BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
except:
pass
def udp_recv(self, sock):
ret = False
try:
ready = select.select([sock], [], [], 1.)
ret = bool(ready[0])
if ret:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = sec_since_boot()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = sec_since_boot()
finally:
self.lock.release()
except:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
return ret
def check(self):
now = sec_since_boot()
if now - self.last_updated > 20.:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
if now - self.last_updated_active > 10.:
self.active = 0
def get_limit_val(self, key, default=None):
try:
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
except:
pass
return default
def main():
server = RoadLimitSpeedServer()
roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
sock.setblocking(False)
while True:
if server.udp_recv(sock):
dat = messaging.new_message()
dat.init('roadLimitSpeed')
dat.roadLimitSpeed.active = server.active
dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
dat.roadLimitSpeed.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
roadLimitSpeed.send(dat.to_bytes())
server.check()
except Exception as e:
server.last_exception = e
class RoadSpeedLimiter:
def __init__(self):
self.slowing_down = False
self.start_dist = 0
self.longcontrol = Params().get_bool('LongControlEnabled')
self.sock = messaging.sub_sock("roadLimitSpeed")
self.roadLimitSpeed = None
def recv(self):
try:
dat = messaging.recv_sock(self.sock, wait=False)
if dat is not None:
self.roadLimitSpeed = dat.roadLimitSpeed
except:
pass
def get_active(self):
self.recv()
if self.roadLimitSpeed is not None:
return self.roadLimitSpeed.active
return 0
def get_max_speed(self, cluster_speed, is_metric):
log = ""
self.recv()
if self.roadLimitSpeed is None:
return 0, 0, 0, False, ""
try:
road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
is_highway = self.roadLimitSpeed.isHighway
cam_type = int(self.roadLimitSpeed.camType)
cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
section_left_dist = self.roadLimitSpeed.sectionLeftDist
camSpeedFactor = clip(self.roadLimitSpeed.camSpeedFactor, 1.0, 1.1)
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 30
MAX_LIMIT = 100
else:
MIN_LIMIT = 30
MAX_LIMIT = 120
# log = "RECV: " + str(is_highway)
# log += ", " + str(cam_limit_speed)
# log += ", " + str(cam_limit_speed_left_dist)
# log += ", " + str(section_limit_speed)
# log += ", " + str(section_left_dist)
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
v_ego = cluster_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
v_limit = cam_limit_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
diff_speed = cluster_speed - cam_limit_speed
v_diff = v_ego - v_limit
if self.longcontrol:
sec = interp(v_diff, [2.7, 8.3], [15., 20.])
else:
sec = interp(v_diff, [2.7, 8.3], [17., 23.])
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < v_ego * sec):
if not self.slowing_down:
self.start_dist = cam_limit_speed_left_dist * 1.2
self.slowing_down = True
first_started = True
else:
first_started = False
base = self.start_dist / 1.2 * 0.65
td = self.start_dist - base
d = cam_limit_speed_left_dist - base
if d > 0 and td > 0. and diff_speed > 0 and (section_left_dist is None or section_left_dist < 10):
pp = d / td
else:
pp = 0
return cam_limit_speed * camSpeedFactor + int(
pp * diff_speed), cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed * camSpeedFactor, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(cluster_speed, is_metric):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_max_speed(cluster_speed, is_metric)
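# Illustrative usage (numbers made up): with the cluster reading 72 km/h and an
# upcoming 50 km/h camera, the call returns a tapered target speed plus the raw
# limit, the remaining distance, a 'just started slowing' flag and a log string.
#     max_speed, limit, left_dist, first, log = road_speed_limiter_get_max_speed(72, is_metric=True)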
if __name__ == "__main__":
main()
|
train.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import functools
import importlib
import multiprocessing as mp
import os
import sys
import time
from tabulate import tabulate
import numpy as np
import megengine as mge
from megengine import distributed as dist
from megengine import jit
from megengine import optimizer as optim
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from official.vision.detection.tools.data_mapper import data_mapper
from official.vision.detection.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler
)
logger = mge.get_logger(__name__)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--ngpus", default=-1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument("--enable_sublinear", action="store_true")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
valid_nr_dev = mge.get_device_count("gpu")
if args.ngpus == -1:
world_size = valid_nr_dev
else:
if args.ngpus > valid_nr_dev:
logger.error("do not have enough gpus for training")
sys.exit(1)
else:
world_size = args.ngpus
logger.info("Device Count = %d", world_size)
log_dir = "log-of-{}".format(os.path.basename(args.file).split(".")[0])
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if world_size > 1:
mp.set_start_method("spawn")
processes = list()
for i in range(world_size):
process = mp.Process(target=worker, args=(i, world_size, args))
process.start()
processes.append(process)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
if world_size > 1:
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
logger.info("Init process group for gpu%d done", rank)
sys.path.insert(0, os.path.dirname(args.file))
current_network = importlib.import_module(os.path.basename(args.file).split(".")[0])
model = current_network.Net(current_network.Cfg(), batch_size=args.batch_size)
params = model.parameters(requires_grad=True)
model.train()
if rank == 0:
logger.info(get_config_info(model.cfg))
opt = optim.SGD(
params,
lr=model.cfg.basic_lr * world_size * model.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights)
if rank == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(model.batch_size, args.dataset_dir, model.cfg))
for epoch_id in range(model.cfg.max_epoch):
for param_group in opt.param_groups:
param_group["lr"] = (
model.cfg.basic_lr
* world_size
* model.batch_size
* (
model.cfg.lr_decay_rate
** bisect.bisect_right(model.cfg.lr_decay_stages, epoch_id)
)
)
tot_steps = model.cfg.nr_images_epoch // (model.batch_size * world_size)
train_one_epoch(
model,
train_loader,
opt,
tot_steps,
rank,
epoch_id,
world_size,
args.enable_sublinear,
)
if rank == 0:
save_path = "log-of-{}/epoch_{}.pkl".format(
os.path.basename(args.file).split(".")[0], epoch_id
)
mge.save(
{"epoch": epoch_id, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(
model,
data_queue,
opt,
tot_steps,
rank,
epoch_id,
world_size,
enable_sublinear=False,
):
sublinear_cfg = jit.SublinearMemoryConfig() if enable_sublinear else None
@jit.trace(symbolic=True, sublinear_memory_config=sublinear_cfg)
def propagate():
loss_dict = model(model.inputs)
opt.backward(loss_dict["total_loss"])
losses = list(loss_dict.values())
return losses
meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
for step in range(tot_steps):
adjust_learning_rate(opt, epoch_id, step, model, world_size)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
model.inputs["image"].set_value(mini_batch["data"])
model.inputs["gt_boxes"].set_value(mini_batch["gt_boxes"])
model.inputs["im_info"].set_value(mini_batch["im_info"])
tik = time.time()
opt.zero_grad()
loss_list = propagate()
opt.step()
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if rank == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(
["{}:%f".format(loss) for loss in model.cfg.losses_keys]
)
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in loss_list])
if step % log_interval == 0:
average_loss = meter.average()
logger.info(
log_info_str,
epoch_id,
step,
tot_steps,
opt.param_groups[0]["lr"],
*average_loss,
*time_meter.average()
)
meter.reset()
time_meter.reset()
def get_config_info(config):
config_table = []
for c, v in config.__dict__.items():
if not isinstance(v, (int, float, str, list, tuple, dict, np.ndarray)):
if hasattr(v, "__name__"):
v = v.__name__
elif hasattr(v, "__class__"):
v = v.__class__
elif isinstance(v, functools.partial):
v = v.func.__name__
config_table.append((str(c), str(v)))
config_table = tabulate(config_table)
return config_table
def adjust_learning_rate(optimizer, epoch_id, step, model, world_size):
base_lr = (
model.cfg.basic_lr
* world_size
* model.batch_size
* (
model.cfg.lr_decay_rate
** bisect.bisect_right(model.cfg.lr_decay_stages, epoch_id)
)
)
# Warm up
if epoch_id == 0 and step < model.cfg.warm_iters:
lr_factor = (step + 1.0) / model.cfg.warm_iters
for param_group in optimizer.param_groups:
param_group["lr"] = base_lr * lr_factor
def build_dataset(data_dir, cfg):
data_cfg = copy.deepcopy(cfg.train_dataset)
data_name = data_cfg.pop("name")
data_cfg["root"] = os.path.join(data_dir, data_name, data_cfg["root"])
if "ann_file" in data_cfg:
data_cfg["ann_file"] = os.path.join(data_dir, data_name, data_cfg["ann_file"])
data_cfg["order"] = ["image", "boxes", "boxes_category", "info"]
return data_mapper[data_name](**data_cfg)
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
aspect_ratios = _compute_aspect_ratios(train_dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
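# Illustrative note on aspect grouping (values made up): _quantize buckets each
# image's height/width ratio against the bin edges, e.g. with bins=[1]
#     _quantize([0.75, 1.33, 1.0], [1])  ->  [0, 1, 1]
# so every mini-batch is drawn from images of similar orientation, which keeps
# the padding added by DetectionPadCollator small.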
def build_dataloader(batch_size, data_dir, cfg):
train_dataset = build_dataset(data_dir, cfg)
train_sampler = build_sampler(train_dataset, batch_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.ShortestEdgeResize(
cfg.train_image_short_size,
cfg.train_image_max_size,
sample_style="choice",
),
T.RandomHorizontalFlip(),
T.ToMode(),
],
order=["image", "boxes", "boxes_category"],
),
collator=DetectionPadCollator(),
num_workers=2,
)
return train_dataloader
if __name__ == "__main__":
main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum import keystore, simple_config
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum import constants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet, AddTransactionException
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
        # Once the GUI has been initialized, check if we want to announce something, since the callback may have been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
            sorted(recent)  # sanity-check: the stored value must be a sortable list
        except Exception:
            recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
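    # Note on the loop above: the five most recently opened wallets end up bound to
    # the Ctrl+1 .. Ctrl+5 shortcuts, in the (sorted) order they appear in the menu.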
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings"/"Preferences" are reserved menu item names on macOS, so use a different label there as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
                # iterate over a copy, since items are removed from the list inside the loop
                for tx in list(self.tx_notifications):
                    if tx:
                        self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
    # Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
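    # For reference (illustrative numbers): format_fee_rate(5000) renders a rate of
    # 5000 satoshis per kbyte as roughly "5 sat/byte", subject to the wallet's
    # num_zeros setting.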
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mBTC'
if self.decimal_point == 8:
return 'BTC'
raise Exception('Unknown base unit')
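    # Worked example: with decimal_point == 5 an amount of 123456 satoshis is
    # formatted as 1.23456 mBTC; with decimal_point == 8 it is 0.00123456 BTC
    # (format_satoshis shifts the value by decimal_point digits).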
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
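    # The 'follows' flags set above prevent feedback loops: when one edit is updated
    # programmatically from the other, its textChanged handler returns early.
    # Conversion sketch (rate is whatever self.fx currently reports): editing the
    # fiat field to 50.00 at a rate of 10000 per BTC sets the BTC field to
    # int(Decimal('50') / Decimal(10000) * COIN) satoshis.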
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
            _('The Bitcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
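    # The resulting URI is roughly of the form (fields present only when set):
    #   bitcoin:<address>?amount=...&message=...&time=...&exp=...&name=...&sig=<base58>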
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate // 1000)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
            if self.is_max:
                self.spend_max()
            else:
                self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
                # edit_changed was edited just now, so clear the modified flag on the
                # other edit; that way the freshly edited fee setting is the one kept frozen
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate:
displayed_feerate = displayed_feerate // 1000
else:
# fallback to actual fee
displayed_feerate = fee // size if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = displayed_feerate * size if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = displayed_fee // size if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(feerounding)
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(bool(feerounding))
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
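    # Illustrative numbers for the fee-rounding icon above: if CoinChooser settles on
    # an actual fee of 1130 sat while the displayed (rounded) fee is 1100 sat, the
    # 30 sat difference is surfaced through the tooltip set by set_feerounding_text().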
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
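    # Example (made-up contact): a contact of type 'address' labelled "Alice" for key
    # 1BoatSLRHtKNngkdXEeobR76b53LETtpyT is rendered as
    # "Alice <1BoatSLRHtKNngkdXEeobR76b53LETtpyT>".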
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
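    # Usage sketch (hypothetical method, not part of this file): decorating a method
    # with @protected makes the wrapper prompt for the password and pass it in as the
    # 'password' keyword argument, so callers simply omit it:
    #
    #     @protected
    #     def do_something_sensitive(self, arg, password):
    #         self.wallet.do_something(arg, password)
    #
    #     window.do_something_sensitive(arg)   # password handled by the wrapper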
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
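    # The value returned above is later passed to make_unsigned_transaction() as
    # fixed_fee: an int when the absolute fee is frozen, a callable (presumably
    # mapping the estimated tx size to a fee) when the fee rate is frozen, or None
    # to let the wallet/config choose.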
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
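    # Note: read_send_tab() returns None on any validation failure; do_send() below
    # treats a falsy result as "abort silently", since the error was already shown.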
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
            self.show_error(_('Cannot find payment request in wallet.'))
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error(_("Wallet removed") + ': ' + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
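# privkeys_thread() below runs in a background thread and reports progress to the
# GUI thread via computing_privkeys_signal/show_privkeys_signal; the 'done' and
# 'cancelled' flags let on_dialog_closed() stop it early when the dialog is closed.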
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BTC', 'mBTC', 'bits']
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BTC':
self.decimal_point = 8
elif unit_result == 'mBTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
try:
if not self.wallet.add_transaction(tx.txid(), tx):
self.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
self.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
self.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), _("Transaction added to wallet history"))
return True
|
alert_saq.py
|
#!/usr/bin/env python3
# vim: ts=3:sw=3:et
import os
import sys
import traceback
import re
import csv
import time
import logging
import logging.config
import argparse
import datetime
import configparser
import threading
import smtplib
from saq.constants import *
from saq.client import Alert
from splunklib import SplunkQueryObject
from master_search_file import get_search_string
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--search', required=False, default=None, dest='search',
help="The splunk search to execute.")
parser.add_argument('-g', '--group-by', required=False, default=None, dest='group_by',
help="Group results by the given field name.")
parser.add_argument('-c', '--config', required=False, default='splunk_detect.cfg', dest='config',
help="Configuration file to use.")
parser.add_argument('--log-config', required=False, default='splunk_detect_logging.cfg', dest='log_config',
help="Use the given configuration file to configure logging.")
parser.add_argument('--cron', required=False, default=False, action='store_true', dest='cron_mode',
help="Execute in cron mode, where we run whatever searches are scheduled at this minute.")
parser.add_argument('searches', nargs="*", default=[],
help="Execute the given searches by name. Partial string matching is OK.")
parser.add_argument('--csv', required=False, default='Splunk_Search_Master.csv', dest='csv_path',
help="Path to the CSV file to execute. Defaults to Splunk_Search_Master.csv")
parser.add_argument('--earliest', required=False, default=None, dest='earliest',
help="Override earliest parameter to splunk search. Does not apply to --cron mode. Format is MM/DD/YYYY:hh:mm:ss (or short form)")
parser.add_argument('--latest', required=False, default=None, dest='latest',
help="Override latest parameter to splunk search. Does not apply to --cron mode. Format is MM/DD/YYYY:hh:mm:ss (or short form)")
args = parser.parse_args()
print(args)
try:
# configure logging
logging.config.fileConfig(args.log_config)
except Exception as e:
sys.stderr.write("unable to load logging configuration file {0}: {1}".format(
args.log_config, str(e)))
sys.exit(1)
# load splunk_detect configuration
config = configparser.ConfigParser()
config.read(args.config)
# clear proxy settings
for setting in [ 'http_proxy', 'https_proxy' ]:
if setting in os.environ:
logging.warning("clearing proxy environment variable {0}".format(setting))
del os.environ[setting]
search_queue = [] # list of (search, group_by) tuples to execute in parallel
# are we running in cron mode?
if args.cron_mode:
# get the current minute
current_minute = str(datetime.datetime.now().minute)
# open the csv file and find searches that are scheduled for this minute
reader = csv.DictReader(open(args.csv_path, 'r'))
for row in reader:
# XXX the CSV column names are wrapped in angle brackets (e.g. <Schedule>), but the other functions don't expect that, so add unwrapped copies of each key
for key in list(row.keys()):
row[key[1:-1]] = row[key]
if row['Schedule'] == current_minute:
search_string = get_search_string(row)
group_by = None
if row['Group_By'] != '':
group_by = row['Group_By']
logging.info(search_string)
search_queue.append((search_string, group_by))
# select searches by name?
elif len(args.searches) > 0:
# open the csv file and find searches that have names that match
reader = csv.DictReader(open(args.csv_path, 'r'))
for row in reader:
# XXX the CSV column names are wrapped in angle brackets (e.g. <Schedule>), but the other functions don't expect that, so add unwrapped copies of each key
for key in list(row.keys()):
row[key[1:-1]] = row[key]
# allow time spec override
if args.earliest is not None:
if args.latest is None:
args.latest='now'
row['Earliest_Latest'] = ' _index_earliest={0} _index_latest={1}'.format(args.earliest, args.latest)
for search in args.searches:
if search.lower() in row['Saved_Search_Name'].lower():
search_string = get_search_string(row)
group_by = None
if row['Group_By'] != '':
group_by = row['Group_By']
search_queue.append((search_string, group_by))
# manual search?
elif args.search:
search_queue.append((args.search, args.group_by))
else:
logging.fatal("you must specifiy --cron, -s (and -g), or search names to execute")
sys.exit(1)
def handle_search_failure(search, group_by, exception=None):
if not config.getboolean('smtp', 'enabled'):
return
header = '\r\n'.join([
'From: splunk_detect@localhost',
'To: {0}'.format(config.get('smtp', 'recipients')),
'Subject: Splunk Search Failure'])
message = '{0}\r\n\r\n{1}'.format(
header, "The following splunk search failed.\r\n\r\n{0}".format(
search))
if exception is not None:
message += "\r\n\r\nThe following exception was thrown.\r\n\r\n{0}".format(
traceback.format_exc())
else:
message += "\r\n\r\nThe splunk server returned an HTTP error code."
try:
server = smtplib.SMTP(config.get('smtp', 'server'))
server.set_debuglevel(1)
logging.warning("sending email to {0}".format(config.get('smtp', 'recipients')))
server.sendmail('splunk_detect@localhost', config.get('smtp', 'recipients').split(','), message)
server.quit()
except Exception as e:
logging.error("unable to send email: {0}".format(str(e)))
def execute_search_wrapper(search, group_by):
try:
execute_search(search, group_by)
except Exception as e:
logging.error("caught exception {0}".format(str(e)))
handle_search_failure(search, group_by, e)
traceback.print_exc()
def indicator_lookup(row):
ltable_file = config.get('splunk', 'lookuptablefile')
csvfile = csv.DictReader(open(ltable_file))
for indrow in csvfile:
if indrow['Indicator_Type'] == row['Indicator_Type']:
#if we have specified specific fields to match this indicator type on, then only check those fields for matches
if 'Field_Matches' in row:
for key in row['Field_Matches'].split('<!FIELD!>'):
logging.debug("checking {0} against specific field {1} value {2}".format(indrow['Indicator'].lower(), str(key), str(row[key]).lower()))
if isinstance(row[key], list):
logging.debug("field is a list")
for val in row[key]:
logging.debug("checking {0} against specific field {1} value {2}".format(indrow['Indicator'].lower(), str(key), str(val).lower()))
if indrow['Indicator'].lower() in str(val).lower():
return indrow['Indicator'], indrow['ObjectID']
elif indrow['Indicator'].lower() in str(row[key]).lower():
return indrow['Indicator'], indrow['ObjectID']
else:
#else, try all fields
for key in row:
logging.debug("checking {0} against {1}".format(indrow['Indicator'].lower(), str(row[key]).lower()))
if isinstance(row[key], list):
for val in row[key]:
logging.debug("checking {0} against {1}".format(indrow['Indicator'].lower(), str(val).lower()))
if indrow['Indicator'].lower() in str(val).lower():
return indrow['Indicator'], indrow['ObjectID']
elif indrow['Indicator'].lower() in str(row[key]).lower():
return indrow['Indicator'], indrow['ObjectID']
logging.debug("indicator lookup returned no results for {0} {1}".format(str(row['Alert_Name']),str(row)))
return None,None
def execute_search(search, group_by):
"""Execute the given search, optionally grouped into alerts by the given field (or None if no grouping is required.)"""
logging.info("running search {0} grouped by {1}".format(search, group_by))
query_object = SplunkQueryObject(
uri=config.get('splunk', 'Splunk_Server'),
username=config.get('splunk', 'User'),
password=config.get('splunk', 'Pass'),
max_result_count=50,
query_timeout='00:59:59')
company = config.get('saq','company')
compid = config.get('saq','id')
search_result = query_object.query(search)
if not search_result:
logging.error("searched failed")
handle_search_failure(search, group_by)
return False
results_list = query_object.json()
if results_list is None:
logging.error("searched failed")
handle_search_failure(search, group_by)
return False
if len(results_list) == 0:
logging.debug("search returned no results")
return True
# now we want to split these alerts up according to how they should be grouped
# for example if the alerts are based on a search of IDS hits then we might want to group them by src_ip
if group_by:
results = {} # key = group_by column value, value = [] of results
for result in results_list:
#match indicator to row, only add those items to results
ind_value, objectid = indicator_lookup(result)
if ind_value and objectid:
result['Indicator'] = ind_value
result['ObjectID'] = objectid
if group_by not in result:
logging.error("missing group_by column {0}".format(group_by))
continue
# handle splunk returning a list for a field value
key = None
if isinstance(result[group_by], list):
key = ' '.join(result[group_by])
else:
key = result[group_by]
if key not in results:
results[key] = []
results[key].append(result)
else:
# if we're not grouping then just move this entire result into a dummy dict
results = { '': results_list } # kind of a hack to allow the same logic below for both conditions
for group_by_item in list(results.keys()):
results_list = results[group_by_item]
if group_by_item != '':
logging.debug("sending {0} alert details for {1}".format(len(results_list), group_by_item))
if len(results_list) > 0:
# decide on a name for this alert
alert_name = results_list[0]['Alert_Name']
# special case for indicators
if 'Indicator' in results_list[0] and results_list[0]['Indicator'] is not None:
alert_name = '{0} - {1}'.format(alert_name, results_list[0]['Indicator'])
elif group_by_item != '' and group_by_item is not None:
alert_name = '{0} - {1}'.format(alert_name, group_by_item)
else:
alert_name = '{0}'.format(alert_name)
alert_contents = {}
alert_contents['details'] = results_list
a = Alert(
tool='splunk',
company_name=company,
company_id=compid,
tool_instance='splunk_detect',
alert_type='splunk',
desc=alert_name,
event_time=time.strftime("%Y-%m-%d %H:%M:%S"),
details=alert_contents)
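# Map each ACE observable type to the Splunk result fields that may carry it;
# every result row below is scanned against this table and matching values are
# attached to the alert as observables.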
FIELD_MAPPING = {
F_IPV4: [ 'src_ip', 'Framed_IP_Address', 'Calling_Station_ID', 'ip_address', 'dest_ip', 'remote_host_ip', 'dst_ip', 'Source_Network_Address' ],
F_FQDN: [ 'uri_host' ],
F_HOSTNAME: [ 'dest_nt_host', 'src_nt_host', 'Computer_Name', 'HostName', 'Workstation_Name', 'ComputerName', 'computer_name' ],
F_ASSET: [ ],
F_USER: [ 'user', 'User_Name', 'username','Account_Name','extracted_username' ],
F_URL: [ 'URI','extracted_urls_mv' ],
F_PCAP: [ ],
F_FILE_PATH: [ 'file_path', 'FullPath', 'ProcessPath', 'docs{}.path', 'docs{}.process_name','attachment_names_mv' ],
F_FILE_NAME: [ 'attachment_names_mv' ],
F_EMAIL_ADDRESS: ['rcpto','from','mailfrom','reply-to','sender','extracted_fromaddr','extracted_toaddr','mail_to','mail_from','env_mail_from','env_mail_to' ],
F_YARA: [ ],
F_INDICATOR: [ 'ObjectID' ],
F_SHA256: ['attachment_hashes_mv','FileHash','ProcessHash','hash_value'],
F_MD5: ['MD5_Checksum','md5'],
F_SHA1: ['hash_value','sha1'],
F_MESSAGE_ID: ['message_id','msg_id']
}
TEMPORAL_OBSERVABLES = [ F_IPV4, F_IPV4_CONVERSATION, F_HOSTNAME ]
INDICATOR_OBSERVABLE = [ F_INDICATOR ]
for row in results_list:
# is this observable type a temporal type?
o_time = row['_time'] if '_time' in row else None
if o_time is not None:
m = re.match(r'^([0-9]{4})-([0-9]{2})-([0-9]{2})T([0-9]{2}):([0-9]{2}):([0-9]{2})\.[0-9]{3}[-+][0-9]{2}:[0-9]{2}$', o_time)
if not m:
logging.error("_time field does not match expected format: {0}".format(o_time))
else:
# reformat this time for ACE
o_time = '{0}-{1}-{2} {3}:{4}:{5}'.format(
m.group(1),
m.group(2),
m.group(3),
m.group(4),
m.group(5),
m.group(6))
# special case for F_IPV4_CONVERSATION types because they have multiple fields
# generate all permutations of combinations of IP addresses
# the check for "is not None" is a hack, not sure why it could be None but we'll catch it here
ipv4s = [row[field] for field in FIELD_MAPPING[F_IPV4] if field in row and row[field] is not None]
if len(ipv4s) > 0:
conversations = []
while len(ipv4s) > 0:
ipv4 = ipv4s.pop()
for other_ipv4 in ipv4s:
a.add_observable(F_IPV4_CONVERSATION, create_ipv4_conversation(ipv4, other_ipv4), o_time)
for o_type in FIELD_MAPPING:
for field_name in FIELD_MAPPING[o_type]:
# does this field exist in this row?
if field_name in row:
# is the value of this field a list of things?
if isinstance(row[field_name], list):
for value in row[field_name]:
if value.strip() != '' and value.strip() != '-':
a.add_observable(o_type, value, o_time if o_type in TEMPORAL_OBSERVABLES else None)
# this is what we pretty much expect
elif isinstance(row[field_name], str):
if row[field_name].strip() != '' and row[field_name].strip() != '-':
a.add_observable(o_type, row[field_name], o_time if o_type in TEMPORAL_OBSERVABLES else None)
elif row[field_name] is None:
if o_type in INDICATOR_OBSERVABLE:
# When only a substring of the logged value matched, the Splunk lookup table cannot tell us which indicator value produced the match, so re-search the lookup table file and return that match here
ind_value, objectid = indicator_lookup(row)
if ind_value and objectid:
row['Indicator'] = ind_value
row['ObjectID'] = objectid
a.description='{0} - {1}'.format(alert_name, ind_value)
a.add_observable(o_type, objectid, o_time if o_type in TEMPORAL_OBSERVABLES else None)
else:
logging.debug("skipping None value for field {0}".format(field_name))
else:
logging.debug("skipping None value for field {0}".format(field_name))
else:
print(a)
print("field_name:"+field_name)
print("row[fn]:"+row[field_name])
print("type:"+str(type(row[field_name])))
logging.error("unexpected data type for field {0}: {1}: {2}: alert:{3}".format(field_name, type(row[field_name]),row[field_name],a))
try:
#logging.info("submitting alert {0} to {1}".format(a, config.get('saq', 'SAQ_Server')))
logging.info("submitting alert {0} to {1}".format(a, config['saq']['SAQ_Server']))
#a.submit(config.get('saq', 'SAQ_Server'), 'blah')
a.submit(config['saq']['SAQ_Server'], 'blah')
except Exception as e:
logging.error("unable to submit alert: {0}".format(str(e)))
traceback.print_exc()
threads = []
for (search, group_by) in search_queue:
t = threading.Thread(target=execute_search_wrapper, args=(search, group_by))
t.start()
threads.append(t)
for thread in threads:
thread.join()
|
animate.py
|
import shutil
import sys
from contextlib import contextmanager
from threading import Event, Thread
from typing import Generator, List
from pipx.constants import WINDOWS
from pipx.emojis import EMOJI_SUPPORT
stderr_is_tty = sys.stderr.isatty()
CLEAR_LINE = "\033[K"
EMOJI_ANIMATION_FRAMES = ["⣷", "⣯", "⣟", "⡿", "⢿", "⣻", "⣽", "⣾"]
NONEMOJI_ANIMATION_FRAMES = ["", ".", "..", "..."]
EMOJI_FRAME_PERIOD = 0.1
NONEMOJI_FRAME_PERIOD = 1
MINIMUM_COLS_ALLOW_ANIMATION = 16
if WINDOWS:
import ctypes
class _CursorInfo(ctypes.Structure):
_fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def _env_supports_animation() -> bool:
(term_cols, _) = shutil.get_terminal_size(fallback=(0, 0))
return stderr_is_tty and term_cols > MINIMUM_COLS_ALLOW_ANIMATION
@contextmanager
def animate(
message: str, do_animation: bool, *, delay: float = 0
) -> Generator[None, None, None]:
if not do_animation or not _env_supports_animation():
# No animation, just a single print of message
print(f"{message}...")
yield
return
event = Event()
if EMOJI_SUPPORT:
animate_at_beginning_of_line = True
symbols = EMOJI_ANIMATION_FRAMES
period = EMOJI_FRAME_PERIOD
else:
animate_at_beginning_of_line = False
symbols = NONEMOJI_ANIMATION_FRAMES
period = NONEMOJI_FRAME_PERIOD
thread_kwargs = {
"message": message,
"event": event,
"symbols": symbols,
"delay": delay,
"period": period,
"animate_at_beginning_of_line": animate_at_beginning_of_line,
}
t = Thread(target=print_animation, kwargs=thread_kwargs)
t.start()
try:
yield
finally:
event.set()
clear_line()
sys.stderr.write("\r")
sys.stdout.write("\r")
def print_animation(
*,
message: str,
event: Event,
symbols: List[str],
delay: float,
period: float,
animate_at_beginning_of_line: bool,
) -> None:
(term_cols, _) = shutil.get_terminal_size(fallback=(9999, 24))
event.wait(delay)
while not event.wait(0):
for s in symbols:
if animate_at_beginning_of_line:
max_message_len = term_cols - len(f"{s} ... ")
cur_line = f"{s} {message:.{max_message_len}}"
if len(message) > max_message_len:
cur_line += "..."
else:
max_message_len = term_cols - len("... ")
cur_line = f"{message:.{max_message_len}}{s}"
clear_line()
sys.stderr.write("\r")
sys.stderr.write(cur_line)
if event.wait(period):
break
# for Windows pre-ANSI-terminal-support (before Windows 10 TH2 (v1511))
# https://stackoverflow.com/a/10455937
def win_cursor(visible: bool) -> None:
ci = _CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle(-11) # type: ignore[attr-defined]
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci)) # type: ignore[attr-defined]
ci.visible = visible
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci)) # type: ignore[attr-defined]
def hide_cursor() -> None:
if stderr_is_tty:
if WINDOWS:
win_cursor(visible=False)
else:
sys.stderr.write("\033[?25l")
sys.stderr.flush()
def show_cursor() -> None:
if stderr_is_tty:
if WINDOWS:
win_cursor(visible=True)
else:
sys.stderr.write("\033[?25h")
sys.stderr.flush()
def clear_line() -> None:
sys.stderr.write(f"{CLEAR_LINE}")
sys.stdout.write(f"{CLEAR_LINE}")
|
logs.py
|
from functools import wraps
import json
try: # Python 3 imports
from urllib.parse import urljoin
except ImportError: # Python 2 imports
from urlparse import urljoin
from collections import defaultdict
from threading import Thread
from twisted.internet import reactor, ssl
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Protocol
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.internet.ssl import ClientContextFactory
from .base_request import BaseRequest
from . import exceptions
from .models.config import Config
from .models.device import Device
from .settings import Settings
class WebClientContextFactory(ClientContextFactory):
"""
This is a low-level class and is not meant to be used by end users directly.
"""
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
class StreamingParser(Protocol):
"""
This is a low-level class and is not meant to be used by end users directly.
"""
def __init__(self, callback, error):
self.callback = callback
self.error = error
self.pending = b''
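# dataReceived() below buffers partial lines: the log stream is newline-delimited
# JSON, so only complete lines are parsed and the trailing fragment stays in
# self.pending until more data arrives.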
def dataReceived(self, data):
obj = {}
self.pending += data
lines = self.pending.split(b'\n')
self.pending = lines.pop()
for line in lines:
try:
if line:
obj = json.loads(line)
except Exception as e:
self.transport.stopProducing()
self.transport.loseConnection()
if self.error:
self.error(e)
break
self.callback(obj)
def connectionLost(self, reason):
pass
def cbRequest(response, callback, error):
protocol = StreamingParser(callback, error)
response.deliverBody(protocol)
return protocol
def cbDrop(protocol):
protocol.transport.stopProducing()
protocol.transport.loseConnection()
class Subscription:
"""
This is a low-level class and is not meant to be used by end users directly.
"""
def __init__(self):
self.context_factory = WebClientContextFactory()
self.settings = Settings()
def add(self, uuid, callback, error=None, count=None):
query = 'stream=1'
if count:
query = 'stream=1&count={}'.format(count)
url = urljoin(
self.settings.get('api_endpoint'),
'/device/v2/{uuid}/logs?{query}'.format(uuid=uuid, query=query)
)
headers = {}
headers[b'Authorization'] = ['Bearer {:s}'.format(self.settings.get('token')).encode()]
agent = Agent(reactor, self.context_factory)
d = agent.request(b'GET', url.encode(), Headers(headers), None)
d.addCallback(cbRequest, callback, error)
self.run()
return d
def run(self):
if not reactor.running:
Thread(target=reactor.run, args=(False,)).start()
def stop(self, d):
reactor.callFromThread(d.addCallback, cbDrop)
class Logs:
"""
This class implements functions that allow processing logs from device.
"""
subscriptions = defaultdict(list)
def __init__(self):
self.base_request = BaseRequest()
self.config = Config()
self.device = Device()
self.settings = Settings()
self.subscription_handler = Subscription()
def __exit__(self, exc_type, exc_value, traceback):
reactor.stop()
def subscribe(self, uuid, callback, error=None, count=None):
"""
Subscribe to device logs.
Args:
uuid (str): device uuid.
callback (function): this callback is called on receiving a message.
error (Optional[function]): this callback is called on an error event.
count (Optional[int]): number of historical messages to include.
Returns:
dict: a log entry will contain the following keys: `isStdErr, timestamp, message, isSystem, createdAt`.
"""
self.device.get(uuid)
self.subscriptions[uuid].append(self.subscription_handler.add(uuid, callback, error, count))
def history(self, uuid, count=None):
"""
Get device logs history.
Args:
uuid (str): device uuid.
count (Optional[int]): number of historical messages to include.
"""
raw_query = ''
if count:
raw_query = 'count={}'.format(count)
return self.base_request.request(
'/device/v2/{uuid}/logs'.format(uuid=uuid), 'GET', raw_query=raw_query,
endpoint=self.settings.get('api_endpoint')
)
def unsubscribe(self, uuid):
"""
Unsubscribe from device logs for a specific device.
Args:
uuid (str): device uuid.
"""
if uuid in self.subscriptions:
for d in self.subscriptions[uuid]:
self.subscription_handler.stop(d)
del self.subscriptions[uuid]
def unsubscribe_all(self):
"""
Unsubscribe all subscribed devices.
"""
for device in self.subscriptions:
for d in self.subscriptions[device]:
self.subscription_handler.stop(d)
self.subscriptions = {}
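# Example usage (a minimal sketch; 'device_uuid' and the callbacks are placeholders):
#
#   logs = Logs()
#   logs.subscribe(device_uuid, callback=print, error=print, count=10)
#   ...
#   logs.unsubscribe(device_uuid)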
|
server.py
|
#!/usr/bin/env python
# File name : server.py
# Production : RaspTank
# Website : www.adeept.com
# E-mail : support@adeept.com
# Author : William
# Date : 2018/08/22
import socket
import time
import threading
import move
import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)
pwm.set_all_pwm(0,300)
from rpi_ws281x import *
import argparse
import os
import ultra
import FPV
import psutil
import servo
import LED
import findline
step_set = 1
speed_set = 100
rad = 0.6
new_frame = 0
direction_command = 'no'
turn_command = 'no'
#pwm = Adafruit_PCA9685.PCA9685()
#pwm.set_pwm_freq(50)
pos_input = 1
catch_input = 1
cir_input = 6
ultrasonicMode = 0
FindLineMode = 0
FindColorMode = 0
def app_ctrl():
app_HOST = ''
app_PORT = 10123
app_BUFSIZ = 1024
app_ADDR = (app_HOST, app_PORT)
AppSerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
AppSerSock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
AppSerSock.bind(app_ADDR)
def setup():
move.setup()
def appCommand(data_input):
global direction_command, turn_command, pos_input, catch_input, cir_input
if data_input == 'forwardStart\n':
direction_command = 'forward'
move.move(speed_set, direction_command, turn_command, rad)
elif data_input == 'backwardStart\n':
direction_command = 'backward'
move.move(speed_set, direction_command, turn_command, rad)
elif data_input == 'leftStart\n':
turn_command = 'left'
move.move(speed_set, direction_command, turn_command, rad)
elif data_input == 'rightStart\n':
turn_command = 'right'
move.move(speed_set, direction_command, turn_command, rad)
elif 'forwardStop' in data_input:
direction_command = 'no'
move.move(speed_set, direction_command, turn_command, rad)
elif 'backwardStop' in data_input:
direction_command = 'no'
move.move(speed_set, direction_command, turn_command, rad)
elif 'leftStop' in data_input:
turn_command = 'no'
move.move(speed_set, direction_command, turn_command, rad)
elif 'rightStop' in data_input:
turn_command = 'no'
move.move(speed_set, direction_command, turn_command, rad)
if data_input == 'lookLeftStart\n':
if cir_input < 12:
cir_input+=1
servo.cir_pos(cir_input)
elif data_input == 'lookRightStart\n':
if cir_input > 1:
cir_input-=1
servo.cir_pos(cir_input)
elif data_input == 'downStart\n':
servo.camera_ang('lookdown',10)
elif data_input == 'upStart\n':
servo.camera_ang('lookup',10)
elif 'lookLeftStop' in data_input:
pass
elif 'lookRightStop' in data_input:
pass
elif 'downStop' in data_input:
pass
elif 'upStop' in data_input:
pass
if data_input == 'aStart\n':
if pos_input < 17:
pos_input+=1
servo.hand_pos(pos_input)
elif data_input == 'bStart\n':
if pos_input > 1:
pos_input-=1
servo.hand_pos(pos_input)
elif data_input == 'cStart\n':
if catch_input < 13:
catch_input+=3
servo.catch(catch_input)
elif data_input == 'dStart\n':
if catch_input > 1:
catch_input-=3
servo.catch(catch_input)
elif 'aStop' in data_input:
pass
elif 'bStop' in data_input:
pass
elif 'cStop' in data_input:
pass
elif 'dStop' in data_input:
pass
print(data_input)
def appconnect():
global AppCliSock, AppAddr
AppSerSock.listen(5)
print('waiting for App connection...')
AppCliSock, AppAddr = AppSerSock.accept()
print('...App connected from :', AppAddr)
appconnect()
setup()
    app_threading=threading.Thread(target=appconnect)  # Thread that waits for the app connection
    app_threading.setDaemon(True)  # Daemon thread: it exits when the main program exits
    app_threading.start()  # Thread starts
while 1:
data = ''
data = str(AppCliSock.recv(app_BUFSIZ).decode())
if not data:
continue
appCommand(data)
pass
AppConnect_threading=threading.Thread(target=app_ctrl)  # Thread that runs the app control server
AppConnect_threading.setDaemon(True)  # Daemon thread: it exits when the main program exits
AppConnect_threading.start()  # Thread starts
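# --- Hedged example (illustration only; not part of the original Adeept script) ---
# Sketch of a controller talking to the app command socket opened by app_ctrl()
# above (TCP port 10123). The robot's hostname is a placeholder assumption.
def example_app_client(host='raspberrypi.local', port=10123):
    cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    cli.connect((host, port))
    cli.send('forwardStart\n'.encode())  # start driving forward
    time.sleep(1)
    cli.send('forwardStop\n'.encode())   # stop
    cli.close()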
def findline_thread(): #Line tracking mode
while 1:
while FindLineMode:
findline.run()
time.sleep(0.2)
def get_cpu_tempfunc():
""" Return CPU temperature """
result = 0
mypath = "/sys/class/thermal/thermal_zone0/temp"
with open(mypath, 'r') as mytmpfile:
for line in mytmpfile:
result = line
result = float(result)/1000
result = round(result, 1)
return str(result)
def get_gpu_tempfunc():
""" Return GPU temperature as a character string"""
res = os.popen('/opt/vc/bin/vcgencmd measure_temp').readline()
return res.replace("temp=", "")
def get_cpu_use():
""" Return CPU usage using psutil"""
cpu_cent = psutil.cpu_percent()
return str(cpu_cent)
def get_ram_info():
""" Return RAM usage using psutil """
ram_cent = psutil.virtual_memory()[2]
return str(ram_cent)
def get_swap_info():
""" Return swap memory usage using psutil """
swap_cent = psutil.swap_memory()[3]
return str(swap_cent)
def info_get():
global cpu_t,cpu_u,gpu_t,ram_info
while 1:
cpu_t = get_cpu_tempfunc()
cpu_u = get_cpu_use()
ram_info = get_ram_info()
time.sleep(3)
def info_send_client():
SERVER_IP = addr[0]
    SERVER_PORT = 2256  # Port for the system-info socket
SERVER_ADDR = (SERVER_IP, SERVER_PORT)
Info_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Set connection value for socket
Info_Socket.connect(SERVER_ADDR)
print(SERVER_ADDR)
while 1:
try:
Info_Socket.send((get_cpu_tempfunc()+' '+get_cpu_use()+' '+get_ram_info()).encode())
time.sleep(1)
except:
pass
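# --- Hedged note (illustration only; not part of the original script) ---
# info_send_client() above pushes a space-separated "cpu_temp cpu_use ram" string
# once per second on port 2256; a receiving client could parse it like this.
def example_parse_status(raw_bytes):
    cpu_temp, cpu_use, ram_use = raw_bytes.decode().split()
    return float(cpu_temp), float(cpu_use), float(ram_use)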
def ultra_send_client():
ultra_IP = addr[0]
    ultra_PORT = 2257  # Port for the ultrasonic-distance socket
ultra_ADDR = (ultra_IP, ultra_PORT)
ultra_Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Set connection value for socket
ultra_Socket.connect(ultra_ADDR)
print(ultra_ADDR)
while 1:
while ultrasonicMode:
try:
if not FindColorMode:
ultra_Socket.send(str(round(ultra.checkdist(),2)).encode())
time.sleep(0.5)
continue
fpv.UltraData(round(ultra.checkdist(),2))
time.sleep(0.2)
except:
pass
time.sleep(0.5)
def FPV_thread():
fpv=FPV.FPV()
fpv.capture_thread(addr[0])
def ap_thread():
os.system("sudo create_ap wlan0 eth0 AdeeptCar 12345678")
def run():
global direction_command, turn_command, pos_input, catch_input, cir_input, ultrasonicMode, FindLineMode, FindColorMode
move.setup()
findline.setup()
    info_threading=threading.Thread(target=info_send_client)  # Thread that streams CPU/RAM status to the client
    info_threading.setDaemon(True)  # Daemon thread: it exits when the main program exits
    info_threading.start()  # Thread starts
    ultra_threading=threading.Thread(target=ultra_send_client)  # Thread that streams ultrasonic distance readings
    ultra_threading.setDaemon(True)  # Daemon thread: it exits when the main program exits
    ultra_threading.start()  # Thread starts
    findline_threading=threading.Thread(target=findline_thread)  # Thread that runs line tracking while FindLineMode is set
    findline_threading.setDaemon(True)  # Daemon thread: it exits when the main program exits
    findline_threading.start()  # Thread starts
#move.stand()
ws_R = 0
ws_G = 0
ws_B = 0
Y_pitch = 0
Y_pitch_MAX = 200
Y_pitch_MIN = -200
while True:
data = ''
data = str(tcpCliSock.recv(BUFSIZ).decode())
if not data:
continue
elif 'forward' == data:
direction_command = 'forward'
move.move(speed_set, direction_command, turn_command, rad)
elif 'backward' == data:
direction_command = 'backward'
move.move(speed_set, direction_command, turn_command, rad)
elif 'DS' in data:
direction_command = 'no'
move.move(speed_set, direction_command, turn_command, rad)
elif 'left' == data:
turn_command = 'left'
move.move(speed_set, direction_command, turn_command, rad)
elif 'right' == data:
turn_command = 'right'
move.move(speed_set, direction_command, turn_command, rad)
elif 'TS' in data:
turn_command = 'no'
move.move(speed_set, direction_command, turn_command, rad)
elif 'out' == data:
if pos_input < 17:
pos_input+=1
servo.hand_pos(pos_input)
elif 'in' == data:
if pos_input > 1:
pos_input-=1
servo.hand_pos(pos_input)
elif 'headup' == data:
servo.camera_ang('lookup',0)
elif 'headdown' == data:
servo.camera_ang('lookdown',0)
elif 'headhome' == data:
servo.initPosAll()
elif 'c_left' == data:
if cir_input < 12:
cir_input+=1
servo.cir_pos(cir_input)
elif 'c_right' == data:
if cir_input > 1:
cir_input-=1
servo.cir_pos(cir_input)
elif 'catch' == data:
if catch_input < 13:
catch_input+=1
servo.catch(catch_input)
elif 'loose' == data:
if catch_input > 1:
catch_input-=1
servo.catch(catch_input)
elif 'wsR' in data:
try:
set_R=data.split()
ws_R = int(set_R[1])
LED.colorWipe(Color(ws_R,ws_G,ws_B))
except:
pass
elif 'wsG' in data:
try:
set_G=data.split()
ws_G = int(set_G[1])
LED.colorWipe(Color(ws_R,ws_G,ws_B))
except:
pass
elif 'wsB' in data:
try:
set_B=data.split()
ws_B = int(set_B[1])
LED.colorWipe(Color(ws_R,ws_G,ws_B))
except:
pass
elif 'FindColor' in data:
fpv.FindColor(1)
FindColorMode = 1
ultrasonicMode = 1
tcpCliSock.send(('FindColor').encode())
elif 'WatchDog' in data:
fpv.WatchDog(1)
tcpCliSock.send(('WatchDog').encode())
elif 'steady' in data:
ultrasonicMode = 1
tcpCliSock.send(('steady').encode())
elif 'FindLine' in data:
FindLineMode = 1
tcpCliSock.send(('FindLine').encode())
elif 'funEnd' in data:
fpv.FindColor(0)
fpv.WatchDog(0)
ultrasonicMode = 0
FindLineMode = 0
FindColorMode = 0
tcpCliSock.send(('FunEnd').encode())
move.motorStop()
time.sleep(0.3)
move.motorStop()
else:
pass
#print(data)
if __name__ == '__main__':
HOST = ''
    PORT = 10223  # Port for the main control socket
BUFSIZ = 1024 #Define buffer size
ADDR = (HOST, PORT)
pwm.set_all_pwm(0,300)
try:
LED = LED.LED()
LED.colorWipe(255,16,0)
except:
print('Use "sudo pip3 install rpi_ws281x" to install WS_281x package')
pass
while 1:
try:
s =socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.connect(("1.1.1.1",80))
ipaddr_check=s.getsockname()[0]
s.close()
print(ipaddr_check)
except:
            ap_threading=threading.Thread(target=ap_thread)  # Thread that creates a Wi-Fi access point as a fallback
            ap_threading.setDaemon(True)  # Daemon thread: it exits when the main program exits
            ap_threading.start()  # Thread starts
LED.colorWipe(0,16,50)
time.sleep(1)
LED.colorWipe(0,16,100)
time.sleep(1)
LED.colorWipe(0,16,150)
time.sleep(1)
LED.colorWipe(0,16,200)
time.sleep(1)
LED.colorWipe(0,16,255)
time.sleep(1)
LED.colorWipe(35,255,35)
try:
tcpSerSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpSerSock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(5) #Start server,waiting for client
print('waiting for connection...')
tcpCliSock, addr = tcpSerSock.accept()
print('...connected from :', addr)
fpv=FPV.FPV()
fps_threading=threading.Thread(target=FPV_thread) #Define a thread for FPV and OpenCV
fps_threading.setDaemon(True) #'True' means it is a front thread,it would close when the mainloop() closes
fps_threading.start() #Thread starts
break
except:
LED.colorWipe(0,0,0)
try:
LED.colorWipe(0,80,255)
except:
pass
run()
try:
pwm.set_all_pwm(0,0)
run()
except:
LED.colorWipe(0,0,0)
servo.clean_all()
move.destroy()
|
main.py
|
from pandac.PandaModules import loadPrcFileData
loadPrcFileData('', 'win-size 640 480') # Window size
loadPrcFileData('', 'win-fixed-size #t') # Window is a fixed size
loadPrcFileData('', 'textures-auto-power-2 1')
loadPrcFileData('', 'textures-power-2 up')
loadPrcFileData('', 'load-file-type p3assimp')
from direct.showbase.ShowBase import ShowBase
from panda3d.core import CollisionTraverser, CollisionNode
from panda3d.core import CollisionHandlerQueue, CollisionRay, TransparencyAttrib
from panda3d.core import AmbientLight, DirectionalLight, Vec4, Vec3, Point2
from panda3d.core import CardMaker, Texture, PTAUchar, CPTAUchar, BitMask32
from panda3d.vision import ARToolKit
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
from pieces import create_piece
from config import *
import Ray
import multiprocessing
import numpy as np
import sys
import cv2
import time
class ChessboardDemo(ShowBase):
def __init__(self):
ShowBase.__init__(self)
self.disableMouse()
# Setting up webcam image
self.webcam = Webcam()
self.ar2 = ARToolKit.make(self.cam, "data/camera_para.dat", 1)
self.cam.node().getDisplayRegion(0).setSort(20)
# Creating the anchor to the marker
self.anchor = self.render.attachNewNode("Anchor node")
self.anchor.reparent_to(render)
self.ar2.attachPattern("data/marker.patt", self.anchor)
# Setting up lighting
alight = AmbientLight('ambientLight')
alight.setColor(Vec4(0.4, 0.4, 0.4, 1))
alightNP = render.attachNewNode(alight)
dlight = DirectionalLight('directionalLight')
dlight.setDirection(Vec3(-1, 1, -1))
        dlight.setColor(Vec4(0.4, 0.4, 0.4, 1))
dlightNP = render.attachNewNode(dlight)
render.setLightOff()
render.setLight(alightNP)
render.setLight(dlightNP)
# Setting up players
self.humans = HUMANS
self.ais = AIS
self.ai_queue = multiprocessing.Queue()
# 1 = white, -1 = black
self.turn = 1
# 0 = no check, 1 = white, -1 = black
self.check = 0
self.can_move = True
self.gameover = False
self.texture_black = self.loader.loadTexture(TEXTURE_BLACK)
self.texture_white = self.loader.loadTexture(TEXTURE_WHITE)
self.setup_collision()
self.create_board()
self.moves = self.get_valid_moves()
# Currently highlighted square (list [x,y,z])
self.hiSq = False
# Piece we are currently selecting
self.dragging = False
# Events
taskMgr.add(self.update_webcam, 'cam')
taskMgr.add(self.ai_move, 'ai')
taskMgr.add(self.mouseover, 'mouseover')
self.accept("mouse1", self.left_click)
self.accept("mouse3", self.right_click)
self.accept('escape', sys.exit) # Escape closes the window
def create_board(self):
# C++ object containing the actual chessboard
self.board = Ray.Chess_AI(np.ascontiguousarray(np.array(BOARD)), self.turn, PAWN_2STEP)
# Array containing the piece objects we are going to draw
self.board_array = np.transpose(np.array(BOARD))
        self.draw_pieces = np.full_like(self.board_array, None, dtype=object)
        self.draw_squares = np.zeros_like(self.board_array, dtype=object)
max_x, max_y, max_z = BOARD_SIZE
# Creates the 3D objects (from the file pieces.py) and the squares
for z in range(max_z):
for y in range(max_y):
for x in range(max_x):
if self.board_array[x,y,z] != 0:
self.draw_pieces[x,y,z] = create_piece(self.board_array[x,y,z], [x,y,z], self)
# Load, parent, color, and position the model (a single square polygon)
self.draw_squares[x,y,z] = loader.loadModel("models/square")
self.draw_squares[x,y,z].reparentTo(self.anchor)
self.draw_squares[x,y,z].setScale(SCALE)
self.draw_squares[x,y,z].setPos(square_position(x,y,z, BOARD_SIZE))
self.draw_squares[x,y,z].setColor(square_color(x,y,z))
                    # The bottom square is solid, the rest are slightly translucent
if z > 0:
self.draw_squares[x,y,z].setTransparency(TransparencyAttrib.MAlpha)
self.draw_squares[x,y,z].setAlphaScale(0.75)
# Set the model itself to be collideable with the ray.
self.draw_squares[x,y,z].find("**/polygon").node().setIntoCollideMask(BitMask32.bit(1))
                    # Set a tag on the square's node so we can look up which square was picked later
self.draw_squares[x,y,z].find("**/polygon").node().setTag('square', ','.join([str(dim) for dim in (x,y,z)]))
def setup_collision(self):
self.picker = CollisionTraverser() # Make a traverser
self.pq = CollisionHandlerQueue() # Make a handler
# Make a collision node for our picker ray
self.pickerNode = CollisionNode('mouseRay')
# Attach that node to the camera since the ray will need to be positioned
# relative to it
self.pickerNP = camera.attachNewNode(self.pickerNode)
# Everything to be picked will use bit 1. This way if we were doing other
# collision we could separate it
self.pickerNode.setFromCollideMask(BitMask32.bit(1))
self.pickerRay = CollisionRay()
# Add it to the collision node
self.pickerNode.addSolid(self.pickerRay)
# Register the ray as something that can cause collisions
self.picker.addCollider(self.pickerNP, self.pq)
def ai_move(self, task):
if self.turn in self.ais and not self.gameover:
if self.can_move is True:
# Start the thinking process
self.can_move = False
recursions = 1
if len(self.moves) < 30:
recursions = 2
if len(self.moves) < 12:
recursions = 3
if TEST is True:
print(f'doing {recursions} recursions')
self.start = time.time()
make_ai_think = multiprocessing.Process(target=ai, args=(self.board, self.ai_queue, recursions))
make_ai_think.start()
else:
# The AI function will put the move in this queue when it figures it out
if not self.ai_queue.empty():
if TEST is True:
print(f'Took {time.time()-self.start}.')
piece, move = self.ai_queue.get()
self.can_move = False
self.move_pieces(piece, move)
self.turn = 1 if self.turn == -1 else -1
new_array = np.ascontiguousarray(np.transpose(self.board_array))
self.board.set_board(new_array, self.turn)
# This hides the check on the player's king if there was one
self.hide_possible_moves()
self.moves = self.get_valid_moves()
self.can_move = True
if TEST is True:
print('AI moved')
return Task.cont
def update_webcam(self, task):
self.can_get_image = False
self.webcam_texture = self.webcam.step()
self.ar2.analyze(self.webcam_texture)
self.can_get_image = True
return Task.cont
def move_pieces(self, a, b, move_model=True):
# Move the 3D model of the piece and update its square variable
# Also delete the other one...
if move_model is True:
# If there is a piece on the new location
if self.draw_pieces[b[0], b[1], b[2]] is not None:
# We delete it
self.draw_pieces[b[0], b[1], b[2]].obj.removeNode()
# We move the piece to its new location
self.draw_pieces[b[0], b[1], b[2]] = self.draw_pieces[a[0], a[1], a[2]]
self.draw_pieces[b[0], b[1], b[2]].move([b[0], b[1], b[2]])
# Remove the piece from the old location
self.draw_pieces[a[0], a[1], a[2]] = None
# Move one to other's position
self.board_array[b[0], b[1], b[2]] = self.board_array[a[0], a[1], a[2]]
# Replace one's position with empty
self.board_array[a[0], a[1], a[2]] = 0
def mouseover(self, task):
# If we have a mouse
if self.mouseWatcherNode.hasMouse():
mpos = self.mouseWatcherNode.getMouse()
# Set the position of the ray based on the mouse position
self.pickerRay.setFromLens(self.camNode, mpos.getX(), mpos.getY())
# Do the actual collision pass
self.picker.traverse(self.anchor)
if self.pq.getNumEntries() <= 0:
if self.hiSq is not False:
self.square_default_color(self.hiSq)
self.hiSq = False
else:
# Sort the hits so the closest is first, and highlight that node
self.pq.sortEntries()
dims = self.pq.getEntry(0).getIntoNode().getTag('square').split(',')
x,y,z = [int(dim) for dim in dims]
# Remove highlight from previous square
if self.hiSq is not False and self.hiSq != [x,y,z]:
# Turn square back to its normal, non highlighted color
self.square_default_color(self.hiSq)
# Set the highlight on the current square
self.draw_squares[x,y,z].setColor(HIGHLIGHT)
self.hiSq = [x,y,z]
return Task.cont
def right_click(self):
# Drop the piece
if self.dragging is not False:
# Hide the green/red squares showing where we can move
self.hide_possible_moves()
tmp = self.dragging
self.dragging = False
self.square_default_color(tmp)
def left_click(self):
if self.gameover is False and self.turn in self.humans:
# MOVING SELECTED PIECE
if self.dragging is not False:
# If we have a piece selected and we are hovering over a square
if self.hiSq is not False:
# If the square we are clicking is a possible move, we move
if (self.dragging, self.hiSq) in self.moves:
self.can_move = False
self.move_pieces(self.dragging, self.hiSq)
self.turn = 1 if self.turn == -1 else -1
# Moving the object
new_array = np.ascontiguousarray(np.transpose(self.board_array))
self.board.set_board(new_array, self.turn)
self.hide_possible_moves()
self.moves = self.get_valid_moves()
self.can_move = True
# Hide the green/red squares showing where we can move
self.hide_possible_moves()
# Drop the piece
tmp = self.dragging
self.dragging = False
self.square_default_color(tmp)
# SELECTING PIECE
if self.hiSq is not False:
# If we pick the piece of the side whose turn it is
if self.turn * self.board_array[self.hiSq[0],self.hiSq[1],self.hiSq[2]] > 0:
# Hide the old green/red squares showing where we could move
self.hide_possible_moves()
# Select it
self.dragging = self.hiSq
self.show_possible_moves()
def get_valid_moves(self):
moves = self.board.get_moves()
check_found = self.board.is_in_check()
if check_found is True:
self.check = self.turn
kings = np.argwhere(self.board_array == 6*self.turn)
for king in kings:
self.draw_squares[king[0], king[1], king[2]].setColor(HIGHLIGHT_ATTACK)
if not moves:
print('CHECKMATE')
self.gameover = True
for king in kings:
self.draw_pieces[king[0], king[1], king[2]].obj.setColor(HIGHLIGHT_ATTACK)
else:
print('CHECK')
else:
self.check = 0
if not moves:
self.gameover = True
print('DRAW')
return moves
def square_default_color(self, pos):
'Colors a specific square'
# If we have a piece selected
if self.dragging is not False:
# If it's a move by a selected piece, it's green or red
if (self.dragging, pos) in self.moves:
if self.board_array[pos[0], pos[1], pos[2]] == 0:
self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT_MOVE)
else:
self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT_ATTACK)
# If it's a selected piece, it's blue
elif self.dragging == pos:
self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT)
# If it isn't then it's just black or white
else:
self.draw_squares[pos[0], pos[1], pos[2]].setColor(square_color(*pos))
# If we don't have a piece selected, it's just black or white
else:
self.draw_squares[pos[0], pos[1], pos[2]].setColor(square_color(*pos))
# Mark king in red if in check
if self.check:
if self.board_array[pos[0], pos[1], pos[2]]*self.turn == 6 and self.dragging != pos:
self.draw_squares[pos[0], pos[1], pos[2]].setColor(HIGHLIGHT_ATTACK)
def show_possible_moves(self):
# Changes the color of the squares the selected piece can move to
for piece, move in self.moves:
if piece == [self.dragging[0],self.dragging[1],self.dragging[2]]:
if self.board_array[move[0],move[1],move[2]]*self.turn < 0:
self.draw_squares[move[0],move[1],move[2]].setColor(HIGHLIGHT_ATTACK)
else:
self.draw_squares[move[0],move[1],move[2]].setColor(HIGHLIGHT_MOVE)
def hide_possible_moves(self):
# When we unselect a piece, we remove the coloring from the squares we can move to
for piece, move in self.moves:
self.draw_squares[move[0],move[1],move[2]].setColor(square_color(*move))
class Webcam(DirectObject):
def __init__(self):
'This object deals with obtaining the image from the webcam and processing it'
base.setBackgroundColor(0.5,0.5,0.5)
self.cap = cv2.VideoCapture(0)
[self.cap.read() for i in range(10)]
# Show the image on a card
sm = CardMaker('bg')
sm.setUvRange(Point2(0, 0), Point2(1, 1))
sm.setFrame(-1, 1, -1, 1)
self.test = render2d.attachNewNode(sm.generate(),2)
webcam_img, webcam_texture = self.get_cv_img()
self.test.setTexture(webcam_texture)
def step(self):
# Update texture
webcam_img, webcam_texture = self.get_cv_img()
self.test.setTexture(webcam_texture)
return webcam_texture
def get_cv_img(self):
success, webcam_img = self.cap.read()
if success:
shape = webcam_img.shape
flipped_img = cv2.flip(webcam_img, 0)
webcam_texture = Texture("detect")
webcam_texture.setCompression(Texture.CMOff)
webcam_texture.setup2dTexture(shape[1], shape[0], Texture.TUnsignedByte, Texture.FRgb)
p = PTAUchar.emptyArray(0)
p.setData(flipped_img)
webcam_texture.setRamImage(CPTAUchar(p))
return webcam_img, webcam_texture
def ai(board, queue, recursions):
# Get the move from the c++ code
# In a separate function because it gets multiprocessed by the main function
piece, move = board.best_move(recursions)
queue.put((piece, move))
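# --- Hedged sketch (illustration only; not part of the original game code) ---
# Shows how the ai() worker above is meant to be driven: spawn it in a child
# process and read the chosen (piece, move) back through the queue, mirroring
# ChessboardDemo.ai_move. `board` is assumed to be a Ray.Chess_AI instance as
# built in create_board().
def example_run_ai_once(board, recursions=1):
    queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=ai, args=(board, queue, recursions))
    worker.start()
    piece, move = queue.get()  # blocks until the worker publishes its result
    worker.join()
    return piece, move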
if __name__ == '__main__':
demo = ChessboardDemo()
demo.run()
|
api_unit_test.py
|
import math
import os
import sys
import threading
import time
import unittest
filename = os.path.dirname(__file__)
gdsName = os.path.join(filename, "../../../../src")
fprimeName = os.path.join(filename, "../../../../../Fw/Python/src")
sys.path.insert(0, gdsName)
sys.path.insert(0, fprimeName)
from fprime_gds.common.testing_fw import predicates
from fprime_gds.common.history.test import TestHistory
from fprime_gds.common.testing_fw.api import IntegrationTestAPI
from fprime_gds.common.pipeline.standard import StandardPipeline
from fprime_gds.common.utils.config_manager import ConfigManager
# these imports are needed to generate data objects.
from fprime.common.models.serialize.i32_type import I32Type
from fprime.common.models.serialize.u32_type import U32Type
from fprime.common.models.serialize.time_type import TimeType
from fprime_gds.common.data_types.ch_data import ChData
from fprime_gds.common.data_types.cmd_data import CmdData
from fprime_gds.common.data_types.event_data import EventData
class UTPipeline(StandardPipeline):
"""
    This pipeline shares many of the same calls available in pipeline.standard. It
    is used by this test case to feed simulated data to the test API.
"""
def __init__(self):
self.command_count = 0
self.t0 = TimeType()
StandardPipeline.__init__(self)
def connect(self, address, port):
pass
def disconnect(self):
pass
def send_command(self, command, args):
command_template = self.dictionaries.command_id[command]
cmd_data = CmdData(tuple(args), command_template)
self.histories.commands.data_callback(cmd_data)
for hist in self.coders.command_subscribers:
hist.data_callback(cmd_data)
ev_temp = self.dictionaries.event_name["CommandReceived"]
event = EventData((U32Type(cmd_data.get_id()),), self.t0 + time.time(), ev_temp)
self.enqueue_event(event)
ev_temp = self.dictionaries.event_name["HistorySizeUpdate"]
evr_size = U32Type(len(self.histories.events.retrieve()))
cmd_size = U32Type(len(self.histories.commands.retrieve()))
ch_size = U32Type(len(self.histories.channels.retrieve()))
event = EventData((evr_size, cmd_size, ch_size), self.t0 + time.time(), ev_temp)
self.enqueue_event(event)
self.command_count += 1
ch_temp = self.dictionaries.channel_name["CommandCounter"]
update = ChData(U32Type(self.command_count), self.t0 + time.time(), ch_temp)
self.enqueue_telemetry(update)
def enqueue_event(self, event):
"""
Used by the unit test to feed simulated data objects into the pipeline
"""
self.coders.event_decoder.send_to_all(event)
def enqueue_telemetry(self, channel):
"""
Used by the unit test to feed simulated data objects into the pipeline
"""
self.coders.channel_decoder.send_to_all(channel)
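# --- Hedged sketch (illustration only; the dictionary path below is an assumption) ---
# Minimal wiring of the simulated pipeline to the integration test API, mirroring
# what APITestCases.setUpClass does further down.
def _example_wire_pipeline(dictionary_path="./UnitTestDictionary.xml", log_dir="./logs"):
    pipeline = UTPipeline()
    pipeline.setup(ConfigManager(), dictionary_path, "./")
    api = IntegrationTestAPI(pipeline, log_dir)
    return pipeline, api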
class APITestCases(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pipeline = UTPipeline()
config = ConfigManager()
path = os.path.join(filename, "./UnitTestDictionary.xml")
down_store = os.path.join(filename, "./")
cls.pipeline.setup(config, path, down_store)
log_path = os.path.join(filename, "./logs")
cls.api = IntegrationTestAPI(cls.pipeline, log_path)
cls.case_list = [] # TODO find a better way to do this.
cls.threads = []
def setUp(self):
for t in self.threads:
            if t.is_alive():
t.join()
self.threads.clear()
count = len(self.case_list)
self.api.start_test_case(self._testMethodName, count)
self.case_list.append(1)
self.tHistory = TestHistory()
self.t0 = TimeType()
@classmethod
def tearDownClass(cls):
cls.pipeline.disconnect()
cls.api.teardown()
######################################################################################
# Test Case Helper Methods
######################################################################################
def fill_history(self, callback, items, timestep=0):
for item in items:
if timestep:
time.sleep(timestep)
if isinstance(item, ChData) or isinstance(item, EventData):
if item.time == 0:
item.time = self.t0 + time.time()
callback(item)
def fill_history_async(self, callback, items, timestep=1):
t = threading.Thread(target=self.fill_history, args=(callback, items, timestep))
self.threads.append(t)
t.start()
return t
def assert_lists_equal(self, expected, actual):
assert len(expected) == len(
actual
), "the given list should have had the length {}, but instead had {}\nExpected {}\nActual{}".format(
len(expected), len(actual), expected, actual
)
for i in range(len(expected)):
assert (
expected[i] == actual[i]
), "the {} element of the expected list should be {}, but was {}.".format(
i, expected[i], actual[i]
)
def get_counter_sequence(self, length):
seq = []
for i in range(0, length):
ch_temp = self.pipeline.dictionaries.channel_name["Counter"]
seq.append(ChData(U32Type(i), TimeType(), ch_temp))
return seq
def get_oscillator_sequence(self, length):
seq = []
for i in range(0, length):
ch_temp = self.pipeline.dictionaries.channel_name["Oscillator"]
val = int(round(10 * math.sin(math.radians(i))))
seq.append(ChData(I32Type(val), TimeType(), ch_temp))
return seq
def get_severity_event(self, severity="DIAGNOSTIC"):
name = "Severity" + severity
temp = self.pipeline.dictionaries.event_name[name]
event = EventData(tuple(), TimeType(), temp)
return event
def get_severity_sequence(self, length, severity="DIAGNOSTIC"):
seq = []
for i in range(0, length):
seq.append(self.get_severity_event(severity))
return seq
class AssertionFailure(Exception):
"""
        Used in test cases that intentionally trigger an AssertionError, to mark
        the failure path where the API did not raise the expected assertion.
"""
######################################################################################
# Test Cases
######################################################################################
def test_dummy_pipeline(self):
length = 15
event_list = self.get_severity_sequence(length)
t1 = self.fill_history_async(self.pipeline.enqueue_event, event_list, 0.1)
print("waiting for queue to fill")
pred = predicates.greater_than_or_equal_to(length // 2)
results = self.api.await_event_count(pred)
        assert pred(len(results)), "did not receive the expected number of event objects"
t1.join()
evr_hist = self.api.get_event_test_history()
item_count = len(evr_hist)
assert (
item_count == length
), "Were the correct number of items in the history? ({},{})".format(
item_count, length
)
def test_find_history_item(self):
self.fill_history(self.tHistory.data_callback, range(0, 50))
self.fill_history(self.tHistory.data_callback, range(0, 50))
pred = predicates.equal_to(25)
result = self.api.find_history_item(pred, self.tHistory)
assert result == 25, "The search should have returned 25, but found {}".format(
result
)
result = self.api.find_history_item(pred, self.tHistory, start=50)
assert result == 25, "The search should have returned 25, but found {}".format(
result
)
result = self.api.find_history_item(pred, self.tHistory, start=80)
assert (
result is None
), "The search should have returned None, but found {}".format(result)
def test_find_history_item_timeout(self):
pred = predicates.equal_to(25)
listA = range(0, 50)
self.fill_history_async(self.tHistory.data_callback, listA, 0.01)
result = self.api.find_history_item(pred, self.tHistory, timeout=1)
assert result == 25, "The search should have returned 25, but found {}".format(
result
)
pred = predicates.equal_to(49)
result = self.api.find_history_item(pred, self.tHistory, timeout=1)
assert result == 49, "The search should have returned 49, but found {}".format(
result
)
self.tHistory.clear()
listA = range(0, 50)
pred = predicates.equal_to(49)
self.fill_history_async(self.tHistory.data_callback, listA, 0.1)
result = self.api.find_history_item(pred, self.tHistory, timeout=1)
assert (
result is None
), "The search should have returned None, but found {}".format(result)
def test_find_history_sequence(self):
sequence = []
for i in range(30, 40, 2):
sequence.append(predicates.equal_to(i))
self.fill_history(self.tHistory.data_callback, range(0, 50))
results = self.api.find_history_sequence(sequence, self.tHistory)
assert len(results) == len(
sequence
), "The search should have found {}, but returned {}".format(
range(30, 40, 2), results
)
self.assert_lists_equal(range(30, 40, 2), results)
results = self.api.find_history_sequence(sequence, self.tHistory, start=34)
assert len(results) != len(
sequence
), "The search should have returned an incomplete list, but found {}".format(
results
)
self.fill_history(self.tHistory.data_callback, range(0, 50))
results = self.api.find_history_sequence(sequence, self.tHistory, start=34)
assert len(results) == len(
sequence
), "The search should have found {}, but returned {}".format(
range(30, 40, 2), results
)
self.assert_lists_equal(range(30, 40, 2), results)
results = self.api.find_history_sequence(sequence, self.tHistory, start=90)
assert len(results) != len(
sequence
), "The search should have returned an incomplete list, but found {}".format(
results
)
def test_find_history_sequence_timeout(self):
sequence = []
for i in range(30, 40, 2):
sequence.append(predicates.equal_to(i))
self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01)
results = self.api.find_history_sequence(sequence, self.tHistory, timeout=1)
assert results is not None, "The search should have found a sequence"
self.assert_lists_equal(range(30, 40, 2), results)
self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01)
results = self.api.find_history_sequence(
sequence, self.tHistory, start=34, timeout=1
)
assert results is not None, "The search should have found a sequence"
self.assert_lists_equal(range(30, 40, 2), results)
self.tHistory.clear()
self.fill_history_async(self.tHistory.data_callback, range(25, 50), 0.1)
results = self.api.find_history_sequence(
sequence, self.tHistory, start=90, timeout=1
)
assert len(results) != len(
sequence
), "The search should have returned an incomplete list, but found {}".format(
results
)
def test_find_history_count(self):
count_pred = predicates.greater_than_or_equal_to(10)
search_pred = predicates.greater_than_or_equal_to(40)
self.fill_history(self.tHistory.data_callback, range(0, 50))
results = self.api.find_history_count(count_pred, self.tHistory)
self.assert_lists_equal(range(0, 50), results)
results = self.api.find_history_count(count_pred, self.tHistory, search_pred)
self.assert_lists_equal(range(40, 50), results)
self.fill_history(self.tHistory.data_callback, range(50, 70))
results = self.api.find_history_count(count_pred, self.tHistory, search_pred)
self.assert_lists_equal(range(40, 70), results)
results = self.api.find_history_count(count_pred, self.tHistory, start=60)
self.assert_lists_equal(range(60, 70), results)
def test_find_history_count_timeout(self):
count_pred = predicates.greater_than_or_equal_to(10)
search_pred = predicates.greater_than_or_equal_to(40)
self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01)
results = self.api.find_history_count(count_pred, self.tHistory)
assert (
len(results) < 10
), "The search should have returned an incomplete list, but found {}".format(
results
)
results = self.api.find_history_count(
count_pred, self.tHistory, search_pred, timeout=2
)
self.assert_lists_equal(range(40, 50), results)
self.fill_history_async(self.tHistory.data_callback, range(50, 60), 0.01)
results = self.api.find_history_count(
count_pred, self.tHistory, search_pred, start=50, timeout=2
)
self.assert_lists_equal(range(50, 60), results)
self.tHistory.clear()
self.fill_history_async(self.tHistory.data_callback, range(35, 60), 0.1)
results = self.api.find_history_count(
count_pred, self.tHistory, search_pred, timeout=1
)
assert (
len(results) < 10
), "The search should have returned an incomplete list, but found {}".format(
results
)
def test_get_latest_fsw_time(self):
ts0 = self.api.get_latest_time()
time.sleep(0.1)
ts1 = self.api.get_latest_time()
assert (
ts0 is ts1
), "The starting timestamp should not have changed if no dataobjects were enqueued"
count_seq = self.get_counter_sequence(100)
event_seq = self.get_severity_sequence(100)
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.02)
t2 = self.fill_history_async(self.pipeline.enqueue_event, event_seq, 0.02)
last = ts0
for i in range(1, 10):
time.sleep(0.1)
tsi = self.api.get_latest_time()
assert tsi > last, "Iter {}: {} should be greater than {}".format(
i, tsi, last
)
last = tsi
t1.join()
t2.join()
tsn_1 = self.api.get_latest_time()
assert (
tsn_1 > last
), "The final timestamp, {}, should be greater than {}.".format(tsn_1, last)
time.sleep(0.1)
tsn_2 = self.api.get_latest_time()
assert (
tsn_2 == tsn_1
), "The timestamp should not have changed, while no data was streaming."
def test_clear_histories(self):
eventHistory = self.api.get_event_test_history()
channelHistory = self.api.get_telemetry_test_history()
commandHistory = self.api.get_command_test_history()
self.api.clear_histories()
assert eventHistory.size() == 0, "eventHistory should be empty"
assert channelHistory.size() == 0, "channelHistory should be empty"
count_seq = self.get_counter_sequence(100)
event_seq = self.get_severity_sequence(100)
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.02)
t2 = self.fill_history_async(self.pipeline.enqueue_event, event_seq, 0.02)
t1.join()
t2.join()
sizeE = eventHistory.size()
iE = sizeE // 2
firstE = eventHistory[iE]
timeE = firstE.get_time()
sizeC = channelHistory.size()
iC = 0
for i in range(0, channelHistory.size()):
if channelHistory[i].get_time() >= timeE:
iC = i
break
firstC = channelHistory[iC]
self.api.clear_histories(timeE)
msg = "The event history should have been reduced by {} elements".format(iE)
assert sizeE - iE == eventHistory.size(), msg
msg = "The element with the timestamp should be first in the history"
assert firstE is eventHistory[0], msg
msg = "The channel history should have been reduced by {} elements".format(iC)
assert sizeC - iC == channelHistory.size(), msg
msg = "The first element in the history should be the first with a valid time"
assert firstC is channelHistory[0], msg
args1 = []
self.api.send_command("apiTester.TEST_CMD_1", args1)
assert commandHistory.size() > 0, "history size should be greater than 0"
assert channelHistory.size() > 0, "history size should be greater than 0"
assert eventHistory.size() > 0, "history size should be greater than 0"
self.api.clear_histories()
assert commandHistory.size() == 0, "history size should be 0"
assert channelHistory.size() == 0, "history size should be 0"
assert eventHistory.size() == 0, "history size should be 0"
def test_registering_and_removing_subhistories(self):
# Verifying that retrieving a subhistory for events behaves as expected
event_hist = self.api.get_event_test_history()
self.pipeline.enqueue_event(self.get_severity_event())
assert event_hist.size() == 1, "There should be one event in the api's history"
event_subhist = self.api.get_event_subhistory()
assert event_subhist.size() == 0, "There should be no events in the subhistory"
self.pipeline.enqueue_event(self.get_severity_event())
assert event_hist.size() == 2, "There should be two events in the api's history"
assert event_subhist.size() == 1, "There should be one event in the subhistory"
assert self.api.remove_event_subhistory(event_subhist), "remove should succeed"
self.pipeline.enqueue_event(self.get_severity_event())
assert (
event_hist.size() == 3
), "There should be three events in the api's history"
assert event_subhist.size() == 1, "There should be one event in the subhistory"
self.api.clear_histories()
assert event_hist.size() == 0, "There should be no events in the api's history"
assert event_subhist.size() == 1, "There should be one event in the subhistory"
assert not self.api.remove_event_subhistory(
event_subhist
), "should not remove twice"
# same checks, but for telemetry
telem_seq = self.get_counter_sequence(3)
telem_hist = self.api.get_telemetry_test_history()
self.pipeline.enqueue_telemetry(telem_seq[0])
assert telem_hist.size() == 1, "There should be one update in the api's history"
telem_subhist = self.api.get_telemetry_subhistory()
assert telem_subhist.size() == 0, "There should be no updates in the subhistory"
self.pipeline.enqueue_telemetry(telem_seq[1])
assert (
telem_hist.size() == 2
), "There should be two updates in the api's history"
assert telem_subhist.size() == 1, "There should be one update in the subhistory"
assert self.api.remove_telemetry_subhistory(
telem_subhist
), "remove should succeed"
self.pipeline.enqueue_telemetry(telem_seq[2])
assert (
telem_hist.size() == 3
), "There should be three updates in the api's history"
assert telem_subhist.size() == 1, "There should be one update in the subhistory"
self.api.clear_histories()
assert telem_hist.size() == 0, "There should be no updates in the api's history"
assert telem_subhist.size() == 1, "There should be one update in the subhistory"
assert not self.api.remove_telemetry_subhistory(
telem_subhist
), "should not remove twice"
def test_translate_command_name(self):
assert self.api.translate_command_name("apiTester.TEST_CMD_1") == 1
assert self.api.translate_command_name("apiTester.TEST_CMD_2") == 2
assert self.api.translate_command_name("apiTester.TEST_CMD_3") == 3
assert self.api.translate_command_name(1) == 1
assert self.api.translate_command_name(2) == 2
assert self.api.translate_command_name(3) == 3
try:
self.api.translate_command_name("DOES_NOT_EXIST")
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
try:
self.api.translate_command_name(0)
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
def test_send_command(self):
args1 = []
self.api.send_command("apiTester.TEST_CMD_1", args1)
self.api.send_command(1, args1)
args2 = ["0x01", "0x02"]
self.api.send_command("apiTester.TEST_CMD_2", args2)
self.api.send_command(2, args2)
args3 = ["test message for the test command"]
self.api.send_command("apiTester.TEST_CMD_3", args3)
self.api.send_command(3, args3)
hist = self.api.get_command_test_history()
assert hist.size() == 6
for cmd in hist:
print(cmd)
def test_send_and_await_telemetry(self):
result = self.api.send_and_await_telemetry(
"apiTester.TEST_CMD_1", channels="CommandCounter"
)
assert (
result is not None
), "the search should find the telemetry generated by UTPipeline"
self.api.clear_histories()
seq = ["CommandCounter"] + ["Counter"] * 5
self.fill_history_async(
self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
)
results1 = self.api.send_and_await_telemetry(
"apiTester.TEST_CMD_1", channels=seq
)
assert len(results1) == 6, "Should have gotten 6 results out of the await"
self.fill_history_async(
self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
)
results2 = self.api.send_and_await_telemetry(
"apiTester.TEST_CMD_1", channels=seq
)
assert len(results2) == 6, "Should have gotten 6 results out of the await"
for i in range(0, 6):
assert results1[i] != results2[i], "These sequences should be unique items"
self.api.clear_histories()
seq = ["CommandCounter"] + ["Oscillator"] * 5
self.fill_history_async(
self.pipeline.enqueue_telemetry, self.get_oscillator_sequence(10), 0.01
)
results = self.api.send_and_await_telemetry(
"apiTester.TEST_CMD_1", channels=seq
)
assert len(results) == 6, "Should have gotten 6 results out of the await"
def test_send_and_await_event(self):
result = self.api.send_and_await_event(
"apiTester.TEST_CMD_1", events="CommandReceived"
)
assert (
result is not None
), "the search should have found the CommandReceived Event"
self.api.clear_histories()
seq = ["CommandReceived"] + ["SeverityDIAGNOSTIC"] * 5
self.fill_history_async(
self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
)
results1 = self.api.send_and_await_event("apiTester.TEST_CMD_1", events=seq)
assert len(results1) == 6, "Should have gotten 6 results out of the await"
self.fill_history_async(
self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
)
results2 = self.api.send_and_await_event("apiTester.TEST_CMD_1", events=seq)
assert len(results2) == 6, "Should have gotten 6 results out of the await"
for i in range(0, 6):
assert results1[i] != results2[i], "These sequences should be unique items"
def test_send_and_assert_telemetry(self):
self.api.send_and_assert_telemetry(
"apiTester.TEST_CMD_1", channels="CommandCounter"
)
self.api.clear_histories()
seq = ["CommandCounter"] + ["Counter"] * 5
self.fill_history_async(
self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
)
results1 = self.api.send_and_assert_telemetry(
"apiTester.TEST_CMD_1", channels=seq, timeout=5
)
self.fill_history_async(
self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01
)
results2 = self.api.send_and_assert_telemetry(
"apiTester.TEST_CMD_1", channels=seq, timeout=5
)
for i in range(0, 6):
assert results1[i] != results2[i], "These sequences should be unique items"
self.api.clear_histories()
seq = ["CommandCounter"] + ["Oscillator"] * 5
self.fill_history_async(
self.pipeline.enqueue_telemetry, self.get_oscillator_sequence(10), 0.01
)
self.api.send_and_assert_telemetry(
"apiTester.TEST_CMD_1", channels=seq, timeout=5
)
def test_send_and_assert_event(self):
self.api.send_and_assert_event("apiTester.TEST_CMD_1", events="CommandReceived")
self.api.clear_histories()
seq = ["CommandReceived"] + ["SeverityDIAGNOSTIC"] * 5
self.fill_history_async(
self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
)
results1 = self.api.send_and_assert_event(
"apiTester.TEST_CMD_1", events=seq, timeout=5
)
self.fill_history_async(
self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01
)
results2 = self.api.send_and_assert_event(
"apiTester.TEST_CMD_1", events=seq, timeout=5
)
for i in range(0, 6):
assert results1[i] != results2[i], "These sequences should be unique items"
def test_translate_telemetry_name(self):
assert self.api.translate_telemetry_name("CommandCounter") == 1
assert self.api.translate_telemetry_name("Oscillator") == 2
assert self.api.translate_telemetry_name("Counter") == 3
assert self.api.translate_telemetry_name(1) == 1
assert self.api.translate_telemetry_name(2) == 2
assert self.api.translate_telemetry_name(3) == 3
try:
self.api.translate_command_name("DOES_NOT_EXIST")
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
try:
            self.api.translate_telemetry_name(0)
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
def test_get_telemetry_pred(self):
pred = predicates.telemetry_predicate()
result = self.api.get_telemetry_pred(pred)
        assert pred == result, "the same predicate should be returned when it is already a telemetry predicate"
update = self.get_counter_sequence(1)[0]
pred = self.api.get_telemetry_pred(update.get_id(), update.get_val())
assert pred(update), "predicate should return true when fields are specified"
def test_await_telemetry(self):
seq = self.get_counter_sequence(20)
self.fill_history_async(self.pipeline.enqueue_telemetry, seq[0:10], 0.01)
result = self.api.await_telemetry("Counter", 8)
assert (
result is not None
), "Await should have found a correct channel update: {}".format(result)
time.sleep(1)
self.fill_history_async(self.pipeline.enqueue_telemetry, seq[10:20], 0.01)
result = self.api.await_telemetry("Counter", 8)
assert result is None, "Await should not have found an update: {}".format(
result
)
self.api.clear_histories()
self.fill_history_async(self.pipeline.enqueue_telemetry, seq, 0.1)
result = self.api.await_telemetry("Counter", 15, timeout=1)
assert result is None, "Await should not have found an update: {}".format(
result
)
def test_await_telemetry_sequence(self):
count_seq = self.get_counter_sequence(20)
sin_seq = self.get_oscillator_sequence(100)
search_seq = []
for i in range(15, 20):
pred = self.api.get_telemetry_pred("Counter", i)
search_seq.append(pred)
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
results = self.api.await_telemetry_sequence(search_seq)
assert len(results) == len(search_seq), "lists should have the same length"
for i in range(0, len(results)):
msg = predicates.get_descriptive_string(results[i], search_seq[i])
assert search_seq[i](results[i]), msg
t1.join()
t2.join()
results = self.api.await_telemetry_sequence(search_seq)
assert len(results) < len(
search_seq
), "repeating the search should not complete"
self.api.clear_histories()
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
results = self.api.await_telemetry_sequence(search_seq, timeout=1)
assert len(results) < len(
search_seq
), "repeating the search should not complete"
t1.join()
t2.join()
def test_await_telemetry_count(self):
count_seq = self.get_counter_sequence(20)
sin_seq = self.get_oscillator_sequence(100)
pred = predicates.greater_than_or_equal_to(10)
search_pred = self.api.get_telemetry_pred("Counter", pred)
count_pred = predicates.within_range(10, 20)
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
results = self.api.await_telemetry_count(count_pred, search_pred)
msg = predicates.get_descriptive_string(len(results), count_pred)
assert count_pred(len(results)), msg
t1.join()
t2.join()
self.api.clear_histories()
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
results = self.api.await_telemetry_count(100)
assert len(results) == 100, "await count should have found 100 items"
t1.join()
t2.join()
self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.02)
results = self.api.await_telemetry_count(100, timeout=1)
        assert len(results) < 100, "await count should have found fewer than 100 items"
def test_assert_telemetry(self):
seq = self.get_counter_sequence(20)
self.fill_history_async(self.pipeline.enqueue_telemetry, seq[0:10], 0.01)
self.api.assert_telemetry("Counter", 8, timeout=1)
time.sleep(1)
self.fill_history_async(self.pipeline.enqueue_telemetry, seq[10:20], 0.01)
try:
self.api.assert_telemetry("Counter", 8, start="NOW", timeout=1)
raise self.AssertionFailure()
except AssertionError:
assert True, "api raised the correct error"
except self.AssertionFailure:
assert False, "api failed to raise an assertion error"
self.api.clear_histories()
self.fill_history_async(self.pipeline.enqueue_telemetry, seq, 0.1)
try:
self.api.assert_telemetry("Counter", 15, timeout=1)
raise self.AssertionFailure()
except AssertionError:
assert True, "api raised the correct error"
except self.AssertionFailure:
assert False, "api failed to raise an assertion error"
def test_assert_telemetry_sequence(self):
count_seq = self.get_counter_sequence(20)
sin_seq = self.get_oscillator_sequence(100)
search_seq = []
for i in range(15, 20):
pred = self.api.get_telemetry_pred("Counter", i)
search_seq.append(pred)
self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=5)
self.api.assert_telemetry_sequence(search_seq)
time.sleep(1)
try:
self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=5)
raise self.AssertionFailure()
except AssertionError:
assert True, "api raised the correct error"
except self.AssertionFailure:
assert False, "api failed to raise an assertion error"
self.api.clear_histories()
self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.07)
self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
try:
self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=1)
raise self.AssertionFailure()
except AssertionError:
assert True, "api raised the correct error"
except self.AssertionFailure:
assert False, "api failed to raise an assertion error"
def test_assert_telemetry_count(self):
count_seq = self.get_counter_sequence(20)
sin_seq = self.get_oscillator_sequence(100)
pred = predicates.greater_than_or_equal_to(10)
search_pred = self.api.get_telemetry_pred("Counter", pred)
count_pred = predicates.within_range(10, 20)
self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
self.api.assert_telemetry_count(count_pred, search_pred, timeout=2)
self.api.assert_telemetry_count(count_pred, search_pred)
self.api.clear_histories()
t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01)
self.api.assert_telemetry_count(100, timeout=2)
t1.join()
t2.join()
try:
self.api.assert_telemetry_count(100)
raise self.AssertionFailure()
except AssertionError:
assert True, "api raised the correct error"
except self.AssertionFailure:
assert False, "api failed to raise an assertion error"
self.api.clear_histories()
self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05)
self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.02)
try:
self.api.assert_telemetry_count(100, timeout=1)
raise self.AssertionFailure()
except AssertionError:
assert True, "api raised the correct error"
except self.AssertionFailure:
assert False, "api failed to raise an assertion error"
def test_translate_event_name(self):
assert self.api.translate_event_name("CommandReceived") == 1
assert self.api.translate_event_name("HistorySizeUpdate") == 2
assert self.api.translate_event_name("SeverityCOMMAND") == 3
assert self.api.translate_event_name("SeverityACTIVITY_LO") == 4
assert self.api.translate_event_name("SeverityACTIVITY_HI") == 5
assert self.api.translate_event_name("SeverityWARNING_LO") == 6
assert self.api.translate_event_name("SeverityWARNING_HI") == 7
assert self.api.translate_event_name("SeverityDIAGNOSTIC") == 8
assert self.api.translate_event_name("SeverityFATAL") == 9
for i in range(1, 10):
assert self.api.translate_event_name(i) == i
try:
self.api.translate_event_name("DOES_NOT_EXIST")
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
try:
self.api.translate_event_name(0)
assert False, "the api should have raised a KeyError"
except KeyError:
assert True, "the api raised the correct error"
def test_get_event_pred(self):
pred = predicates.event_predicate()
result = self.api.get_event_pred(pred)
        assert pred == result, "the same predicate should be returned when it is already an event predicate"
message = self.get_severity_event("FATAL")
pred = self.api.get_event_pred(
message.get_id(), message.get_args(), message.get_severity()
)
assert pred(message), "predicate should return true when fields are specified"
"""
def test_await_event(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_await_event_sequence(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_await_event_count(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_assert_event(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_assert_event_sequence(self):
raise NotImplementedError("Test Case is not yet implemented")
def test_assert_event_count(self):
raise NotImplementedError("Test Case is not yet implemented")
"""
if __name__ == "__main__":
unittest.main()
|
main.py
|
import shutil
from datetime import datetime, date
from time import gmtime, strftime
import zipfile
import re
import glob
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import tkinter.ttk as ttk
from tkinter import filedialog
import os
import hashlib
import tkinter as tk
import threading
from xlwt import Workbook
import pandas
orari = []
user = os.environ["USERNAME"]
window = tk.Tk()
window.geometry("900x625")
window.title("WhatsApp Scraper")
window.grid_columnconfigure(0, weight=1)
window.resizable(False, False)
chromeDriverPath = os.path.dirname(os.path.abspath(__file__))
pyExePath = os.path.dirname(os.path.abspath(__file__))
NAMES = []
log_dict = {}
language = 'italian'
window.iconbitmap('whatsapp.ico')
wb = Workbook()
sheet1 = wb.add_sheet('Hash', cell_overwrite_ok=True)
nRow = 4
countVideo = 1
countImage = 1
countAudio = 1
countDoc = 1
sheet1.write(0, 0, 'WhatsappScraper_v.1')
sheet1.write(0, 1, 'https://github.com/fpollicelli/whatsappScraper.git')
sheet1.write(0, 2, 'Authors: Francesca Pollicelli')
sheet1.write(0, 3, 'Domenico Palmisano')
sheet1.write(1, 0, 'File')
sheet1.write(1, 1, 'Timestamp')
sheet1.write(1, 2, 'MD5')
sheet1.write(1, 3, 'SHA512')
wb.save(pyExePath + '\\log.xls')
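# --- Hedged helper sketch (illustration only; not part of the original scraper) ---
# The workbook above records File / Timestamp / MD5 / SHA512 per scraped file and
# hashlib is already imported, so one log row could be built like this. The
# timestamp format is an assumption.
def example_hash_row(path):
    with open(path, 'rb') as fh:
        data = fh.read()
    return (os.path.basename(path),
            datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            hashlib.md5(data).hexdigest(),
            hashlib.sha512(data).hexdigest())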
def detectLanguage(driver):
global language
try:
element = WebDriverWait(driver, 50).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="app"]/div/div/div[4]/div/div/div[2]/h1'))
)
welcome = driver.find_element_by_xpath('//*[@id="app"]/div/div/div[4]/div/div/div[2]/h1')
welcome = welcome.get_attribute("innerHTML")
if welcome == 'Keep your phone connected':
language = 'english'
else:
language = 'italian'
except:
language = 'italian'
return
def findChromeDriver():
for root, dirs, files in os.walk(chromeDriverPath):
if "chromedriver.exe" in files:
return os.path.join(root, "chromedriver.exe")
def openChrome():
global nRow
x = pyExePath.replace('/', '\\')
    options = webdriver.ChromeOptions()  # establish the connection with WhatsApp Web
options.add_experimental_option("prefs", {
"download.default_directory": r"" + x + "\Scraped\Media",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True
})
options.add_argument("--remote-debugging-port=9222")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
# CREAZIONE PROFILO SOLO PER DEBUG
# '''
options.add_argument(
"user-data-dir=C:\\Users\\" + user + "\\AppData\\Local\\Google\\Chrome\\User Data\\Profile 1") # crea un nuovo profilo utente in chrome per scansionare il qw
# '''
args = ["hide_console", ]
driver = webdriver.Chrome(options=options, executable_path=findChromeDriver(), service_args=args)
# open WhatsApp Web in the browser
driver.get('http://web.whatsapp.com')
try:
element = WebDriverWait(driver, 50).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="side"]/header/div[1]/div/img'))
)
except:
if language == 'italian':
text = 'impossibile connettersi a WhatsApp Web'
else:
text = 'unable to connect to WhatsApp Web'
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
for key, value in log_dict.items():
sheet1.write(nRow, 0, value)
sheet1.write(nRow, 1, key)
nRow = nRow + 1
driver.close()
wb.save(pyExePath + '\\log.xls')
return driver
def readMessages(name, driver):
global countVideo
global countImage
global countAudio
global countDoc
timezone = strftime("GMT%z", gmtime())
timezone = timezone[:-2]
if language == 'italian':
text = "scraping dei messaggi in corso..."
else:
text = 'scraping messages in progress'
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
dir = pyExePath + '/Scraped/Chat/'
if not os.path.exists(dir):
os.makedirs(dir)
f = open(dir + name + '.csv', 'w', encoding='utf-8')
if language == 'italian':
f.write('Data,Ora,Mittente,Messaggio\n')
else:
f.write('Date,Time,Sender,Message\n')
trovato = False
while trovato == False:
try:
element = driver.find_element_by_xpath("//*[@id='main']/div[3]/div/div/div[2]/div[2]/div/div/div/span/span")
trovato = True
except:
trovato = False
driver.find_element_by_xpath("//*[@id='main']/div[3]/div/div").send_keys(Keys.CONTROL + Keys.HOME)
messageContainer = driver.find_elements_by_xpath("//div[contains(@class,'message-')]")
for messages in messageContainer:
if (save_media.get() == 1):
if language == 'italian':
text = "salvataggio degli audio in corso..."
else:
text = 'scraping audio...'
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
try:
vocal = messages.find_element_by_xpath(".//span[contains(@data-testid,'ptt-status')]")
vocal.click()
try:
time.sleep(5)
down = messages.find_element_by_xpath(".//span[contains(@data-testid,'audio-download')]")
down.click()
time.sleep(5)
try:
element = WebDriverWait(driver, 50).until(
EC.presence_of_element_located(
(By.XPATH, ".//span[contains(@data-testid,'audio-play')]"))
)
except:
if language == 'italian':
text = "impossibile scaricare l'audio"
else:
text = 'unable to download the audio'
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
except:
pass
downContext = messages.find_element_by_xpath(".//span[contains(@data-testid,'down-context')]")
downContext.click()
if language == 'italian':
button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located(
(By.XPATH, ".//div[contains(@aria-label,'Scarica')]")))
else:
button = WebDriverWait(driver, 30).until(expected_conditions.presence_of_element_located(
(By.XPATH, ".//div[contains(@aria-label,'Download')]")))
button.click()
except:
pass
try:
try: # SAVE DOCUMENTS TO CSV
download = messages.find_element_by_xpath(
".//button[contains(@title,'Scarica')]")
except:
pass
else:
# fetch the message metadata ("[time, date] sender:") before parsing it
info = messages.find_element_by_xpath(".//div[contains(@data-pre-plain-text,'[')]")
info = info.get_attribute("data-pre-plain-text")
oraData = info[info.find('[') + 1: info.find(']') + 1]
if language == 'english':
data = oraData[oraData.find(' ') + 4: oraData.find(']')]
else:
data = oraData[oraData.find(' ') + 1: oraData.find(']')]
ora = oraData[oraData.find('[') + 1: oraData.find(',')]
orari.append(ora)
mittente = info.split(']')[1].strip()
mittente = mittente.split(':')[0].strip()
download = download.get_attribute('title')
if language == 'italian':
download = download[9:-1]
else:
download = download[10:-1]
if len(download) > 90:
download = download[:90]
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, "Doc: " + download + '...'))
else:
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, "Doc: " + download))
finalMessage = data + "," + ora + " " + timezone + "," + mittente + "," + "Doc: " + download
window.update()
f.write(finalMessage)
f.write('\n')
try: # SAVE AUDIO TO CSV
audio = messages.find_element_by_xpath(".//span[contains(@data-testid,'ptt-status')]")
# WhatsApp Ptt 2021-02-17 at 17.17.26.ogg
# fetch the message metadata before parsing date, time and sender
info = messages.find_element_by_xpath(".//div[contains(@data-pre-plain-text,'[')]")
info = info.get_attribute("data-pre-plain-text")
oraData = info[info.find('[') + 1: info.find(']') + 1]
if language == 'english':
data = oraData[oraData.find(' ') + 4: oraData.find(']')]
else:
data = oraData[oraData.find(' ') + 1: oraData.find(']')]
ora = oraData[oraData.find('[') + 1: oraData.find(',')]
orari.append(ora)
mittente = info.split(']')[1].strip()
mittente = mittente.split(':')[0].strip()
audio_name = "WhatsApp Audio " + str(countAudio) + ".ogg"
countAudio = countAudio + 1
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, audio_name))
finalMessage = data + "," + ora + " " + timezone + "," + mittente + "," + "Audio: " + audio_name
window.update()
f.write(finalMessage)
f.write('\n')
except:
pass
try: # SAVE IMAGES TO CSV
img = messages.find_element_by_xpath(".//img[contains(@src,'blob')]")
except:
pass
else:
info = messages.find_element_by_xpath(".//div[contains(@data-pre-plain-text,'[')]")
info = info.get_attribute("data-pre-plain-text")
oraData = info[info.find('[') + 1: info.find(']') + 1]
if language == 'english':
data = oraData[oraData.find(' ') + 4: oraData.find(']')]
else:
data = oraData[oraData.find(' ') + 1: oraData.find(']')]
ora = oraData[oraData.find('[') + 1: oraData.find(',')]
orari.append(ora)
mittente = info.split(']')[1].strip()
mittente = mittente.split(':')[0].strip()
img_name = "WhatsApp Image " + str(countImage) + ".jpeg"
countImage = countImage + 1
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, img_name))
finalMessage = data + "," + ora + " " + timezone + "," + mittente + "," + "Img: " + img_name
window.update()
f.write(finalMessage)
f.write('\n')
try: # SAVE VIDEOS TO CSV
video = messages.find_element_by_xpath(".//span[contains(@data-testid,'media')]")
except:
pass
else:
info = messages.find_element_by_xpath(".//div[contains(@data-pre-plain-text,'[')]")
info = info.get_attribute("data-pre-plain-text")
oraData = info[info.find('[') + 1: info.find(']') + 1]
if language == 'english':
data = oraData[oraData.find(' ') + 4: oraData.find(']')]
else:
data = oraData[oraData.find(' ') + 1: oraData.find(']')]
ora = oraData[oraData.find('[') + 1: oraData.find(',')]
orari.append(ora)
mittente = info.split(']')[1].strip()
mittente = mittente.split(':')[0].strip()
video_name = "WhatsApp Video " + str(countVideo) + ".mp4"
countVideo = countVideo + 1
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, video_name))
finalMessage = data + "," + ora + " " + timezone + "," + mittente + "," + video_name
window.update()
f.write(finalMessage)
f.write('\n')
message = messages.find_element_by_xpath(
".//span[contains(@class,'selectable-text copyable-text')]"
).text
emojis = messages.find_elements_by_xpath(
".//img[contains(@class,'selectable-text copyable-text')]")
if len(emojis) != 0:
for emoji in emojis:
message = message + emoji.get_attribute("data-plain-text")
info = messages.find_element_by_xpath(".//div[contains(@data-pre-plain-text,'[')]")
info = info.get_attribute("data-pre-plain-text")
oraData = info[info.find('[') + 1: info.find(']') + 1]
if language == 'english':
data = oraData[oraData.find(' ') + 4: oraData.find(']')]
else:
data = oraData[oraData.find(' ') + 1: oraData.find(']')]
ora = oraData[oraData.find('[') + 1: oraData.find(',')]
orari.append(ora)
mittente = info.split(']')[1].strip()
mittente = mittente.split(':')[0].strip()
message = message.replace("\n", " ")
if len(message) > 90:
trimMessage = message[:90]
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, trimMessage + '...'))
else:
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, message))
finalMessage = data + "," + ora + " " + timezone + "," + mittente + "," + message
window.update()
f.write(finalMessage)
f.write('\n')
# only emojis in the message
except NoSuchElementException:
try:
for emoji in messages.find_elements_by_xpath(
".//img[contains(@class,'selectable-text copyable-text')]"):
info = messages.find_element_by_xpath(".//div[contains(@data-pre-plain-text,'[')]")
info = info.get_attribute("data-pre-plain-text")
message = emoji.get_attribute("data-plain-text")
oraData = info[info.find('[') + 1: info.find(']') + 1]
ora = oraData[oraData.find('[') + 1: oraData.find(',')]
orari.append(ora)
data = oraData[oraData.find(' ') + 1: oraData.find(']')]
mittente = info.split(']')[1].strip()
mittente = mittente.split(':')[0].strip()
message = message.replace("\n", " ")
if len(message) > 90:
trimMessage = message[:90]
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, trimMessage + '...'))
else:
tree.insert("", 0, values=(data, ora + " " + timezone, mittente, message))
finalMessage = data + "," + ora + " " + timezone + "," + mittente + "," + message
window.update()
f.write(finalMessage)
f.write('\n')
except NoSuchElementException:
pass
f.close()
if language == 'italian':
text = "generazione del doppio hash della chat in corso..."
else:
text = 'generating double hash...'
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
return
def getDateTime():
now = datetime.now()
if language == 'english':
today = date.today().strftime("%m/%d/%Y")
else:
today = date.today().strftime("%d/%m/%Y")
current_time = now.strftime("%H:%M:%S")
timezone = strftime("GMT%z", gmtime())
dateTime = today + ' ' + current_time + ' ' + timezone
return dateTime
def getChatLabels():
global nRow
if choose_label["text"] == '':
del NAMES[:]
if language == 'italian':
text = "apertura di WhatsApp Web in corso..."
else:
text = 'opening WhatsApp Web...'
try:
f = open(pyExePath + "\\chat.zip", 'r')
except:
pass
else:
f.close()
os.remove(pyExePath + "\\chat.zip")
try:
f = open(pyExePath + "\\hashing.csv", 'r')
except:
pass
else:
f.close()
os.remove(pyExePath + "\\hashing.csv")
try:
f = open(pyExePath + "\\media.zip", 'r')
except:
pass
else:
f.close()
os.remove(pyExePath + "\\media.zip")
try:
f = open(pyExePath + "\\log.xls", 'r')
except:
pass
else:
f.close()
os.remove(pyExePath + "\\log.xls")
log_dict.clear()
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
tree.delete(*tree.get_children())
driver = openChrome()
chatLabels = []
chatName = []
chatLabels_nodups = []
archiviat = 0
toArch = []
detectLanguage(driver)
if len(NAMES) != 0:
for i in range(0, len(NAMES)):
if 'str' in NAMES[i]:
break
try:
found = driver.find_element_by_xpath(".//span[contains(@title,'" + NAMES[i] + "')]")
chatLabels.append(found)
except:
pass
try:
menuDots = driver.find_element_by_xpath("//*[@id='side']/header/div[2]/div/span/div[3]/div/span")
menuDots.click()
archiv = driver.find_element_by_xpath("//*[@id='side']/header/div[2]/div/span/div[3]/span/div/ul/li[4]/div")
archiv.click()
time.sleep(2)
for i in range(0, len(NAMES)):
try:
recent = driver.find_element_by_xpath(
('//*[@id="app"]/div/div/div[2]/div[1]/span/div/span/div/div'))
found = recent.find_element_by_xpath(".//span[contains(@title,'" + NAMES[i] + "')]")
actionChains = ActionChains(driver)
actionChains.context_click(found).perform()
time.sleep(2)
estrai = driver.find_element_by_xpath('//*[@id="app"]/div/span[4]/div/ul/li[1]/div')
estrai.click()
time.sleep(10)
toArch.append(NAMES[i])
archiviat = 1
except:
if language == 'italian':
text = "errore: contatto non trovato"
else:
text = "error: can't find the contact"
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
goBack = driver.find_element_by_xpath(
'//*[@id="app"]/div/div/div[2]/div[1]/span/div/span/div/header/div/div[1]/button/span')
goBack.click()
for i in range(0, len(toArch)):
found = driver.find_element_by_xpath(".//span[contains(@title,'" + toArch[i] + "')]")
chatLabels.append(found)
except:
pass
iterChatList(chatLabels, driver)
if archiviat == 1:
if language == 'italian':
text = "spostamento delle chat de-archiviate in archivio in corso..."
else:
text = "moving de-archived chats to archive..."
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
archiviaChat(toArch, driver)
if language == 'italian':
text = "scraping terminato con successo"
else:
text = "scraping successfully completed"
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
choose_label.configure(text="")
window.update()
driver.close()
wb.save(pyExePath + '\\log.xls')
path = pyExePath + '\\Scraped\\Chat'
create_zip(path, 'chat.zip')
zip_hasher('chat.zip')
if save_media.get() == 1:
path = pyExePath + '\\Scraped\\Media'
renameMedia()
create_zip(path, 'media.zip')
zip_hasher('media.zip')
path = pyExePath + '\\Scraped'
shutil.rmtree(path)
open_folder = os.path.realpath(pyExePath)
os.startfile(open_folder)
read_file = pandas.read_excel(pyExePath + '\\log.xls')
read_file.to_csv(pyExePath + '\\hashing.csv', index=None, header=True)
return
if (archiviate.get() == 1):
if language == 'italian':
text = "spostamento delle chat archiviate in generali in corso..."
else:
text = "moving archived chats in general..."
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
chatLabelsDeArch = moveArchiviate(driver)
recentList = driver.find_elements_by_xpath('//*[@id="pane-side"]/div[1]/div/div/div')
for list in recentList:
chatLabels.append(list)
time.sleep(7)
label = list.find_elements_by_xpath('.//span[contains(@dir,"auto")]')
time.sleep(5)
chatName.append(label[0].get_attribute('title'))
chatLabels.sort(key=lambda x: int(x.get_attribute('style').split("translateY(")[1].split('px')[0]),
reverse=False)
# at this point we have the n visible chat rows
for x in chatLabels:
driver.execute_script("arguments[0].scrollIntoView();", x)
recentList_scrolled = driver.find_elements_by_xpath('//*[@id="pane-side"]/div[1]/div/div/div')
# the rows have been scrolled into view; collect them again
for list_scrolled in recentList_scrolled:
chatLabels.append(list_scrolled)
label = list_scrolled.find_elements_by_xpath('.//span[contains(@dir,"auto")]')
chatName.append(label[0].get_attribute('title'))
inds = []
seen = set()
for i, ele in enumerate(chatName):
if ele not in seen:
inds.append(i)
seen.add(ele)
chatLabels = [i for j, i in enumerate(chatLabels) if j in inds]
chatLabels.sort(key=lambda x: int(x.get_attribute('style').split("translateY(")[1].split('px')[0]), reverse=False)
iterChatList(chatLabels, driver)
if (archiviate.get() == 1):
if language == 'italian':
text = "spostamento delle chat de-archiviate in archivio in corso..."
else:
text = "moving de-archived chats to archive..."
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
archiviaChat(chatLabelsDeArch, driver)
if language == 'italian':
text = "scraping terminato con successo"
else:
text = "scraping successfully completed"
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
choose_label.configure(text="")
window.update()
for key, value in log_dict.items():
sheet1.write(nRow, 0, value)
sheet1.write(nRow, 1, key)
nRow = nRow + 1
driver.close()
wb.save(pyExePath + '\\log.xls')
path = pyExePath + '\\Scraped\\Chat'
create_zip(path, 'chat.zip')
zip_hasher('chat.zip')
if save_media.get() == 1:
path = pyExePath + '\\Scraped\\Media'
renameMedia()
create_zip(path, 'media.zip')
zip_hasher('media.zip')
path = pyExePath + '\\Scraped'
shutil.rmtree(path)
open_folder = os.path.realpath(pyExePath)
os.startfile(open_folder)
del NAMES[:]
read_file = pandas.read_excel(pyExePath + '\\log.xls')
read_file.to_csv(pyExePath + '\\hashing.csv', index=None, header=True)
return
def renameMedia():
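# Rename the downloaded media files to sequential "WhatsApp Audio/Image/Video N" names so they match the entries written to the chat CSVs.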
audio = 1
image = 1
video = 1
os.chdir(pyExePath + '\\Scraped\\Media\\')
for file in glob.glob("*.ogg"):
os.rename(file, "WhatsApp Audio " + str(audio) + ".ogg")
audio = audio + 1
for file in glob.glob("*.jpeg"):
os.rename(file, "WhatsApp Image " + str(image) + ".jpeg")
image = image + 1
for file in glob.glob("*.mp4"):
os.rename(file, "WhatsApp Video " + str(video) + ".mp4")
video = video + 1
os.chdir(pyExePath)
return
def archiviaChat(chatLabelsDeArch, driver):
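# Move the given chats back into the archive via the right-click context menu.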
for chat in chatLabelsDeArch:
chatElement = driver.find_element_by_xpath("//span[contains(@title,'" + chat + "')]")
actionChains = ActionChains(driver)
actionChains.context_click(chatElement).perform()
archivia = driver.find_element_by_xpath('//*[@id="app"]/div/span[4]/div/ul/li[1]/div')
time.sleep(2)
archivia.click()
time.sleep(10)
return
def iterChatList(chatLabels, driver):
if language == 'italian':
text = "caricamento delle chat in corso..."
else:
text = "loading chats..."
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
for chat in chatLabels:
chat.click()
try:
element = WebDriverWait(driver, 40).until(
EC.presence_of_element_located((By.XPATH, '//*[@id="main"]/header/div[2]/div[1]/div/span'))
)
label = chat.find_elements_by_xpath('//*[@id="main"]/header/div[2]/div[1]/div/span')
chatName = label[0].get_attribute('title')
if len(chatName) == 0:
label = chat.find_elements_by_xpath(
'//*[@id="main"]/header/div[2]/div[1]/div/span/span') # se il nome contiene un'emoji, va nello span di sotto
chatName = label[0].get_attribute('title')
readMessages(chatName, driver)
if (save_media.get() == 1):
saveMedia(chatName, driver)
if language == 'italian':
text = "generazione del doppio hash della chat in corso..."
else:
text = 'generating double hash...'
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
except:
if language == 'italian':
text = "impossibile caricare le chat"
else:
text = 'failed loading chats'
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
return
def saveMedia(name, driver):
menu = driver.find_element_by_xpath("(//div[@title='Menu'])[2]")
menu.click()
info = driver.find_element_by_xpath('//*[@id="main"]')
try:
if language == 'italian':
element = WebDriverWait(driver, 3).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@aria-label,'Info gruppo')]"))
)
info = driver.find_element_by_xpath("//div[contains(@aria-label,'Info gruppo')]")
info.click()
else:
element = WebDriverWait(driver, 3).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@aria-label,'Group info')]"))
)
info = driver.find_element_by_xpath("//div[contains(@aria-label,'Group info')]")
info.click()
except:
try:
if language == 'italian':
element = WebDriverWait(driver, 3).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@aria-label,'Info contatto')]"))
)
info = driver.find_element_by_xpath("//div[contains(@aria-label,'Info contatto')]")
info.click()
else:
element = WebDriverWait(driver, 3).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@aria-label,'Contact info')]"))
)
info = driver.find_element_by_xpath("//div[contains(@aria-label,'Contact info')]")
info.click()
except:
try:
if language == 'italian':
element = WebDriverWait(driver, 3).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@aria-label,'Info lista broadcast')]"))
)
info = driver.find_element_by_xpath("//div[contains(@aria-label,'Info lista broadcast')]")
info.click()
else:
element = WebDriverWait(driver, 3).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@aria-label,'Broadcast list info')]"))
)
info = driver.find_element_by_xpath("//div[contains(@aria-label,'Broadcast list info')]")
info.click()
except:
if language == 'italian':
text = "impossibile localizzare le info"
else:
text = "can't locate info"
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
try:
if language == 'italian':
element = WebDriverWait(driver, 10).until(
EC.element_to_be_clickable((By.XPATH, "//span[text()='Media, link e documenti']"))
)
media = driver.find_element_by_xpath("//span[text()='Media, link e documenti']")
media.click()
else:
element = WebDriverWait(driver, 10).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@title,'Media, Links and Docs')]"))
)
media = driver.find_element_by_xpath("//div[contains(@title,'Media, Links and Docs')]")
media.click()
saveImgVidAud(name, driver)
saveDoc(name, driver)
except:
if language == 'italian':
text = "impossibile localizzare i media"
else:
text = "can't locate media"
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
return
def saveDoc(name, driver):
if language == 'italian':
text = "salvataggio dei documenti in corso..."
else:
text = "saving documents..."
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
time.sleep(3)
if language == 'italian':
docs_xpath = '//button[text()="Documenti"]'
else:
docs_xpath = '//button[text()="DOCS"]'
docs = driver.find_element_by_xpath(docs_xpath)
docs.click()
dir = pyExePath + '\\Scraped\\Media\\'
noMedia = False
if not os.path.exists(dir):
os.makedirs(dir)
try:
element = WebDriverWait(driver, 15).until(
EC.element_to_be_clickable((By.XPATH,
"//*[@id='app']/div/div/div[2]/div[3]/span/div/span/div/div[2]/span/div/div/div/div/div/div/div/div"))
)
doc_list = driver.find_elements_by_xpath(
"//*[@id='app']/div/div/div[2]/div[3]/span/div/span/div/div[2]/span/div/div/div/div/div/div/div/div")
for document in doc_list:
document.click()
time.sleep(4)
except:
noMedia = True
return
def saveImgVidAud(name, driver):
if language == 'italian':
text = "apertura dei media in corso..."
else:
text = "opening media..."
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
window.update()
dir = pyExePath + '\\Scraped\\Media\\'
noMedia = False
if not os.path.exists(dir):
os.makedirs(dir)
try:
element = WebDriverWait(driver, 10).until(
EC.element_to_be_clickable((By.XPATH, "//div[contains(@style,'background-image')]"))
)
image = driver.find_element_by_xpath("//div[contains(@style,'background-image')]")
noMedia = False
lastimg = 'false'
driver.execute_script("arguments[0].click();", image)
while (lastimg == 'false'):
try:
if language == 'italian':
downloadXpath = "//div[@aria-label='Scarica']"
else:
downloadXpath = "//div[@aria-label='Download']"
download = driver.find_element_by_xpath(downloadXpath)
download.click()
except:
pass
try:
element = WebDriverWait(driver, 10).until(
EC.element_to_be_clickable(
(By.XPATH, '//*[@id="app"]/div/span[3]/div/div/div[2]/div[2]/div[1]/div'))
)
nextButton = driver.find_element_by_xpath('//*[@id="app"]/div/span[3]/div/div/div[2]/div[2]/div[1]/div')
lastimg = nextButton.get_attribute("aria-disabled")
nextButton.click()
except:
lastimg = 'true'
except:
noMedia = True
return
# SELECT FOLDER
def selectFolder():
global pyExePath
pyExePath = filedialog.askdirectory()
choose_dest_label.configure(text=pyExePath)
return
def getChatFromCSV():
if language == 'italian':
text = "ricerca delle chat selezionate in corso..."
else:
text = "searching for selected chats..."
output_label_2.configure(text=text)
log_dict[getDateTime()] = text
tree.delete(*tree.get_children())
if language == 'italian':
text = "Seleziona un file"
else:
text = "Select a file"
filename = filedialog.askopenfilename(initialdir="/", title=text,
filetypes=(("CSV files", "*.csv*"), ("all files", "*.*")))
nomeFile = os.path.basename(filename)
if nomeFile != "":
choose_label.configure(text=nomeFile)
choose_label.configure(fg="black")
f = open(filename, 'r')
line = f.read()
global NAMES
NAMES = line.split(",")
return
def moveArchiviate(driver):
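# Open the Archived view, un-archive every chat found there so it shows up in the main chat list, and return the names of the chats that were moved.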
menuDots = driver.find_element_by_xpath("//*[@id='side']/header/div[2]/div/span/div[3]/div/span")
time.sleep(2)
menuDots.click()
time.sleep(2)
archiv = driver.find_element_by_xpath("//*[@id='side']/header/div[2]/div/span/div[3]/span/div/ul/li[4]/div")
archiv.click()
time.sleep(2)
chatLabels = []
recentList = driver.find_elements_by_xpath(
'//*[@id="app"]/div/div/div[2]/div[1]/span/div/span/div/div/div[1]/div/div/div')
for label in recentList:
chatLabels.append(label)
chatLabels.sort(key=lambda x: int(x.get_attribute('style').split("translateY(")[1].split('px')[0]), reverse=False)
chatNames = []
for chat in chatLabels:
label = chat.find_elements_by_xpath('.//span[contains(@dir,"auto")]')
for labels in label:
chatNames.append(labels.get_attribute('title'))
for names in chatNames:
if names == '':
chatNames.remove(names)
for chat in chatLabels:
actionChains = ActionChains(driver)
actionChains.context_click(chat).perform()
estrai = driver.find_element_by_xpath('//*[@id="app"]/div/span[4]/div/ul/li[1]/div')
estrai.click()
time.sleep(4)
goBack = driver.find_element_by_xpath(
'//*[@id="app"]/div/div/div[2]/div[1]/span/div/span/div/header/div/div[1]/button/span')
goBack.click()
return chatNames
def disableEvent(event):
return "break"
it = ['Data gg/mm/aaaa', 'Ora', 'Mittente', 'Messaggio', 'scraper pronto',
'Autori: Domenico Palmisano e Francesca Pollicelli', 'Opzioni',
'Caricare lista contatti', 'Avvia scraper', 'Scraping chat archiviate', 'Cartella di destinazione']
en = ['Date mm/dd/yyyy', 'Time', 'Sender', 'Message', 'scraper ready',
'Authors: Domenico Palmisano and Francesca Pollicelli', 'Options',
'Load contact list', 'Start scraper', 'Scraping archived chats', 'Destination folder']
def change_language(index, value, op):
if comboLang.get() == 'English':
if len(orari) != 0:
if 'AM' in orari[0] or 'PM' in orari[0]:
tree.heading(0, text='Date mm/dd/yyyy', anchor=tk.W)
else:
tree.heading(0, text='Date dd/mm/yyyy', anchor=tk.W)
else:
tree.heading(0, text=en[0], anchor=tk.W)
tree.heading(1, text=en[1], anchor=tk.W)
tree.heading(2, text=en[2], anchor=tk.W)
tree.heading(3, text=en[3], anchor=tk.W)
output_label_2.config(text=en[4])
credit_label.config(text=en[5])
label.config(text=en[6])
choose_1.config(text=en[7])
choose_2.config(text=en[8])
c2.config(text=en[9])
choose_dest.config(text=en[10])
else:
if len(orari) != 0:
if 'AM' in orari[0] or 'PM' in orari[0]:
tree.heading(0, text='Data mm/gg/aaaa', anchor=tk.W)
else:
tree.heading(0, text='Data gg/mm/aaaa', anchor=tk.W)
else:
tree.heading(0, text=it[0], anchor=tk.W)
tree.heading(1, text=it[1], anchor=tk.W)
tree.heading(2, text=it[2], anchor=tk.W)
tree.heading(3, text=it[3], anchor=tk.W)
output_label_2.config(text=it[4])
credit_label.config(text=it[5])
label.config(text=it[6])
choose_1.config(text=it[7])
choose_2.config(text=it[8])
c2.config(text=it[9])
choose_dest.config(text=it[10])
return
def getfilesfrom(directory):
return filter(lambda x:
not os.path.isdir(os.path.join(directory, x)),
os.listdir(directory))
def create_zip(directory, zip_name):
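# Zip every regular file in `directory` (non-recursive) into the destination folder as `zip_name`.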
zf = zipfile.ZipFile(pyExePath + "\\" + zip_name, mode='w', compression=zipfile.ZIP_DEFLATED)
filestozip = getfilesfrom(directory)
for afile in filestozip:
zf.write(os.path.join(directory, afile), afile)
zf.close()
return
def zip_hasher(zip_name):
global nRow
dateTime = getDateTime()
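# hash the archive in 4 KiB chunks so large zips never need to fit in memory (MD5 and SHA-512 computed in one pass)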
with open(pyExePath + "\\" + zip_name, "rb") as f:
hash_md5 = hashlib.md5()
hash_sha512 = hashlib.sha512()
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
hash_sha512.update(chunk)
md5_digest = hash_md5.hexdigest()
sha512_digest = hash_sha512.hexdigest()
if save_media.get() == 1:
if zip_name == "chat.zip":
sheet1.write(2, 0, zip_name)
sheet1.write(2, 1, dateTime)
sheet1.write(2, 2, md5_digest)
sheet1.write(2, 3, sha512_digest)
wb.save(pyExePath + '\\log.xls')
else:
sheet1.write(3, 0, zip_name)
sheet1.write(3, 1, dateTime)
sheet1.write(3, 2, md5_digest)
sheet1.write(3, 3, sha512_digest)
wb.save(pyExePath + '\\log.xls')
else:
sheet1.write(2, 0, zip_name)
sheet1.write(2, 1, dateTime)
sheet1.write(2, 2, md5_digest)
sheet1.write(2, 3, sha512_digest)
wb.save(pyExePath + '\\log.xls')
return
tree = ttk.Treeview(window, show="headings", columns=(it[0], it[1], it[2], it[3]), height=14)
tree.heading(it[0], text=it[0], anchor=tk.W)
tree.heading(it[1], text=it[1], anchor=tk.W)
tree.heading(it[2], text=it[2], anchor=tk.W)
tree.heading(it[3], text=it[3], anchor=tk.W)
tree.column('#1', minwidth=110, stretch=False, width=110)
tree.column('#2', minwidth=90, stretch=False, width=90)
tree.column('#3', minwidth=140, stretch=False, width=140)
tree.column('#4', minwidth=535, stretch=True, width=535)
style = ttk.Style(window)
tree.grid(row=5, column=0, padx=10, pady=10, stick='W')
vsbar = tk.Scrollbar(window, orient=tk.VERTICAL, command=tree.yview)
vsbar.place(x=868, y=279, height=280, width=20)
tree.configure(yscrollcommand=vsbar.set)
style.theme_use("clam")
style.configure("Treeview", background="white",
fieldbackground="white", foreground="white")
tree.bind("<Button-1>", disableEvent)
title = tk.Label(window, text="WhatsApp Scraper", font=("Helvetica", 24))
title.grid(row=0, column=0, sticky="N", padx=20, pady=10)
output_label = tk.Label(text="Log: ")
output_label.grid(row=6, column=0, sticky="W", padx=10, pady=10)
output_label_2 = tk.Label(text=it[4], bg="white", fg="black", borderwidth=2, relief="groove", anchor='w')
output_label_2.configure(width=50)
output_label_2.grid(row=6, column=0, sticky="W", padx=45, pady=10)
credit_label = tk.Label(window, text=it[5])
credit_label.grid(row=6, column=0, stick="E", padx=10, pady=0)
xf = tk.Frame(window, relief=tk.GROOVE, borderwidth=2, width=920, height=70)
xf.grid(row=1, column=0, sticky="W", padx=10, pady=10)
label = tk.Label(xf, text=it[6])
label.place(relx=.06, rely=0.04, anchor=tk.W)
choose_1 = tk.Button(text=it[7], command=lambda: threading.Thread(target=getChatFromCSV).start())
choose_1.grid(row=1, column=0, sticky="W", padx=30, pady=10)
xf_2 = tk.Frame(window, relief=tk.GROOVE, borderwidth=2, width=920, height=70)
xf_2.grid(row=2, column=0, sticky="W", padx=10, pady=10)
choose_dest_label = tk.Label(text="", bg="white", fg="black", borderwidth=2, relief="groove", anchor='w')
choose_dest_label.configure(width=55)
choose_dest_label.grid(row=2, column=0, sticky="W", padx=185, pady=10)
choose_dest = tk.Button(text=it[10], command=lambda: threading.Thread(target=selectFolder).start())
choose_dest.grid(row=2, column=0, sticky="W", padx=30, pady=10)
choose_label = tk.Label(text="", bg="white", fg="black", borderwidth=2, relief="groove", anchor='w')
choose_label.configure(width=55)
choose_label.grid(row=1, column=0, sticky="W", padx=185, pady=10)
choose_2 = tk.Button(text=it[8], command=lambda: threading.Thread(target=getChatLabels).start())
choose_2.grid(row=2, column=0, sticky="E", padx=30, pady=10)
save_media = tk.IntVar()
c1 = tk.Checkbutton(window, text='Scraping media', variable=save_media, onvalue=1, offvalue=0)
c1.grid(row=1, column=0, stick="E", padx=200, pady=10)
archiviate = tk.IntVar()
c2 = tk.Checkbutton(window, text=it[9], variable=archiviate, onvalue=1, offvalue=0)
c2.grid(row=1, column=0, stick="E", padx=30, pady=10)
v = tk.StringVar()
v.trace('w', change_language)
comboLang = ttk.Combobox(window, textvar=v, state="readonly",
values=[
"English",
"Italian"])
comboLang.grid(row=0, column=0, sticky="W", padx=10, pady=10)
comboLang.set('Italian')
if __name__ == '__main__':
window.mainloop()
# done: remove Profile 1, comment it out to make the script more generic
# pyinstaller --noconsole --icon=whatsapp.ico --name WhatsAppScraper --onefile main.py
# Whatsappscraper_v.1
# TODO:
# test with multiple media items in the CSV
# 3) comment the code + slim it down (cleanup) -- optional: Sonar analysis
# done:
# times with timezone
# downloaded media entries that link back to the media file
# zip with all the conversations
# zip with all the media
# 1) handle the date and time in the preview with timezone and time format
# Excel file with log + hash ---> in progress
# CSV file with log + hash
# Whatsappscraper_v.1
|
server.py
|
#!/usr/bin/python3
import socket, sys, threading
# Simple chat client that allows multiple connections via threads
PORT = 3333 # the port number to run our server on
__version__ = "0.0.1"
class ChatServer(threading.Thread):
conns = list()
def __init__(self, port, host='localhost'):
threading.Thread.__init__(self)
self.port = port
self.host = host
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.users = {} # current connections
try:
self.server.bind((self.host, self.port))
except socket.error as e:
print('Bind failed: %s' % e)
sys.exit()
self.server.listen(10)
# Not currently used. Ensure sockets are closed on disconnect
def exit(self):
self.server.close()
def run_thread(self, conn, addr):
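# Per-client thread: relay every message received from this client to all connected clients.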
ChatServer.conns.append(conn)
print('Client connected with ' + addr[0] + ':' + str(addr[1]))
while True:
try:
data = conn.recv(1024)
if len(data) == 0:
break
reply = data
print(str(reply, 'utf-8'))
for minicon in ChatServer.conns:
# broadcast the message to every connected client
minicon.sendall(reply)
except KeyboardInterrupt:
break
conn.close() # Close
ChatServer.conns.remove(conn)
def run(self):
print('Waiting for connections on port %s\nPress ctrl + c to destroy server' % (self.port))
# We need to run a loop and create a new thread for each connection
while True:
conn = None
addr = None
try:
conn, addr = self.server.accept()
if not conn:
continue
except KeyboardInterrupt:
if conn:
conn.close()
print('\nServer has been killed')
break
threading.Thread(target=self.run_thread, args=(conn, addr)).start()
if __name__ == '__main__':
server = ChatServer(PORT)
server.run()
|
wsdump.py
|
#!/Users/lijing@us.ibm.com/root/temp/tmp/2020/watson-voice-bot/mytestenv/bin/python3.8
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = map(str.strip, args.headers.split(','))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
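# read a single frame and normalize it to (opcode, data); answers pings and acknowledges close frames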
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
clang-tidy-diff.py
|
#!/usr/bin/env python
#
#===- clang-tidy-diff.py - ClangTidy Diff Checker ------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
# ORIGINAL: https://github.com/llvm-mirror/clang-tools-extra/blob/master/clang-tidy/tool/run-clang-tidy.py
r"""
ClangTidy Diff Checker
======================
This script reads input from a unified diff, runs clang-tidy on all changed
files and outputs clang-tidy warnings in changed lines only. This is useful to
detect clang-tidy regressions in the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-tidy-diff.py -p1
svn diff --diff-cmd=diff -x-U0 | \
clang-tidy-diff.py -fix -checks=-*,modernize-use-override
"""
import argparse
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
try:
import yaml
except ImportError:
yaml = None
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
def run_tidy(task_queue, lock, timeout):
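# Worker loop: take a clang-tidy command from the queue, run it, and print its output under the shared lock.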
watchdog = None
while True:
command = task_queue.get()
try:
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
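# arm a watchdog that kills clang-tidy if it runs past the per-file timeout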
if timeout is not None:
watchdog = threading.Timer(timeout, proc.kill)
watchdog.start()
stdout, stderr = proc.communicate()
with lock:
sys.stdout.write(stdout.decode('utf-8') + '\n')
sys.stdout.flush()
if stderr:
sys.stderr.write(stderr.decode('utf-8') + '\n')
sys.stderr.flush()
except Exception as e:
with lock:
sys.stderr.write('Failed: ' + str(e) + ': '.join(command) + '\n')
finally:
with lock:
if timeout is not None and watchdog is not None:
if not watchdog.is_alive():
sys.stderr.write('Terminated by timeout: ' +
' '.join(command) + '\n')
watchdog.cancel()
task_queue.task_done()
def start_workers(max_tasks, tidy_caller, task_queue, lock, timeout):
for _ in range(max_tasks):
t = threading.Thread(target=tidy_caller, args=(task_queue, lock, timeout))
t.daemon = True
t.start()
def merge_replacement_files(tmpdir, mergefile):
"""Merge all replacement files in a directory into a single file"""
# The fixes suggested by clang-tidy >= 4.0.0 are given under
# the top level key 'Diagnostics' in the output yaml files
mergekey = "Diagnostics"
merged = []
for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
content = yaml.safe_load(open(replacefile, 'r'))
if not content:
continue # Skip empty files.
merged.extend(content.get(mergekey, []))
if merged:
# MainSourceFile: The key is required by the definition inside
# include/clang/Tooling/ReplacementsYaml.h, but the value
# is actually never used inside clang-apply-replacements,
# so we set it to '' here.
output = { 'MainSourceFile': '', mergekey: merged }
with open(mergefile, 'w') as out:
yaml.safe_dump(output, out)
else:
# Empty the file:
open(mergefile, 'w').close()
def main():
parser = argparse.ArgumentParser(description=
'Run clang-tidy against changed files, and '
'output diagnostics only for modified '
'lines.')
parser.add_argument('-clang-tidy-binary', metavar='PATH',
default='clang-tidy',
help='path to clang-tidy binary')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to check '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=
r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc)',
help='custom pattern selecting file paths to check '
'(case insensitive, overridden by -regex)')
parser.add_argument('-j', type=int, default=1,
help='number of tidy instances to be run in parallel.')
parser.add_argument('-timeout', type=int, default=None,
help='timeout per each file in seconds.')
parser.add_argument('-fix', action='store_true', default=False,
help='apply suggested fixes')
parser.add_argument('-checks',
help='checks filter, when not specified, use clang-tidy '
'default',
default='')
parser.add_argument('-path', dest='build_path',
help='Path used to read a compile command database.')
if yaml:
parser.add_argument('-export-fixes', metavar='FILE', dest='export_fixes',
help='Create a yaml file to store suggested fixes in, '
'which can be applied with clang-apply-replacements.')
parser.add_argument('-extra-arg', dest='extra_arg',
action='append', default=[],
help='Additional argument to append to the compiler '
'command line.')
parser.add_argument('-extra-arg-before', dest='extra_arg_before',
action='append', default=[],
help='Additional argument to prepend to the compiler '
'command line.')
parser.add_argument('-quiet', action='store_true', default=False,
help='Run clang-tidy in quiet mode')
clang_tidy_args = []
argv = sys.argv[1:]
if '--' in argv:
clang_tidy_args.extend(argv[argv.index('--'):])
argv = argv[:argv.index('--')]
args = parser.parse_args(argv)
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
match = re.search(r'^\+\+\+\ \"?(.*?/){%s}([^ \t\n\"]*)' % args.p, line)
if match:
filename = match.group(2)
if filename is None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
end_line = start_line + line_count - 1
lines_by_file.setdefault(filename, []).append([start_line, end_line])
if not any(lines_by_file):
print("No relevant changes found.")
sys.exit(0)
max_task_count = args.j
if max_task_count == 0:
max_task_count = multiprocessing.cpu_count()
max_task_count = min(len(lines_by_file), max_task_count)
tmpdir = None
if yaml and args.export_fixes:
tmpdir = tempfile.mkdtemp()
# Tasks for clang-tidy.
task_queue = queue.Queue(max_task_count)
# A lock for console output.
lock = threading.Lock()
# Run a pool of clang-tidy workers.
start_workers(max_task_count, run_tidy, task_queue, lock, args.timeout)
# Form the common args list.
common_clang_tidy_args = []
if args.fix:
common_clang_tidy_args.append('-fix')
if args.checks != '':
common_clang_tidy_args.append('-checks=' + args.checks)
if args.quiet:
common_clang_tidy_args.append('-quiet')
if args.build_path is not None:
common_clang_tidy_args.append('-p=%s' % args.build_path)
for arg in args.extra_arg:
common_clang_tidy_args.append('-extra-arg=%s' % arg)
for arg in args.extra_arg_before:
common_clang_tidy_args.append('-extra-arg-before=%s' % arg)
for name in lines_by_file:
line_filter_json = json.dumps(
[{"name": name, "lines": lines_by_file[name]}],
separators=(',', ':'))
# Run clang-tidy on files containing changes.
command = [args.clang_tidy_binary]
command.append('-line-filter=' + line_filter_json)
if yaml and args.export_fixes:
# Get a temporary file. We immediately close the handle so clang-tidy can
# overwrite it.
(handle, tmp_name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
os.close(handle)
command.append('-export-fixes=' + tmp_name)
command.extend(common_clang_tidy_args)
command.append(name)
command.extend(clang_tidy_args)
task_queue.put(command)
# Wait for all threads to be done.
task_queue.join()
if yaml and args.export_fixes:
print('Writing fixes to ' + args.export_fixes + ' ...')
try:
merge_replacement_files(tmpdir, args.export_fixes)
except:
sys.stderr.write('Error exporting fixes.\n')
traceback.print_exc()
if tmpdir:
shutil.rmtree(tmpdir)
if __name__ == '__main__':
main()
|
mp_classify.py
|
#!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from six import itervalues,iterkeys
import sys, os
import timeit
import numpy as np
import multiprocessing as mp
import ctypes
import signal
import threading
import time
from vai.dpuv1.rt import xdnn, xdnn_io
from vai.dpuv1.rt.vitis.python.dpu.runner import Runner
###################################################
# Pre-process
###################################################
class UserPreProcess():
def __init__(self, args, img_paths, input_shapes, shared_trans_arrs):
np.random.seed(123) # for reproducibility
self._args = args
self._firstInputShape = input_shapes[0]
self._shared_trans_arrs = shared_trans_arrs
self._imgpaths = img_paths
current = mp.current_process()
self._procid = (int(current._identity[0]) - 1) % args['numprepproc']
#HWC format as this is the native format that comes out of jpeg decode
self._meanarr = np.zeros ( (self._firstInputShape[2], self._firstInputShape[3], self._firstInputShape[1],), dtype = np.float32, order='C' )
self._meanarr += args['img_mean']
def run(self, inum_chunk):
write_slot = self._shared_trans_arrs.openWriteId()
write_arrs = self._shared_trans_arrs.accessNumpyBuffer(write_slot)
if not self._args['benchmarkmode']:
for i, inum in enumerate(inum_chunk):
write_arrs[0][i][:], shape = xdnn_io.loadImageBlobFromFile(self._imgpaths[inum], self._args['img_raw_scale'], self._meanarr,
self._args['img_input_scale'], self._firstInputShape[2], self._firstInputShape[3])
write_arrs[-1][i][0] = inum
write_arrs[-1][i][1:4] = shape
# Fill -1 for unfilled image slots in whole batch
write_arrs[-1][len(inum_chunk):][:] = -1
self._shared_trans_arrs.closeWriteId(write_slot)
###################################################
# Post-process
###################################################
class ZmqResultPublisher:
def __init__(self, devid):
import zmq
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:55{}5".format(devid))
def send(self, data):
self.socket.send(data)
class UserPostProcess():
def __init__(self, args, img_paths, fpgaOutputs, output_shapes,shared_output_arrs):
self.args = args
self.img_paths = img_paths
self.fpgaOutputs = fpgaOutputs
self.output_shapes = output_shapes
self._shared_output_arrs = shared_output_arrs
self.numProcessed = 0
self.startTime = None
self.cpuOp = xdnn.XDNNCPUOp(self.args['weights'])
#
# This function post-processes FPGA output:
# 1) Compute the final FC + Softmax layers
# 2) Print classification & accuracy
#
def run(self, imgList, fpgaOutput_list, fpgaOutputShape_list, shape_list):
fpgaOutput = fpgaOutput_list[0]
fpgaOutputShape = fpgaOutputShape_list[0]
if self.numProcessed == 0:
self.startTime = timeit.default_timer()
self.labels = xdnn_io.get_labels(self.args['labels'])
self.zmqPub = None
if self.args['zmqpub']:
self.zmqPub = ZmqResultPublisher(self.args['deviceID'])
self.goldenMap = None
if self.args['golden']:
self.goldenMap = xdnn_io.getGoldenMap(self.args['golden'])
self.top5Count = 0
self.top1Count = 0
self.fcOutput = np.empty((self.args['batch_sz'], self.args['outsz'],),
dtype=np.float32, order='C')
self.numProcessed += len(imgList)
npout_view = fpgaOutput
if self.cpuOp._weight is not None:
self.cpuOp.computeFC(npout_view, self.fcOutput)
else:
self.fcOutput=npout_view
smaxOutput = self.cpuOp.computeSoftmax(self.fcOutput)
if self.args['golden']:
for i,p in enumerate ( imgList ):
#topk = xdnn_io.getTopK( smaxOutput[i], self.labels, 1)
#print(imgList[i], topk)
self.top1Count += xdnn_io.isTopK(\
smaxOutput[i], self.goldenMap, p, self.labels, 1)
self.top5Count += xdnn_io.isTopK(\
smaxOutput[i], self.goldenMap, p, self.labels, 5)
if self.zmqPub is not None:
predictMsg = xdnn_io.getClassification(\
smaxOutput, imgList, self.labels, zmqPub=True)
self.zmqPub.send(predictMsg)
def loop(self):
fpgaOutputShape = self.output_shapes[0]
fpgaOutputShape[0] = self.args['batch_sz']
frame_count = 0
while True:
read_slot = self._shared_output_arrs.openReadId()
if read_slot is None:
break
read_slot_arrs = self._shared_output_arrs.accessNumpyBuffer(read_slot)
# array_sum = [read_slot_arrs[0][i].sum() for i in range(self.args['batch_sz'])]
imgList = []
shape_list = []
num_images = (read_slot_arrs[-1].shape)[0]
for image_num in range(num_images):
image_id = read_slot_arrs[-1][image_num][0]
if image_id == -1:
break
imgList.append(self.img_paths[int(image_id)])
shape_list.append(read_slot_arrs[-1][image_num][1:4])
if self.args["benchmarkmode"]:
if (self.numProcessed == 0):
self.startTime = timeit.default_timer()
self.numProcessed += len(imgList)
self._shared_output_arrs.closeReadId(read_slot)
continue
self.run(imgList,read_slot_arrs[0:-1], [fpgaOutputShape], shape_list)
self._shared_output_arrs.closeReadId(read_slot)
self.finish()
def finish(self):
print( "Total Images: %g " % ( float(self.numProcessed) ))
print( "Performance: %g images/s" % ( float(self.numProcessed) / (timeit.default_timer() - self.startTime ) ))
if self.args['golden'] and self.numProcessed:
print("\nAverage accuracy (n=%d) Top-1: %.1f%%, Top-5: %.1f%%\n" \
% (self.numProcessed,
float(self.top1Count)/float(self.numProcessed)*100.,
float(self.top5Count)/float(self.numProcessed)*100.))
###################################################
# Instantiate pre/post processes,
# allow user to register own classes
###################################################
g_preClass = UserPreProcess
g_postClass = UserPostProcess
g_preInst = None
g_postInst = None
def register_pre(preClass):
global g_preClass
g_preClass = preClass
def register_post(postClass):
global g_postClass
g_postClass = postClass
def init_pre_process(args, img_paths, input_shapes, shared_trans_arrs):
global g_preClass
global g_preInst
g_preInst = g_preClass(args, img_paths, input_shapes, shared_trans_arrs)
def run_pre_process(imgpath_idx):
global g_preInst
return g_preInst.run(imgpath_idx)
def post_process( args, img_paths, fpgaOutputs, output_shapes,shared_output_arrs):
global g_postClass
global g_postInst
g_postInst = g_postClass( args, img_paths, fpgaOutputs, output_shapes, shared_output_arrs)
g_postInst.loop()
###################################################
# FPGA
###################################################
def fpga_wait( runner, q, shared_output_arrs, shared_trans_arrs):
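# Drain the job queue: wait for each async FPGA job, then release its input slot and publish its output slot.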
numProcessed = 0
while True:
write_slot, read_slot, jid = q.get()
if write_slot is None:
break
runner.wait(jid)
#qFpga.put(img_num)
shared_trans_arrs.closeReadId(read_slot)
shared_output_arrs.closeWriteId(write_slot)
shared_output_arrs.close()
def fpga_process(args, num_img, compJson, shared_trans_arrs,shared_output_arrs):
runner = Runner(args['vitis_rundir'])
qWait = mp.Queue(maxsize=100)
t = threading.Thread(target=fpga_wait, args=(runner, qWait, shared_output_arrs, shared_trans_arrs))
t.start()
numProcessed = 0
startTime = time.time()
while numProcessed < num_img or args['perpetual']:
# Get the buffer for fpga output
write_slot = shared_output_arrs.openWriteId()
write_slot_arrs = shared_output_arrs.accessNumpyBuffer(write_slot)
# Get the input buffer for fpga exec
read_slot = shared_trans_arrs.openReadId()
if read_slot is None: break
read_slot_arrs = shared_trans_arrs.accessNumpyBuffer(read_slot)
# Copy meta data from input to output
write_slot_arrs[-1][:] = read_slot_arrs[-1][:]
# Start execution
jid = runner.execute_async(read_slot_arrs[:-1], write_slot_arrs[:-1])
# runner.wait(jid)
qWait.put((write_slot, read_slot, jid))
#shared_trans_arrs.closeReadId(read_slot)
numProcessed += 1
if(args['perpetual'] == False):
if numProcessed == num_img:
break
qWait.put((None, None, None))
t.join()
# Current version does copies...
# Assumes all types are np.float32/ctypes.c_float
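# Slot lifecycle: a producer calls openWriteId()/closeWriteId() to fill a slot,
# a consumer calls openReadId()/closeReadId() to drain it; slot ids cycle between
# the free list and the read list, so no new buffers are allocated after __init__.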
class SharedMemoryQueue:
def __init__(self, name, length, buf_shapes_list):
print("Creating SharedMemoryQueue",name)
self._name = name
self._len = length
# Hard coded for floats...
self._mem_type = ctypes.c_float
self._np_type = np.float32
# put() function copies into the free list
self._freeList = mp.Queue(length)
# get() function gets id of open slot. consumer needs to confirm when data is read
self._readList = mp.Queue(length)
self._buf_shapes_list = buf_shapes_list
self._buf_sizes_list = list(map(lambda x: int(np.prod(x)), buf_shapes_list))
print("Creating Shared Memory with buf_shape_list =", self._buf_shapes_list)
self._shared_memory_arrs = list()
for i in range(length):
buf_list = list()
for buf_size in self._buf_sizes_list:
buf_list.append(mp.Array(self._mem_type, buf_size))
self._shared_memory_arrs.append(buf_list)
self._freeList.put(i)
def close(self):
self._readList.put(None)
def accessBuffer(self, slot_id):
return self._shared_memory_arrs[slot_id]
def accessNumpyBuffer(self, slot_id):
buf_list = list()
for i in range(len(self._buf_shapes_list)):
np_arr = np.frombuffer(self._shared_memory_arrs[slot_id][i].get_obj(), dtype = self._np_type)
np_arr = np.reshape(np_arr, self._buf_shapes_list[i], order = 'C')
buf_list.append(np_arr)
return buf_list
def openWriteId(self):
id = self._freeList.get()
return id
def closeWriteId(self, id):
# finished writing slot id
self._readList.put(id)
def openReadId(self):
id = self._readList.get()
return id
def closeReadId(self, id):
# finished reading slot id
self._freeList.put(id)
def dump(self):
for i in range(self._len):
buf_list = self.accessNumpyBuffer(i)
for j, np_arr in enumerate(buf_list):
print("Slot=", i, "Array=", j, "Val=", np_arr)
###################################################
# "Main"
###################################################
def run(args=None):
if not args:
parser = xdnn_io.default_parser_args()
parser.add_argument('--numprepproc', type=int, default=1,
help='number of parallel processes used to decode and quantize images')
parser.add_argument('--numstream', type=int, default=16,
help='number of FPGA streams')
parser.add_argument('--deviceID', type=int, default=0,
help='FPGA no. -> FPGA ID to run in case multiple FPGAs')
parser.add_argument('--benchmarkmode', type=int, default=0,
help='bypass pre/post processing for benchmarking')
parser.add_argument('--profile', action='store_true',
help='Print average latencies for preproc/exec/postproc')
args = parser.parse_args()
args = xdnn_io.make_dict_args(args)
sharedInputArrs = []
fpgaOutputs = []
compilerJSONObj = xdnn.CompilerJsonParser(args['netcfg'])
input_shapes = [v for k,v in compilerJSONObj.getInputs().items()]
output_shapes = [v for k,v in compilerJSONObj.getOutputs().items()]
for in_idx in range(len(input_shapes)):
input_shapes[in_idx][0] = args['batch_sz']
for out_idx in range(len(output_shapes)):
output_shapes[out_idx][0] = args['batch_sz']
input_sizes = map(lambda x: np.prod(x), input_shapes)
output_sizes = map(lambda x: np.prod(x), output_shapes)
num_shared_slots = args['numstream']
# shared memory from preprocessing to fpga forward
shared_trans_arrs = SharedMemoryQueue("trans",num_shared_slots*(args['numprepproc']*args['batch_sz']),
input_shapes +[(args['batch_sz'], 4)])
# shared memory from fpga forward to postprocessing
shared_output_arrs = SharedMemoryQueue("output",num_shared_slots, output_shapes + [(args['batch_sz'], 4)])
# Form list of images to chunks of batch_sz
img_paths = xdnn_io.getFilePaths(args['images'])
imgids = list(range(len(img_paths)))
imgid_chunks = [ imgids[i:i+args['batch_sz']] for i in range(0, len(img_paths), args['batch_sz']) ]
# Start all processes
p = mp.Pool(initializer = init_pre_process,
initargs = (args, img_paths, input_shapes, shared_trans_arrs, ), processes = args['numprepproc'])
xdnnProc = mp.Process(target=fpga_process, args=(args, len(imgid_chunks), compilerJSONObj,shared_trans_arrs,shared_output_arrs,))
postProc = mp.Process(target=post_process, args=(args, img_paths, fpgaOutputs,output_shapes,shared_output_arrs,))
xdnnProc.start()
postProc.start()
t1 = timeit.default_timer()
if args['perpetual']:
while True:
res = [p.map_async(run_pre_process, imgid_chunks)]
for j in res:
j.wait()
del j
else:
p.map_async(run_pre_process, imgid_chunks)
xdnnProc.join()
postProc.join()
p.close()
p.join()
t2 = timeit.default_timer()
total_t = t2 - t1
if(args['profile']):
print("Total Wall Time : {:.2f} seconds\nTotal Images / Wall Time: {:.2f}".format(total_t, \
len(img_paths)/total_t))
if __name__ == '__main__':
run()
|
train.py
|
#!/usr/bin/python3
from sys import argv
print("loading")
import torch
import kaldi_io as kio
import numpy as np
from model import XVectorModel
from torch import nn
from torch import optim
from sys import exit
import os
from math import isnan
from math import floor
from queue import Queue
from threading import Thread
from time import sleep
from utils import shuffle_scp
from utils import get_speaker_id
from utils import read_flavour
from utils import get_labels_and_count
from utils import get_random_frame
from utils import get_speaker_count
speech_length = 400
batch_size = 64
amount_of_speaker = get_speaker_count()
learning_rate = 0.001
train_scp = os.environ['TRAIN_SCP']
spklbl_file_name = "./exp/spklbl"
device = torch.device(os.environ['TRAINING_DEVICE'])
flavour = read_flavour(argv)
if len(argv) < 3:
print("you need to enter a directory for the models")
exit(1)
model_dir = argv[2]
labels, amount_of_speaker = get_labels_and_count(spklbl_file_name)
xmodel = XVectorModel(amount_of_speaker, flavour=flavour, device=device, learning=True).to(device)
optimizer = optim.Adam(xmodel.parameters(), lr=learning_rate)
batch_queue = Queue()
# used for debugging if NaN values start to come up again
#torch.autograd.set_detect_anomaly(True)
def set_speech_length(new_length):
    # rebind the module-level value; without `global` the original assignment was a no-op
    global speech_length
    speech_length = new_length
def id_to_vector(id):
vector = np.zeros([amount_of_speaker])
vector[id - 1] = 1
return vector
def train_one_batch(batch_x, batch_y, epochnum, batchnum):
optimizer.zero_grad()
x = torch.from_numpy(batch_x).to(device)
target = torch.from_numpy(batch_y).type(torch.long).to(device)
loss, penalty = xmodel(x, target)
(loss + penalty).backward()
optimizer.step()
# train
if isnan(loss):
print("************************** HALT AND CATCH FIRE *****************************")
exit(1)
print("epoch : " + str(epochnum) + " batch: " + str(batchnum) + " Loss is: " + str(float(loss)))
def get_one_batch_with_random_frames(shuffled_scp_file):
batch_x = []
batch_y = []
for key, mat in kio.read_mat_scp(shuffled_scp_file):
if mat.shape[0] >= speech_length:
x = get_random_frame(mat, speech_length)
# y = id_to_vector(labels[get_speaker_id(key)]) # use this for mse loss
y = labels[get_speaker_id(key)] # use this for cross entropy loss
batch_x.append(x)
batch_y.append(y)
            if len(batch_x) == batch_size:
yield (np.array(batch_x), np.array(batch_y))
batch_x = []
batch_y = []
def get_one_batch_with_sequential_frames(shuffled_scp_file):
batch_x = []
batch_y = []
for key, mat in kio.read_mat_scp(shuffled_scp_file):
# y = id_to_vector(labels[get_speaker_id(key)]) # use this for mse loss
y = labels[get_speaker_id(key)] # use this for cross entropy loss
for i in range(0, floor(mat.shape[0] / speech_length)):
start = i * speech_length
stop = start + speech_length
batch_x.append(mat[start:stop])
batch_y.append(y)
            if len(batch_x) == batch_size:
yield (np.array(batch_x), np.array(batch_y))
batch_x = []
batch_y = []
def batch_loader_thread(scp_files):
shuffled_scp_file = shuffle_scp(scp_files)
for batch in get_one_batch_with_sequential_frames(shuffled_scp_file):
batch_queue.put(batch)
def train_epoch(epoch_num, scp_files):
batch_num = 0
    t = Thread(target=batch_loader_thread, args=(scp_files,), daemon=True)
    t.start()
    while t.is_alive():  # Thread.isAlive() was removed in Python 3.9
while not batch_queue.empty():
(batch_x, batch_y) = batch_queue.get()
train_one_batch(batch_x, batch_y, epoch_num, batch_num)
batch_num = batch_num + 1
        # if the main thread outpaces the loader, sleep briefly so batches can accumulate
sleep(0.01)
def predict_id(x):
batch_x = torch.from_numpy(x)
batch_x = batch_x.unsqueeze(0).to(device)
calc_x = xmodel(batch_x)
return calc_x.cpu().detach().numpy()[0].argmax()
def train(num_of_epochs, scp_files, model_dir=""):
for i in range(0, num_of_epochs):
train_epoch(i, scp_files)
if not model_dir == "":
torch.save(xmodel.state_dict(), model_dir + "/raw_model" + ("%02d" % i) + ".pt")
# training
print("starting")
train(num_of_epochs=20, model_dir=model_dir, scp_files=[train_scp])
|
browserstack_tests.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as expected
from selenium.webdriver.common.by import By
from queue import Queue
from threading import Thread
import requests
import os, json
import unittest
import time
CONFIG_FILE = os.environ['CONFIG_FILE'] if 'CONFIG_FILE' in os.environ else 'single.json'
TASK_ID = int(os.environ['TASK_ID']) if 'TASK_ID' in os.environ else 0
with open(CONFIG_FILE) as data_file:
CONFIG = json.load(data_file)
BROWSERSTACK_USERNAME = os.environ['BROWSERSTACK_USERNAME'] if 'BROWSERSTACK_USERNAME' in os.environ else CONFIG['user']
BROWSERSTACK_ACCESS_KEY = os.environ['BROWSERSTACK_ACCESS_KEY'] if 'BROWSERSTACK_ACCESS_KEY' in os.environ else CONFIG['key']
environments = [
{
"browser": "chrome",
"browser_version": "73.0",
"os": "Windows",
"os_version": "10"
},
{
"browser": "firefox",
"browser_version": "66.0",
"os": "Windows",
"os_version": "10"
},
{
"browser": "safari",
"os": "OS X",
"os_version": "Mojave"
},
{
"browser": "Edge",
"browser_version": "18.0",
"os": "Windows",
"os_version": "10"
}]
q = Queue(maxsize=0)
for environment in environments:
q.put(environment)
num_threads = 10
def run_test(q):
    while not q.empty():
tests_successful = False
try:
# Setup
environment = q.get()
print("%s: Starting" % environment["browser"])
for key in CONFIG["capabilities"]:
if key not in environment:
environment[key] = CONFIG["capabilities"][key]
driver = webdriver.Remote(desired_capabilities=environment,
command_executor="http://%s:%s@%s/wd/hub" % (BROWSERSTACK_USERNAME, BROWSERSTACK_ACCESS_KEY, 'hub.browserstack.com'))
#Start actually testing the code
driver.get("http://connectid.pythonanywhere.com/")
assert "Connect-ID" in driver.title
driver.find_element_by_xpath \
("//*[@id=\"page-top\"]/header/div/div/a").click()
WebDriverWait(driver, 10).until(expected.title_is('LinkedIn Login, LinkedIn Sign in | LinkedIn'))
driver.find_element_by_id("username").send_keys("jeroen.jerry@hotmail.com")
elem = driver.find_element_by_id("password")
elem.send_keys("linkedintest!")
elem.submit()
WebDriverWait(driver, 10).until(expected.title_is('Connect-ID'))
assert "Connect-ID" in driver.title
            # navigate through the page and check that things work; macOS already fails to find the CSS selector below...
#driver.find_element_by_css_selector("body > div:nth-child(3) > div > div.col-lg-3.hidden-sm.hidden-md > div > div > div > div > h5 > a").click()
driver.find_element_by_xpath("/html/body/div[2]/div/div[1]/div/div/div/div/h5/a").click()
assert "Create a new project" in driver.page_source
driver.find_element_by_css_selector("body > div:nth-child(3) > div > form > button").click()
assert "This field is required" in driver.page_source
navbar_element = driver.find_element_by_css_selector("#navbarSupportedContent > ul > li:nth-child(2) > a")
if not navbar_element.is_displayed():
driver.find_element_by_css_selector("body > nav > button > span").click()
navbar_element.click()
assert "jeroentest veltmans" in driver.page_source #take name of the logged in test user
navbar_element = driver.find_element_by_css_selector("#navbarSupportedContent > ul > li:nth-child(3) > a")
if not navbar_element.is_displayed():
driver.find_element_by_css_selector("body > nav > button > span").click()
navbar_element.click()
#could send test message and see if this one is shown or not
navbar_element = driver.find_element_by_id("navbarDropdown")
if not navbar_element.is_displayed():
driver.find_element_by_css_selector("body > nav > button > span").click()
navbar_element.click()
driver.find_element_by_css_selector("#navbarSupportedContent > ul > li.nav-item.dropdown.show > div > a:nth-child(3)").click()
assert "Create a new project" in driver.page_source
'''
navbar_element = driver.find_element_by_id("navbarDropdown")
if not navbar_element.is_displayed():
driver.find_element_by_css_selector("body > nav > button > span").click()
navbar_element.click()
second_element = driver.find_element_by_css_selector("#navbarSupportedContent > ul > li.nav-item.dropdown.show > div > a:nth-child(1)")
assert "jeroentest veltmans" in driver.page_source
driver.find_element_by_css_selector("body > div:nth-child(3) > div.jumbotron > div > div > div.col-lg-7.col-xl-8 > h1 > span > a").click()
assert "Headline:" in driver.page_source
'''
tests_successful = True
except (AssertionError) as e:
print(e)
finally: # Teardown
if not tests_successful:
requests.put('https://' + BROWSERSTACK_USERNAME + ':' + BROWSERSTACK_ACCESS_KEY + '@api.browserstack.com/automate/sessions/'
+ driver.session_id + '.json', data={"status": "failed", "reason": "did not pass an assertion test"})
driver.quit()
q.task_done()
for i in range(num_threads):
worker = Thread(target=run_test, args=(q,))
    worker.daemon = True  # Thread.setDaemon() is deprecated
worker.start()
q.join()
|
resnet50.py
|
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
import runner
import xir.graph
import pathlib
import xir.subgraph
import os
import input_fn
import math
import threading
import time
import sys
'''
Calculate softmax
data: data to be calculated
size: data size
return: softmax result
'''
def CPUCalcSoftmax(data,size):
sum=0.0
result = [0 for i in range(size)]
for i in range(size):
result[i] = math.exp(data[i])
sum +=result[i]
for i in range(size):
result[i] /=sum
return result
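# --- Hedged note (illustration only, not part of the original example) ---
# CPUCalcSoftmax above exponentiates the raw logits directly, which can
# overflow for large values. The helper below sketches the usual numerically
# stable equivalent (subtract the max before exponentiating); its name is an
# addition made for illustration.
def CPUCalcSoftmaxStable(data, size):
    m = max(data[i] for i in range(size))
    exps = [math.exp(data[i] - m) for i in range(size)]
    s = sum(exps)
    return [e / s for e in exps]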
def get_script_directory():
path = os.getcwd()
return path
'''
Get topk results according to its probability
datain: data result of softmax
filePath: path to the file that records the class label information
'''
def TopK(datain,size,filePath):
cnt=[i for i in range(size) ]
pair=zip(datain,cnt)
pair=sorted(pair,reverse=True)
softmax_new,cnt_new=zip(*pair)
fp=open(filePath, "r")
data1=fp.readlines()
fp.close()
for i in range(5):
flag=0
for line in data1:
if flag==cnt_new[i]:
print("Top[%d] %f %s" %(i, (softmax_new[i]),(line.strip)("\n")))
flag=flag+1
SCRIPT_DIR = get_script_directory()
calib_image_dir = SCRIPT_DIR + "/images/"
label_file="./words.txt"
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
global threadnum
threadnum = 0
'''
run resnet50 with batch
dpu: dpu runner
img: imagelist to be run
cnt: threadnum
'''
def runResnet50(dpu,img,cnt):
"""get tensor"""
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
outputHeight = outputTensors[0].dims[1]
outputWidth = outputTensors[0].dims[2]
outputChannel = outputTensors[0].dims[3]
outputSize = outputHeight*outputWidth*outputChannel
softmax = np.empty(outputSize)
batchSize = inputTensors[0].dims[0]
n_of_images = len(img)
count = 0
while count < cnt:
runSize = batchSize
shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndim)][1:])
"""prepare batch input/output """
outputData = []
inputData = []
outputData.append(np.empty((runSize,outputHeight,outputWidth,outputChannel), dtype = np.float32, order = 'C'))
inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C'))
"""init input image to input buffer """
for j in range(runSize):
imageRun = inputData[0]
imageRun[j,...] = img[(count+j)% n_of_images].reshape(inputTensors[0].dims[1],inputTensors[0].dims[2],inputTensors[0].dims[3])
"""run with batch """
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
for j in range(len(outputData)):
outputData[j] = outputData[j].reshape(runSize, outputSize)
"""softmax calculate with batch """
for j in range(runSize):
softmax = CPUCalcSoftmax(outputData[0][j], outputSize)
#TopK(softmax, outputSize, label_file)
count = count + runSize
def get_subgraph (g):
sub = []
root = g.get_root_subgraph()
sub = [ s for s in root.children
if s.metadata.get_attr_str ("device") == "DPU"]
return sub
def main(argv):
global threadnum
listimage=os.listdir(calib_image_dir)
threadAll = []
threadnum = int(argv[1])
i = 0
global runTotall
runTotall = len(listimage)
g = xir.graph.Graph.deserialize(pathlib.Path(argv[2]))
subgraphs = get_subgraph (g)
assert len(subgraphs) == 1 # only one DPU kernel
    all_dpu_runners = []
    for i in range(int(threadnum)):
        all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))
"""image list to be run """
time1 = int(round(time.time() * 1000))
img = []
for i in range(runTotall):
path = os.path.join(calib_image_dir,listimage[i])
img.append(input_fn.preprocess_fn(path))
time_pre = int(round(time.time() * 1000))
start = 0
for i in range(int(threadnum)):
if (i==threadnum-1):
end = len(img)
else:
end = start+(len(img)//threadnum)
t1 = threading.Thread(target=runResnet50, args=(all_dpu_runners[i], img[start:end], len(img[start:end])))
threadAll.append(t1)
start = end
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = int(round(time.time() * 1000))
timetotal = time2 - time1
fps = float(runTotall * 1000 / timetotal)
#print("Pre time: %d ms" %(time_pre - time1))
#print("DPU + post time: %d ms" %(time2 - time_pre))
#print("Total time : %d ms" %timetotal)
#print("Total frames : %d" %len(img))
print("Performance : %.2f FPS" %fps)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("please input thread number and json file path.")
else :
main(sys.argv)
|
skill.py
|
# coding: utf-8
"""
Contains code for handling skills.
"""
from mycroft.configuration import Configuration
from gi.repository import Gtk, GObject
import os.path
import re
import threading
import xdg
mycroft_config = None
mycroft_config_set = False
# Getting the config takes a while, and locks up the application.
# Run this in a thread to avoid this.
def set_config_func():
"""Gets the Mycroft config and stores it in a global variable."""
global mycroft_config
global mycroft_config_set
mycroft_config = Configuration.get()
mycroft_config_set = True
if not mycroft_config_set:
set_config_thread = threading.Thread(target=set_config_func)
set_config_thread.start()
def skill_id_to_path(skill_id):
"""
Takes a skill ID and tries to find its path.
Derived from mycroft.util.resolve_resource_file.
"""
global mycroft_config
global mycroft_config_set
# Look in XDG_DATA_DIRS
for conf_dir in xdg.BaseDirectory.load_data_paths('mycroft'):
filename = os.path.join(conf_dir, 'skills', skill_id)
if os.path.isdir(filename):
return filename
# Look in old user location
filename = os.path.join(os.path.expanduser('~'), '.mycroft', 'skills', skill_id)
if os.path.isdir(filename):
return filename
# Get global dir
if mycroft_config_set:
data_dir = os.path.join(os.path.expanduser(mycroft_config['data_dir']), 'skills')
else:
# Just assume /opt/mycroft for now
        data_dir = os.path.join('/opt', 'mycroft', 'skills')
filename = os.path.expanduser(os.path.join(data_dir, skill_id))
if os.path.isdir(filename):
return filename
return None # Skill cannot be resolved
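# --- Hedged usage sketch ---
# Small illustration of the lookup order implemented by skill_id_to_path():
# XDG data dirs first, then the legacy ~/.mycroft/skills location, then the
# configured (or assumed) data_dir. The skill ID below is only an example.
def _skill_path_example():
    path = skill_id_to_path('mycroft-weather.mycroftai')
    if path is None:
        print('skill not installed locally')
    return path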
class LapelSkill(GObject.Object):
"""
GObject wrapper for Mycroft skills, created from dicts provided by
the 'skillmanager.list' message.
"""
__gtype_name__ = 'LapelSkill'
def __init__(self, skill_id, data=None):
"""Initializes a LapelSkill object."""
super().__init__()
self.skill_id = skill_id
self.skill_path = skill_id_to_path(self.skill_id)
if data:
self.active = data['active']
else:
self.active = True
# Get data from README
if self.skill_path:
readme = os.path.join(self.skill_path, 'README.md')
if not os.path.isfile(readme):
print("Couldn't find information for " + self.skill_id)
self.data = None
else:
with open(readme) as readme_raw:
readme_content = readme_raw.read()
self.data = {}
# Get icon and title
img_title_exp = re.compile("<img[^>]*src='([^']*)'.*\/>\s(.*)") # noqa: W605
img_title_match = img_title_exp.findall(readme_content)
if img_title_match:
try:
self.data['icon'] = img_title_match[0][0]
self.data['title'] = img_title_match[0][1]
except (ValueError, KeyError):
print("Could not find title and icon for skill with ID " + self.skill_id)
self.data['icon'] = None
self.data['title'] = None
else:
print("Could not find title and icon for skill with ID " + self.skill_id)
self.data['icon'] = None
self.data['title'] = None
# Get description
description_exp = re.compile("^# .*\n(.*)") # noqa: W605
description_match = description_exp.findall(readme_content)
if description_match:
self.data['description'] = description_match[0]
else:
self.data['description'] = None
# Get examples
examples_exp = re.compile('## Examples.*\n.*"(.*)"\n\*\s"(.*)"') # noqa: W605
examples_match = examples_exp.findall(readme_content)
if examples_match:
self.data['examples'] = examples_match[0]
else:
self.data['examples'] = []
# Get categories
category_exp = re.compile("## Category.*\n\*\*(.*)\*\*") # noqa: W605
category_match = category_exp.findall(readme_content)
if category_match:
self.data['category'] = category_match[0]
else:
self.data['category'] = None
# TODO: Get tags
else:
self.data = None
@GObject.Property(type=str, flags=GObject.ParamFlags.READABLE)
def id(self):
"""The skill's unique ID."""
return self.skill_id
@GObject.Property(type=str, flags=GObject.ParamFlags.READABLE)
def path(self):
"""The path to the skill's source code."""
return self.skill_path
@GObject.Property(type=bool, default=True)
def is_active(self):
"""Whether the skill is active or not."""
return self.active
@Gtk.Template(resource_path='/org/dithernet/lapel/ui/skillview.ui')
class SkillView(Gtk.Box):
"""
GTK widget for displaying the data of a message contained in a
LapelMessage object.
"""
__gtype_name__ = 'SkillView'
title_label = Gtk.Template.Child()
description_label = Gtk.Template.Child()
examples_label = Gtk.Template.Child()
def __init__(self):
super().__init__()
def bind_to_skill(self, skill):
"""Binds the SkillView to a LapelSkill."""
self.skill = skill
if skill.data:
self.title_label.set_label(skill.data['title'])
if skill.data['description']:
self.description_label.set_label(skill.data['description'])
else:
self.description_label.set_use_markup(True)
                # TRANSLATORS: Shown in the skills menu when a skill has no provided description.
self.description_label.set_label('<i>' + _('No description found.') + '</i>') # noqa: F821
if skill.data['examples']:
examples = ''
for example in skill.data['examples']:
if examples != '':
examples += '\n'
examples += '• ' + example
self.examples_label.set_label(examples)
else:
self.examples_label.set_use_markup(True)
                # TRANSLATORS: Shown in the skills menu when a skill has no provided examples.
self.examples_label.set_label('<i>' + _('No examples found.') + '</i>') # noqa: F821
else:
self.title_label.set_label(skill.id)
self.examples_label.set_use_markup(True)
# TRANSLATORS: Shown in the skills menu when a skill's information could not be found.
self.examples_label.set_label('<i>' + _("Skill data not found.") + '</i>') # noqa: F821
|
app.py
|
from __future__ import print_function
import logging
import os.path
import sys
import argparse
import random
import fnmatch
from functools import partial
import subprocess
import threading
import sys
import platform
from Qt import QtCore, QtWidgets, QtGui
from pxr import Usd, Sdf, Ar, UsdUtils, Tf
import utils
from vendor.Nodz import nodz_main
import text_view
import info_panel
import re
from pprint import pprint
digitSearch = re.compile(r'\b\d+\b')
logger = logging.getLogger('usd-noodle')
logger.setLevel(logging.INFO)
if not len(logger.handlers):
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logger.propagate = False
def launch_usdview(usdfile):
print('launching usdview', usdfile)
subprocess.call(['usdview', usdfile], shell=True)
class DependencyWalker(object):
def __init__(self, usdfile):
self.usdfile = usdfile
self.walk_attributes = True
logger.info('DependencyWalker'.center(40, '-'))
logger.info('Loading usd file: {}'.format(self.usdfile))
self.nodes = {}
self.edges = []
self.resolver = Ar.GetResolver()
self.resolver.ConfigureResolverForAsset(usdfile)
self.visited_nodes = []
self.errored_nodes = []
def start(self):
self.visited_nodes = []
self.nodes = {}
self.edges = []
self.init_edges = []
layer = Sdf.Layer.FindOrOpen(self.usdfile)
if not layer:
return
# scrub the initial file path
# to get around upper/lowercase drive letters
# and junk like that
layer_path = Sdf.ComputeAssetPathRelativeToLayer(layer, os.path.basename(self.usdfile))
self.usdfile = layer_path
info = {}
info['online'] = os.path.isfile(layer_path)
info['path'] = layer_path
info['type'] = 'sublayer'
self.nodes[layer_path] = info
self.walkStageLayers(layer_path)
def get_flat_child_list(self, path):
ret = [path]
for key, child in path.nameChildren.items():
ret.extend(self.get_flat_child_list(child))
ret = list(set(ret))
return ret
def flatten_ref_list(self, ref_or_payload):
ret = []
for itemlist in [ref_or_payload.appendedItems, ref_or_payload.explicitItems, ref_or_payload.addedItems,
ref_or_payload.prependedItems, ref_or_payload.orderedItems]:
for payload in itemlist:
ret.append(payload)
return list(set(ret))
def resolve(self, layer, path):
if self.resolver.IsRelativePath(path):
return self.resolver.AnchorRelativePath(layer.realPath, path)
else:
resolved = self.resolver.Resolve(path)
if resolved:
return resolved
else:
# resolver will return None on invalid paths
# we still want the path regardless
return path
def walkStageLayers(self, layer_path, level=1):
id = '-' * (level)
sublayers = []
payloads = []
references = []
try:
layer = Sdf.Layer.FindOrOpen(layer_path)
except Tf.ErrorException as e:
info = {}
info['online'] = True
info['error'] = True
info['path'] = layer_path
self.nodes[layer_path] = info
self.errored_nodes.append(layer_path)
logger.info('usd file: {} had load errors'.format(layer_path))
return
if not layer:
return
# print(id, layer.realPath)
root = layer.pseudoRoot
# print(id, 'root', root)
# print(id, 'children'.center(40, '-'))
child_list = []
# info packet from the root prim
if layer_path in self.nodes:
info = self.nodes[layer_path]
child_list = self.get_flat_child_list(root)
info_dict = dict()
for key in root.ListInfoKeys():
if key in ['subLayers', 'subLayerOffsets']:
continue
info_dict[key] = root.GetInfo(key)
info['info'] = info_dict
info['specifier'] = root.specifier.displayName
info['muted'] = layer.IsMuted()
info['defaultPrim'] = layer.defaultPrim
info['PseudoRoot'] = layer.pseudoRoot.name
info['RootPrims'] = [x.path.GetPrimPath().pathString for x in layer.rootPrims]
self.nodes[layer_path] = info
for child in child_list:
# print(id, child)
if self.walk_attributes:
attributes = child.attributes
for attr in attributes:
# we are looking for "asset" type attributes
# references to external things
if attr.typeName == 'asset':
value = attr.default
# sometimes you get empty paths
if not value:
continue
if not value.path:
continue
resolved_path = self.resolve(layer, value.path)
info = {}
info['online'] = os.path.isfile(resolved_path)
info['path'] = resolved_path
filebase, ext = os.path.splitext(resolved_path)
info['type'] = 'ext'
if ext in ['.jpg', '.tex', '.tx', '.png', '.exr', '.hdr', '.tga', '.tif', '.tiff',
'.pic', '.gif', '.psd', '.ptex', '.cin', '.dpx', '.bmp', '.iff',
'.mov', '.m4v', '.mp4', '.webp']:
info['type'] = 'tex'
info['colorspace'] = attr.colorSpace
self.nodes[resolved_path] = info
if not [layer_path, resolved_path, info['type']] in self.edges:
self.edges.append([layer_path, resolved_path, info['type']])
clip_info = child.GetInfo("clips")
# pprint(clip_info)
for clip_set_name in clip_info:
clip_set = clip_info[clip_set_name]
# print(clip_set_name, clip_set.get("assetPaths"), clip_set.get("manifestAssetPath"), clip_set.get()
# "primPath")
"""
@todo: subframe handling
integer frames: path/basename.###.usd
subinteger frames: path/basename.##.##.usd.
@todo: non-1 increments
"""
clip_asset_paths = clip_set.get("assetPaths")
# don't use resolved path in case either the first or last file is missing from disk
firstFile = str(clip_asset_paths[0].path)
lastFile = str(clip_asset_paths[-1].path)
if digitSearch.findall(firstFile):
firstFileNum = digitSearch.findall(firstFile)[-1]
else:
firstFileNum = '???'
if digitSearch.findall(lastFile):
lastFileNum = digitSearch.findall(lastFile)[-1]
else:
lastFileNum = '???'
digitRange = str(firstFileNum + '-' + lastFileNum)
nodeName = ''
firstFileParts = firstFile.split(firstFileNum)
for i in range(len(firstFileParts) - 1):
nodeName += str(firstFileParts[i])
nodeName += digitRange
nodeName += firstFileParts[-1]
allFilesFound = True
for path in clip_asset_paths:
clip_path = self.resolve(layer, path.path)
if not os.path.isfile(clip_path):
allFilesFound = False
break
# TODO : make more efficient - looping over everything currently
# TODO: validate presence of all files in the clip seq. bg thread?
manifestPath = clip_set.get("manifestAssetPath")
refpath = self.resolve(layer, clip_asset_paths[0].path)
clipmanifest_path = self.resolve(layer, manifestPath.path)
info = {}
info['online'] = allFilesFound
info['path'] = refpath
info['type'] = 'clip'
info['primPath'] = clip_set.get("primPath")
info['clipSet'] = clip_set_name
self.nodes[nodeName] = info
if not [layer_path, nodeName, 'clip'] in self.edges:
self.edges.append([layer_path, nodeName, 'clip'])
if not [nodeName, clipmanifest_path, 'manifest'] in self.edges:
self.edges.append([nodeName, clipmanifest_path, 'manifest'])
if child.variantSets:
for varset in child.variantSets:
# print(child, 'variant set', varset.name)
variant_path = '{}:{}'.format(os.path.splitext(layer.realPath)[0], varset.name)
varprim = varset.owner
info = {}
info['online'] = True
info['path'] = variant_path
info['type'] = 'variant'
info['variant_set'] = varset.name
info['variants'] = [str(x) for x in varset.variants.keys()]
info['current_variant'] = varprim.variantSelections.get(varset.name)
self.nodes[variant_path] = info
if not [layer_path, variant_path, 'variant'] in self.edges:
self.edges.append([layer_path, variant_path, 'variant'])
for variant_name in varset.variants.keys():
variant = varset.variants[variant_name]
# so variants can host payloads and references
# we get to these through the variants primspec
# and then add them to our list of paths to inspect
for primspec_child in self.get_flat_child_list(variant.primSpec):
for payload in self.flatten_ref_list(primspec_child.payloadList):
pathToResolve = payload.assetPath
if pathToResolve:
refpath = self.resolve(layer, pathToResolve)
payloads.append(refpath)
info = {}
info['online'] = os.path.isfile(refpath)
info['path'] = refpath
info['type'] = 'payload'
self.nodes[refpath] = info
if not [variant_path, refpath, variant_name] in self.edges:
self.edges.append([variant_path, refpath, variant_name])
for reference in self.flatten_ref_list(primspec_child.referenceList):
pathToResolve = reference.assetPath
if pathToResolve:
refpath = self.resolve(layer, pathToResolve)
references.append(refpath)
info = {}
info['online'] = os.path.isfile(refpath)
info['path'] = refpath
info['type'] = 'reference'
self.nodes[refpath] = info
if not [variant_path, refpath, variant_name] in self.edges:
self.edges.append([variant_path, refpath, variant_name])
payloadList = self.flatten_ref_list(child.payloadList)
for payload in payloadList:
pathToResolve = payload.assetPath
if pathToResolve:
refpath = self.resolve(layer, pathToResolve)
payloads.append(refpath)
info = {}
info['online'] = os.path.isfile(refpath)
info['path'] = refpath
info['type'] = 'payload'
self.nodes[refpath] = info
if not [layer_path, refpath, 'payload'] in self.edges:
self.edges.append([layer_path, refpath, 'payload'])
referenceList = self.flatten_ref_list(child.referenceList)
for reference in referenceList:
pathToResolve = reference.assetPath
if pathToResolve:
refpath = self.resolve(layer, pathToResolve)
references.append(refpath)
info = {}
info['online'] = os.path.isfile(refpath)
info['path'] = refpath
info['type'] = 'reference'
self.nodes[refpath] = info
if not [layer_path, refpath, 'reference'] in self.edges:
self.edges.append([layer_path, refpath, 'reference'])
for rel_sublayer in layer.subLayerPaths:
refpath = self.resolve(layer, rel_sublayer)
sublayers.append(refpath)
info = {}
info['online'] = os.path.isfile(refpath)
info['path'] = refpath
info['type'] = 'sublayer'
self.nodes[refpath] = info
if not [layer_path, refpath, 'sublayer'] in self.edges:
self.edges.append([layer_path, refpath, 'sublayer'])
sublayers = list(set(sublayers))
references = list(set(references))
payloads = list(set(payloads))
if sublayers:
logger.debug((id, 'sublayerPaths'.center(40, '-')))
logger.debug((id, sublayers))
for sublayer in sublayers:
self.walkStageLayers(sublayer, level=level + 1)
if references:
logger.debug((id, 'references'.center(40, '-')))
logger.debug((id, references))
for reference in references:
self.walkStageLayers(reference, level=level + 1)
if payloads:
logger.debug((id, 'payloads'.center(40, '-')))
logger.debug((id, payloads))
for payload in payloads:
self.walkStageLayers(payload, level=level + 1)
def find_node(node_coll, attr_name, attr_value):
for x in node_coll:
node = node_coll[x]
if getattr(node, attr_name) == attr_value:
return node
class FindNodeWindow(QtWidgets.QDialog):
def __init__(self, nodz, parent=None):
self.nodz = nodz
super(FindNodeWindow, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.Tool | QtCore.Qt.WindowStaysOnTopHint)
self.build_ui()
def search(self):
search_text = self.searchTxt.text()
self.foundNodeList.clear()
if search_text == '':
return
for x in sorted(self.nodz.scene().nodes):
this_node = self.nodz.scene().nodes[x]
if fnmatch.fnmatch(this_node.label.lower(), '*%s*' % search_text.lower()):
self.foundNodeList.addItem(QtWidgets.QListWidgetItem(this_node.label))
def item_selected(self, *args):
items = self.foundNodeList.selectedItems()
if items:
sel = [x.text() for x in items]
for x in self.nodz.scene().nodes:
node = self.nodz.scene().nodes[x]
if node.label in sel:
node.setSelected(True)
else:
node.setSelected(False)
self.nodz._focus()
def build_ui(self):
lay = QtWidgets.QVBoxLayout()
self.setLayout(lay)
self.searchTxt = QtWidgets.QLineEdit()
self.searchTxt.textChanged.connect(self.search)
lay.addWidget(self.searchTxt)
self.foundNodeList = QtWidgets.QListWidget()
self.foundNodeList.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.foundNodeList.itemSelectionChanged.connect(self.item_selected)
lay.addWidget(self.foundNodeList)
class NodeGraphWindow(QtWidgets.QDialog):
def __init__(self, usdfile=None, walk_attributes=False, parent=None):
self.usdfile = usdfile
self.root_node = None
super(NodeGraphWindow, self).__init__(parent)
self.settings = QtCore.QSettings("chrisg", "usd-noodle")
self.setWindowTitle("Noodle")
self.nodz = None
self.walk_attributes = walk_attributes
self.find_win = None
self.build_ui()
self.show()
if self.usdfile:
self.load_file()
def loadTextChkChanged(self, state):
self.walk_attributes = self.loadTextChk.isChecked()
def build_ui(self):
if self.settings.value("geometry"):
self.restoreGeometry(self.settings.value("geometry"))
else:
sizeObject = QtWidgets.QDesktopWidget().screenGeometry(-1)
            self.resize(int(sizeObject.width() * 0.8), int(sizeObject.height() * 0.8))
self.setWindowFlags(
self.windowFlags() | QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint)
self.top_layout = QtWidgets.QVBoxLayout()
# self.top_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.top_layout)
self.toolbar_lay = QtWidgets.QHBoxLayout()
self.top_layout.addLayout(self.toolbar_lay)
noodle_label = QtWidgets.QLabel()
icon = QtGui.QPixmap()
icon.load(os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons", 'noodle.png'))
noodle_label.setPixmap(icon.scaled(32, 32,
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation)
)
self.toolbar_lay.addWidget(noodle_label)
self.openBtn = QtWidgets.QPushButton("Open...", )
self.openBtn.setShortcut('Ctrl+o')
self.openBtn.clicked.connect(self.manualOpen)
self.toolbar_lay.addWidget(self.openBtn)
self.reloadBtn = QtWidgets.QPushButton("Reload")
self.reloadBtn.setShortcut('Ctrl+r')
self.reloadBtn.clicked.connect(self.load_file)
self.toolbar_lay.addWidget(self.reloadBtn)
self.loadTextChk = QtWidgets.QCheckBox("Load Textures")
self.loadTextChk.setChecked(self.walk_attributes)
self.loadTextChk.stateChanged.connect(self.loadTextChkChanged)
self.toolbar_lay.addWidget(self.loadTextChk)
self.findBtn = QtWidgets.QPushButton("Find...")
self.findBtn.setShortcut('Ctrl+f')
self.findBtn.clicked.connect(self.findWindow)
self.toolbar_lay.addWidget(self.findBtn)
self.layoutBtn = QtWidgets.QPushButton("Layout Nodes")
self.layoutBtn.clicked.connect(self.layout_nodes)
self.toolbar_lay.addWidget(self.layoutBtn)
toolbarspacer = QtWidgets.QSpacerItem(10, 10, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.toolbar_lay.addItem(toolbarspacer)
self.splitter = QtWidgets.QSplitter()
self.top_layout.addWidget(self.splitter)
main_widget = QtWidgets.QWidget()
main_widget.setContentsMargins(0, 0, 0, 0)
self.splitter.addWidget(main_widget)
lay = QtWidgets.QVBoxLayout()
lay.setContentsMargins(0, 0, 0, 0)
main_widget.setLayout(lay)
self.top_layout.addLayout(lay)
logger.info('building nodes')
configPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'nodz_config.json')
self.nodz = nodz_main.Nodz(self, configPath=configPath)
self.nodz.editLevel = 1
# self.nodz.editEnabled = False
lay.addWidget(self.nodz)
self.nodz.initialize()
self.nodz.fitInView(-500, -500, 500, 500)
info_scroll = QtWidgets.QScrollArea()
info_scroll.setWidgetResizable(True)
self.info_panel = info_panel.InfoPanel(parent=self)
info_scroll.setWidget(self.info_panel)
self.splitter.addWidget(info_scroll)
        self.splitter.setSizes([int(self.width() * 0.8), int(self.width() * 0.2)])
self.nodz.signal_NodeMoved.connect(self.on_nodeMoved)
self.nodz.signal_NodeSelected.connect(self.on_nodeSelected)
self.nodz.signal_NodeContextMenuEvent.connect(self.node_context_menu)
self.nodz.signal_KeyPressed.connect(self.pickwalk)
if self.settings.value("splitterSizes"):
self.splitter.restoreState(self.settings.value("splitterSizes"))
def pickwalk(self, key):
if not self.nodz.scene().selectedItems():
return
original_sel = self.nodz.scene().selectedItems()
sel = original_sel[0]
clear_selection = False
if key == QtCore.Qt.Key_Right:
plug_names = list(sel.plugs.keys())
if plug_names:
plug = plug_names[0]
for i, conn in enumerate(sel.plugs[plug].connections):
parent_node = self.nodz.scene().nodes[conn.socketNode]
parent_node.setSelected(True)
clear_selection = True
break
elif key == QtCore.Qt.Key_Left:
socket_names = list(sel.sockets.keys())
if socket_names:
socket = socket_names[0]
for i, conn in enumerate(sel.sockets[socket].connections):
child_node = self.nodz.scene().nodes[conn.plugNode]
child_node.setSelected(True)
clear_selection = True
break
elif key == QtCore.Qt.Key_Up:
plug_names = list(sel.plugs.keys())
if plug_names:
plug = plug_names[0]
for i, conn in enumerate(sel.plugs[plug].connections):
siblings = []
for par_conn in conn.socketItem.connections:
siblings.append(par_conn.plugNode)
cur_index = siblings.index(sel.name)
if cur_index == 0:
return
sibling = siblings[cur_index - 1]
child_node = self.nodz.scene().nodes[sibling]
child_node.setSelected(True)
clear_selection = True
elif key == QtCore.Qt.Key_Down:
plug_names = list(sel.plugs.keys())
if plug_names:
plug = plug_names[0]
for i, conn in enumerate(sel.plugs[plug].connections):
siblings = []
for par_conn in conn.socketItem.connections:
siblings.append(par_conn.plugNode)
cur_index = siblings.index(sel.name)
if cur_index == len(siblings) - 1:
return
sibling = siblings[cur_index + 1]
child_node = self.nodz.scene().nodes[sibling]
child_node.setSelected(True)
clear_selection = True
if clear_selection:
for node in original_sel:
node.setSelected(False)
def on_nodeMoved(self, nodeName, nodePos):
# print('node {0} moved to {1}'.format(nodeName, nodePos))
pass
def on_nodeSelected(self, selected_nodes):
if not selected_nodes:
return
node = self.get_node_from_name(selected_nodes[0])
userdata = node.userData
path = userdata.get('path')
if path:
self.info_panel.loadData(path, userdata)
def findWindow(self):
if self.find_win:
self.find_win.close()
self.find_win = FindNodeWindow(self.nodz, parent=self)
self.find_win.show()
self.find_win.activateWindow()
def get_node_from_name(self, node_name):
return self.nodz.scene().nodes[node_name]
def node_path(self, node_name):
node = self.get_node_from_name(node_name)
userdata = node.userData
path = userdata.get('path')
if path:
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(path)
print(path)
def reveal_file(self, node_name):
node = self.get_node_from_name(node_name)
userdata = node.userData
browsePath = userdata.get('path')
if browsePath:
pltName = platform.system()
if pltName == 'Windows':
browsePath = browsePath.replace('/', '\\')
os.system("start explorer.exe /select,{}".format(browsePath))
elif pltName == 'Darwin':
os.system('open -R "{}"'.format(browsePath))
elif pltName == 'Linux':
os.system('xdg-open "{}"'.format(os.path.dirname(browsePath)))
def node_upstream(self, node_name):
start_node = self.get_node_from_name(node_name)
connected_nodes = start_node.upstream_nodes()
for node_name in self.nodz.scene().nodes:
node = self.nodz.scene().nodes[node_name]
if node in connected_nodes:
node.setSelected(True)
else:
node.setSelected(False)
def view_usdfile(self, node_name):
node = self.get_node_from_name(node_name)
userdata = node.userData
path = userdata.get('path')
layer = Sdf.Layer.FindOrOpen(path)
if layer:
win = text_view.TextViewer(input_text=layer.ExportToString(), title=path, parent=self)
win.show()
def view_usdview(self, node_name):
node = self.get_node_from_name(node_name)
userdata = node.userData
path = userdata.get('path')
worker = threading.Thread(target=launch_usdview, args=[path])
worker.start()
# subprocess.call(['usdview', path], shell=True)
# os.system('usdview {}'.format(path))
def node_context_menu(self, event, node):
menu = QtWidgets.QMenu()
menu.addAction("Copy Node Path", partial(self.node_path, node))
menu.addAction("Select upstream", partial(self.node_upstream, node))
menu.addAction("Reveal in filesystem", partial(self.reveal_file, node))
usd_submenu = menu.addMenu("USD")
usd_submenu.addAction("Inspect layer...", partial(self.view_usdfile, node))
usd_submenu.addAction("UsdView...", partial(self.view_usdview, node))
tex_submenu = menu.addMenu("Texture")
menu.exec_(event.globalPos())
def load_file(self):
if not os.path.isfile(self.usdfile):
raise RuntimeError("Cannot find file: %s" % self.usdfile)
self.nodz.clearGraph()
self.root_node = None
self.setWindowTitle('Noodle - {}'.format(self.usdfile))
x = DependencyWalker(self.usdfile)
x.walk_attributes = self.walk_attributes
x.start()
# get back the scrubbed initial file path
# which will let us find the start node properly
self.usdfile = x.usdfile
nodz_scene = self.nodz.scene()
# pprint(x.nodes)
nds = []
for i, node in enumerate(x.nodes):
info = x.nodes[node]
pos = QtCore.QPointF(0, 0)
node_label = os.path.basename(node)
# node colouring / etc based on the node type
node_preset = 'node_default'
node_icon = "sublayer.png"
if info.get("type") == 'clip':
node_preset = 'node_clip'
node_icon = "clip.png"
elif info.get("type") == 'payload':
node_preset = 'node_payload'
node_icon = "payload.png"
elif info.get("type") == 'variant':
node_preset = 'node_variant'
node_icon = "variant.png"
elif info.get("type") == 'specialize':
node_preset = 'node_specialize'
node_icon = "specialize.png"
elif info.get("type") == 'reference':
node_preset = 'node_reference'
node_icon = "reference.png"
elif info.get("type") == 'tex':
node_preset = 'node_texture'
node_icon = "texture.png"
if not node in nds:
nodeA = self.nodz.createNode(name=node, label=node_label, preset=node_preset, position=pos)
if self.usdfile == node:
self.root_node = nodeA
node_icon = "noodle.png"
icon = QtGui.QIcon(os.path.join(os.path.dirname(os.path.abspath(__file__)), "icons", node_icon))
nodeA.icon = icon
nodeA.setToolTip(node_label)
if nodeA:
self.nodz.createAttribute(node=nodeA, name='out', index=0, preset='attr_preset_1',
plug=True, socket=False, dataType=int, socketMaxConnections=-1)
nodeA.userData = info
if info.get('error', False) is True:
self.nodz.createAttribute(node=nodeA, name='ERROR', index=0, preset='attr_preset_2',
plug=False, socket=False)
if info['online'] is False:
self.nodz.createAttribute(node=nodeA, name='OFFLINE', index=0, preset='attr_preset_2',
plug=False, socket=False)
# override the node's draw pen with a
# lovely red outline
nodeA._pen = QtGui.QPen()
nodeA._pen.setStyle(QtCore.Qt.SolidLine)
nodeA._pen.setWidth(5)
nodeA._pen.setColor(QtGui.QColor(255, 0, 0))
nds.append(node)
# pprint(x.edges)
# 'wiring nodes'.center(40, '-')
# create all the node connections
for edge in x.edges:
start = edge[0]
end = edge[1]
port_type = edge[2]
try:
start_node = self.nodz.scene().nodes[start]
self.nodz.createAttribute(node=start_node, name=port_type, index=-1, preset='attr_preset_1',
plug=False, socket=True, dataType=int, socketMaxConnections=-1)
# # sort the ports alphabetically
# start_node.attrs = sorted(start_node.attrs)
self.nodz.createConnection(end, 'out', start, port_type)
except:
print('cannot find start node', start)
# layout nodes!
self.nodz.arrangeGraph(self.root_node)
# self.nodz.autoLayoutGraph()
self.nodz._focus()
if x.errored_nodes:
message = 'Some layers had load errors:\n'
for errpath in x.errored_nodes:
message += '{}\n'.format(errpath)
QtWidgets.QMessageBox.warning(self, 'File Parsing errors', message, QtWidgets.QMessageBox.Ok)
def layout_nodes(self):
# layout nodes!
self.nodz.arrangeGraph(self.root_node)
# self.nodz.autoLayoutGraph()
self.nodz._focus(all=True)
def manualOpen(self):
"""
        Manual open: shows a file dialog so the user can pick a USD file to load.
"""
startPath = None
if self.usdfile:
startPath = os.path.dirname(self.usdfile)
multipleFilters = "USD Files (*.usd *.usda *.usdc) (*.usd *.usda *.usdc);;All Files (*.*) (*.*)"
options = QtWidgets.QFileDialog.DontUseNativeDialog
try:
# qt 5.2 and up
options = options | QtWidgets.QFileDialog.DontUseCustomDirectoryIcons
except:
pass
filename = QtWidgets.QFileDialog.getOpenFileName(
self, 'Open File', startPath or '/', multipleFilters,
None, options)
if filename[0]:
print(filename[0])
self.usdfile = filename[0]
self.load_file()
def closeEvent(self, event):
"""
        Window close event. Saves window geometry and splitter preferences.
"""
if self.find_win:
self.find_win.close()
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("splitterSizes", self.splitter.saveState())
super(NodeGraphWindow, self).closeEvent(event)
def main(usdfile=None, walk_attributes=False):
par = QtWidgets.QApplication.activeWindow()
win = NodeGraphWindow(usdfile=usdfile, parent=par, walk_attributes=walk_attributes)
return win
|
views.py
|
# coding=utf-8
import json
import threading
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, Http404
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from frigg.builds.models import Build
from frigg.utils import github_api_get_request
@login_required
def overview(request):
return render(request,
"builds/overview.html",
{'builds': Build.objects.all().order_by("-id")[0:100]})
@login_required
def build(request, build_id):
try:
build = Build.objects.get(id=build_id)
return render(request, "builds/build.html", {'build': build})
except Build.DoesNotExist:
raise Http404
def build_pull_request(data):
repo_url = "git@github.com:%s/%s.git" % (data['repo_owner'], data['repo_name'])
build = Build.objects.create(git_repository=repo_url,
pull_request_id=data['pull_request_id'],
branch=data['branch'],
sha=data["sha"])
t = threading.Thread(target=build.run_tests)
    t.daemon = True  # Thread.setDaemon() is deprecated
t.start()
@login_required
def deploy_master_branch(request, build_id):
build = Build.objects.get(id=build_id)
build.deploy()
return HttpResponse("Deployed")
@csrf_exempt
def github_webhook(request):
try:
event = request.META['HTTP_X_GITHUB_EVENT']
except KeyError:
return HttpResponse("Missing HTTP_X_GITHUB_EVENT")
if event == "issue_comment":
data = json.loads(request.body)
if data['comment']['body'] == "retest now please":
url = data['issue']['pull_request']['url'][29:]
pr_data = json.loads(github_api_get_request(url))
pull_request = {'repo_name': pr_data['head']['repo']['name'],
'repo_owner': pr_data['head']['repo']['owner']['login'],
'pull_request_id': pr_data['number'],
'branch': pr_data['head']['ref'],
"sha": pr_data['head']['sha']}
build_pull_request(pull_request)
elif event == "pull_request":
data = json.loads(request.body)
# Do nothing if the pull request is being closed
if data['action'] == "closed":
            return HttpResponse("Pull request closed, nothing to do")
pull_request = {'repo_name': data['repository']['name'],
'repo_owner': data['repository']['owner']['login'],
'pull_request_id': data['number'],
'branch': data['pull_request']['head']['ref'],
"sha": data['pull_request']['head']['sha']}
build_pull_request(pull_request)
# If someone pushed directly to master.. lets test it anyway
elif event == "push":
data = json.loads(request.body)
if data['ref'] == "refs/heads/master":
pull_request = {'repo_name': data['repository']['name'],
'repo_owner': data['repository']['owner']['name'],
'pull_request_id': 0,
'branch': 'master',
"sha": data['after']}
build_pull_request(pull_request)
else:
return HttpResponse("Do nothing :)")
return HttpResponse(event)
|
non_blocking_io.py
|
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import io
import logging
from dataclasses import dataclass
from queue import Queue
from threading import Thread
from typing import IO, Callable, Optional, Union
# --------------------------------------------------------
# References:
# https://github.com/facebookresearch/iopath/blob/main/iopath/common/non_blocking_io.py
# --------------------------------------------------------
"""
This file is used for asynchronous file operations.
When `opena` is called for the first time for a specific
`PathHandler`, a `NonBlockingIOManager` is instantiated. The
manager returns a `NonBlockingIO` (or `NonBlockingBufferedIO`)
instance to the caller, and the manager maintains all of the
thread management and data management.
"""
@dataclass
class PathData:
"""
Manage the IO job queue and polling thread for a single
path. This is done to ensure that write calls to the same
path are serialized so they are written in the same order
as they were called.
On each `f.write` call where `f` is of type `NonBlockingIO`,
we send the job to the manager where it is enqueued to the
Queue. The polling Thread picks up on the job, executes it,
waits for it to finish, and then continues to poll.
"""
queue: Queue
thread: Thread
class NonBlockingIOManager:
"""
All `opena` calls pass through this class so that it can
keep track of the threads for proper cleanup at the end
of the script. Each path that is opened with `opena` is
assigned a single queue and polling thread that is kept
open until it is cleaned up by `PathManager.async_join()`.
"""
def __init__(
self,
buffered: Optional[bool] = False,
executor: Optional[concurrent.futures.Executor] = None,
) -> None:
"""
Args:
buffered (bool): IO instances will be `NonBlockingBufferedIO`
or `NonBlockingIO` based on this value. This bool is set
manually for each `PathHandler` in `_opena`.
executor: User can optionally attach a custom executor to
perform async operations through `PathHandler.__init__`.
"""
self._path_to_data = {} # Map from path to `PathData` object
self._buffered = buffered
self._IO = NonBlockingBufferedIO if self._buffered else NonBlockingIO
self._pool = executor or concurrent.futures.ThreadPoolExecutor()
def get_non_blocking_io(
self,
path: str,
io_obj: Union[IO[str], IO[bytes]],
callback_after_file_close: Optional[Callable[[None], None]] = None,
buffering: Optional[int] = -1,
) -> Union[IO[str], IO[bytes]]:
"""
Called by `PathHandler._opena` with the path and returns a
`NonBlockingIO` instance.
Args:
path (str): A path str to operate on. This path should be
simplified to ensure that each absolute path has only a single
path str that maps onto it. For example, in `NativePathHandler`,
we can use `os.path.normpath`.
io_obj (IO): a reference to the IO object returned by the
`PathHandler._open` function.
callback_after_file_close (Callable): An optional argument that can
be passed to perform operations that depend on the asynchronous
writes being completed. The file is first written to the local
disk and then the callback is executed.
buffering (int): An optional argument to set the buffer size for
buffered asynchronous writing.
"""
if not self._buffered and buffering != -1:
raise ValueError(
"NonBlockingIO is not using a buffered writer but `buffering` "
f"arg is set to non-default value of {buffering} != -1."
)
if path not in self._path_to_data:
# Initialize job queue and a polling thread
queue = Queue()
t = Thread(target=self._poll_jobs, args=(queue,))
t.start()
# Store the `PathData`
self._path_to_data[path] = PathData(queue, t)
kwargs = {} if not self._buffered else {"buffering": buffering}
return self._IO(
notify_manager=lambda io_callable: ( # Pass async jobs to manager
self._path_to_data[path].queue.put(io_callable)
),
io_obj=io_obj,
callback_after_file_close=callback_after_file_close,
**kwargs,
)
    def _poll_jobs(self, queue: Queue) -> None:
"""
A single thread runs this loop. It waits for an IO callable to be
placed in a specific path's `Queue` where the queue contains
callable functions. It then waits for the IO job to be completed
before looping to ensure write order.
"""
while True:
# `func` is a callable function (specifically a lambda function)
# and can be any of:
# - func = file.write(b)
# - func = file.close()
# - func = None
func = queue.get() # Blocks until item read.
if func is None: # Thread join signal.
break
self._pool.submit(func).result() # Wait for job to finish.
def _join(self, path: Optional[str] = None) -> bool:
"""
Waits for write jobs for a specific path or waits for all
write jobs for the path handler if no path is provided.
Args:
path (str): Pass in a file path and will wait for the
asynchronous jobs to be completed for that file path.
If no path is passed in, then all threads operating
on all file paths will be joined.
"""
if path and path not in self._path_to_data:
raise ValueError(
f"{path} has no async IO associated with it. "
f"Make sure `opena({path})` is called first."
)
# If a `_close` call fails, we print the error and continue
# closing the rest of the IO objects.
paths_to_close = [path] if path else list(self._path_to_data.keys())
success = True
for _path in paths_to_close:
try:
path_data = self._path_to_data.pop(_path)
path_data.queue.put(None)
path_data.thread.join()
except Exception:
logger = logging.getLogger(__name__)
logger.exception(f"`NonBlockingIO` thread for {_path} failed to join.")
success = False
return success
def _close_thread_pool(self) -> bool:
"""
Closes the ThreadPool.
"""
try:
self._pool.shutdown()
except Exception:
logger = logging.getLogger(__name__)
logger.exception("`NonBlockingIO` thread pool failed to close.")
return False
return True
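# --- Hedged usage sketch (illustration only, not part of the original module) ---
# Shows roughly how a PathHandler's `opena` could hand a plain file object to
# the manager and get back a non-blocking wrapper. The target path and file
# mode are made-up examples. Writes are queued and executed in order by the
# per-path polling thread; `_join()` blocks until they have all completed.
def _non_blocking_io_example():
    manager = NonBlockingIOManager(buffered=False)
    raw = open("/tmp/async_demo.txt", "w")  # hypothetical target path
    f = manager.get_non_blocking_io("/tmp/async_demo.txt", io_obj=raw)
    f.write("hello ")
    f.write("world\n")
    f.close()                 # enqueued after the writes, preserving order
    manager._join()           # wait for the queued jobs to finish
    manager._close_thread_pool()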
# NOTE: We currently only support asynchronous writes (not reads).
class NonBlockingIO(io.IOBase):
def __init__(
self,
notify_manager: Callable[[Callable[[], None]], None],
io_obj: Union[IO[str], IO[bytes]],
callback_after_file_close: Optional[Callable[[None], None]] = None,
) -> None:
"""
Returned to the user on an `opena` call. Uses a Queue to manage the
IO jobs that need to be run to ensure order preservation and a
polling Thread that checks the Queue. Implementation for these are
lifted to `NonBlockingIOManager` since `NonBlockingIO` closes upon
leaving the context block.
NOTE: Writes to the same path are serialized so they are written in
the same order as they were called but writes to distinct paths can
happen concurrently.
Args:
notify_manager (Callable): a callback function passed in from the
`NonBlockingIOManager` so that all IO jobs can be stored in
the manager. It takes in a single argument, namely another
callable function.
Example usage:
```
notify_manager(lambda: file.write(data))
notify_manager(lambda: file.close())
```
Here, we tell `NonBlockingIOManager` to add a write callable
to the path's Queue, and then to add a close callable to the
path's Queue. The path's polling Thread then executes the write
callable, waits for it to finish, and then executes the close
callable. Using `lambda` allows us to pass callables to the
manager.
io_obj (IO): a reference to the IO object returned by the
`PathHandler._open` function.
callback_after_file_close (Callable): An optional argument that can
be passed to perform operations that depend on the asynchronous
writes being completed. The file is first written to the local
disk and then the callback is executed.
"""
super().__init__()
self._notify_manager = notify_manager
self._io = io_obj
self._callback_after_file_close = callback_after_file_close
self._close_called = False
def readable(self) -> bool:
return False
def writable(self) -> bool:
return True
def seekable(self) -> bool:
return True
def write(self, b: Union[bytes, bytearray]) -> None:
"""
Called on `f.write()`. Gives the manager the write job to call.
"""
self._notify_manager(lambda: self._io.write(b))
def seek(self, offset: int, whence: int = 0) -> int:
"""
Called on `f.seek()`.
"""
self._notify_manager(lambda: self._io.seek(offset, whence))
def tell(self) -> int:
"""
Called on `f.tell()`.
"""
raise ValueError("ioPath async writes does not support `tell` calls.")
def truncate(self, size: int = None) -> int:
"""
Called on `f.truncate()`.
"""
self._notify_manager(lambda: self._io.truncate(size))
def close(self) -> None:
"""
Called on `f.close()` or automatically by the context manager.
We add the `close` call to the file's queue to make sure that
the file is not closed before all of the write jobs are complete.
"""
# `ThreadPool` first closes the file and then executes the callback.
# We only execute the callback once even if there are multiple
# `f.close` calls.
self._notify_manager(lambda: self._io.close())
if not self._close_called and self._callback_after_file_close:
self._notify_manager(self._callback_after_file_close)
self._close_called = True
# NOTE: To use this class, use `buffered=True` in `NonBlockingIOManager`.
# NOTE: This class expects the IO mode to be buffered.
class NonBlockingBufferedIO(io.IOBase):
MAX_BUFFER_BYTES = 10 * 1024 * 1024 # 10 MiB
def __init__(
self,
notify_manager: Callable[[Callable[[], None]], None],
io_obj: Union[IO[str], IO[bytes]],
callback_after_file_close: Optional[Callable[[None], None]] = None,
buffering: int = -1,
) -> None:
"""
Buffered version of `NonBlockingIO`. All write data is stored in an
IO buffer until the buffer is full, or `flush` or `close` is called.
Args:
Same as `NonBlockingIO` args.
buffering (int): An optional argument to set the buffer size for
buffered asynchronous writing.
"""
super().__init__()
self._notify_manager = notify_manager
self._io = io_obj
self._callback_after_file_close = callback_after_file_close
self._buffers = [io.BytesIO()]
self._buffer_size = buffering if buffering > 0 else self.MAX_BUFFER_BYTES
self._close_called = False
def readable(self) -> bool:
return False
def writable(self) -> bool:
return True
def seekable(self) -> bool:
return False
def write(self, b: Union[bytes, bytearray]) -> None:
"""
Called on `f.write()`. Gives the manager the write job to call.
"""
buffer = self._buffers[-1]
with memoryview(b) as view:
buffer.write(view)
if buffer.tell() < self._buffer_size:
return
self.flush()
def close(self) -> None:
"""
Called on `f.close()` or automatically by the context manager.
We add the `close` call to the file's queue to make sure that
the file is not closed before all of the write jobs are complete.
"""
self.flush()
# Close the last buffer created by `flush`.
self._notify_manager(lambda: self._buffers[-1].close())
# `ThreadPool` first closes the file and then executes the callback.
self._notify_manager(lambda: self._io.close())
if not self._close_called and self._callback_after_file_close:
self._notify_manager(self._callback_after_file_close)
self._close_called = True
def flush(self) -> None:
"""
Called on `f.write()` if the buffer is filled (or overfilled). Can
also be explicitly called by the user.
NOTE: Buffering is used in a strict manner. Any buffer that exceeds
`self._buffer_size` will be broken into multiple write jobs, where
each write call is at most `self._buffer_size` bytes.
"""
buffer = self._buffers[-1]
if buffer.tell() == 0:
return
pos = 0
total_size = buffer.seek(0, io.SEEK_END)
view = buffer.getbuffer()
# Chunk the buffer in case it is larger than the buffer size.
while pos < total_size:
item = view[pos : pos + self._buffer_size]
# `item=item` is needed due to Python's late binding closures.
self._notify_manager(lambda item=item: self._io.write(item))
pos += self._buffer_size
# Close buffer immediately after being written to file and create
# a new buffer.
self._notify_manager(lambda: buffer.close())
self._buffers.append(io.BytesIO())
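# --- Editor's sketch (illustrative, not part of the module above) -----------
# The `item=item` default argument used in `flush` works around Python's
# late-binding closures: a bare `lambda: self._io.write(item)` would capture
# the variable `item`, so every queued job would end up writing the final
# chunk. A minimal, self-contained demonstration of the pitfall and the fix:
def _late_binding_demo():
    chunks = (b"a", b"b", b"c")
    late_bound = [lambda: c for c in chunks]     # all close over the same `c`
    bound_now = [lambda c=c: c for c in chunks]  # default binds at definition time
    assert [f() for f in late_bound] == [b"c", b"c", b"c"]
    assert [f() for f in bound_now] == [b"a", b"b", b"c"]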
|
email.py
|
from threading import Thread
from flask import render_template
from flask_mail import Message
from app import app, mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
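# --- Editor's sketch (hypothetical usage, not part of this module) ----------
# `send_email` returns immediately because the SMTP call runs in a background
# Thread with the application context pushed manually. A caller only builds
# the message bodies; the template names and config key below are assumptions
# made for illustration:
def send_password_reset_email(user):
    send_email('Reset Your Password',
               sender=app.config['ADMINS'][0],  # assumed config key
               recipients=[user.email],
               text_body=render_template('email/reset_password.txt', user=user),
               html_body=render_template('email/reset_password.html', user=user))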
|
experiment.py
|
from abc import ABC, abstractmethod
import logging
import time
import catkit.util
from catkit.testbed import devices
from catkit import datalogging
from catkit.multiprocessing import Process
class SafetyTest(ABC):
name = None
warning = False
@abstractmethod
def check(self):
"""Implement to return two values: boolean for pass/fail, and a string for status message."""
class SafetyException(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
class Experiment(ABC):
"""
Abstract base class that instills safety monitoring into any class that inherits it. Subclasses
need to implement a function called "experiment()", which is designated as an abstractmethod here.
"""
name = None
log = logging.getLogger(__name__)
data_log = datalogging.get_logger(__name__)
def __del__(self):
self.clear_cache()
def __init__(self, safety_tests, safety_check_interval=60, output_path=None, suffix=None):
""" Initialize attributes common to all Experiments.
All child classes should implement their own __init__ and call this via super()
Parameters
----------
safety_tests : list
List of SafetyTest class defs, not already instantiated objects (nothing else should own these).
safety_check_interval : int, float, optional:
Time interval between calling each SafetyTest.check().
output_path: str, optional
Output directory to write all files to (or to subdirectories thereof).
For the vast majority of use cases this should be left as None, in which
case it will be auto-generated based on date-time + suffix.
suffix : str, optional
Descriptive string to include as part of the path.
"""
# Default is to wait to set the path until the experiment starts (rather than the constructor)
# but users can optionally pass in a specific path if they want to do something different in a
# particular case.
self.output_path = output_path
self.suffix = suffix
self.pre_experiment_return = None
self.experiment_return = None
self.post_experiment_return = None
self.safety_check_interval = safety_check_interval
self.safety_tests = []
for test in safety_tests:
self.safety_tests.append(test())
def pre_experiment(self, *args, **kwargs):
""" This is called immediately BEFORE self.experiment()."""
pass
@abstractmethod
def experiment(self, *args, **kwargs):
""" This is where the experiment gets implemented. All child classes must implement this. """
def post_experiment(self, *args, **kwargs):
""" This is called immediately AFTER self.experiment()."""
pass
def start(self):
"""
This function starts the experiment on a separate process and monitors power and humidity while active.
Do not override.
"""
experiment_process = None
try:
self.log.info("Running safety tests...")
# Check tests before starting experiment.
for safety_test in self.safety_tests:
status, msg = safety_test.check()
# msg may have a newline in it; if so split that into separate log messages
for msg_line in msg.split("\n"):
self.log.info(msg_line)
if not status:
errmessage = safety_test.name + " reports unsafe conditions. Aborting experiment before start... Details: {}".format(msg)
print(errmessage)
self.log.critical(errmessage)
raise SafetyException(errmessage)
self.log.info("Safety tests passed!")
# Initialize experiment output path. Do this here so the output path is available in the parent process
self.init_experiment_path()
self.log.info("Creating separate process to run experiment...")
# Spin off and start the process to run the experiment.
experiment_process = Process(target=self.run_experiment, name=self.name)
experiment_process.start()
self.log.info(self.name + " process started")
while experiment_process.is_alive():
for safety_test in self.safety_tests:
status, message = safety_test.check()
if status:
# Check passed, clear any warning that might be set and proceed to sleep until next iteration.
for msg_line in message.split("\n"):
self.log.info(msg_line)
safety_test.warning = False
elif safety_test.warning:
# Shut down the experiment (but allow context managers to exit properly).
errmessage = safety_test.name + " reports unsafe conditions repeatedly. Aborting experiment! Details: {}".format(message)
self.log.critical(errmessage)
catkit.util.soft_kill(experiment_process)
raise SafetyException(errmessage)
else:
errmessage = (message + "\n" + "Warning issued for " + safety_test.name +
". Experiment will be softly killed if safety check fails again.")
self.log.warning(errmessage)
safety_test.warning = True
# Sleep until it is time to check safety again.
if not self.__smart_sleep(self.safety_check_interval, experiment_process):
# Experiment ended before the next check interval, exit the while loop.
break
self.log.info("Experment ended before check interval; exiting.")
except KeyboardInterrupt:
self.log.exception("Parent process: caught ctrl-c, raising exception.")
raise
except SafetyException:
self.log.exception("Safety exception.")
raise
except Exception as e:
safety_exception = SafetyException("Monitoring process caught an unexpected problem: ", e)
self.log.exception(safety_exception)
# Shut down the experiment (but allow context managers to exit properly).
if experiment_process is not None and experiment_process.is_alive():
catkit.util.soft_kill(experiment_process)
# must return SafetyException type specifically to signal queue to stop in typical calling scripts
raise safety_exception
# Explicitly join even though experiment_process.is_alive() will call join() if it's no longer alive.
experiment_process.join() # This will raise any child exceptions.
def run_experiment(self):
"""
Wrapper for experiment to catch the softkill function's KeyboardInterrupt signal more gracefully.
Do not override.
"""
data_log_writer = None
# NOTE: This try/finally IS THE context manager for any cache cleared by self.clear_cache().
try:
self.init_experiment_log()
# Set up data log writer
data_log_writer = datalogging.DataLogWriter(self.output_path)
datalogging.DataLogger.add_writer(data_log_writer)
# De-restrict device cache access.
global devices
with devices:
# Allow pre_experiment() to cache some devices to test for persistence (dev assertions only).
self._persistence_checker = {}
# Run pre-experiment code, e.g., open devices, run calibrations, etc.
self.pre_experiment_return = self.pre_experiment()
# Assert some devices remained opened.
for device in self._persistence_checker.values():
assert device.instrument # Explicit test that instrument remained open.
# Run the core experiment.
self.experiment_return = self.experiment()
# Run any post-experiment analysis, etc.
self.post_experiment_return = self.post_experiment()
except KeyboardInterrupt:
self.log.warning("Child process: caught ctrl-c, raising exception.")
raise
except Exception as error:
self.log.exception(error)
raise
finally:
self.clear_cache()
# Release data log writer
if data_log_writer:
datalogging.DataLogger.remove_writer(data_log_writer)
data_log_writer.close()
@abstractmethod
def clear_cache(self):
""" Injection layer for deleting any global caches. """
pass
@staticmethod
def __smart_sleep(interval, process):
"""
Sleep function that will return False at most 1 second after a process ends. Rather than sleeping for
the entire interval, it sleeps in 1-second increments and checks whether the process is alive each time.
This allows the master script to end when the experiment is finished.
Do not override.
:param interval: check_interval from ini.
:param process: experiment process to monitor while sleeping.
:return: True if monitoring should continue, False if the experiment is done.
"""
sleep_count = 0
while process.is_alive():
time.sleep(1)
sleep_count += 1
if sleep_count == interval:
return True
return False
@abstractmethod
def init_experiment_path(self):
""" Set up experiment output. Called from start() prior to experiment(). """
pass
@abstractmethod
def init_experiment_log(self):
""" Initialize log writing. Called from run_experiment() prior to experiment(). """
pass
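# --- Editor's sketch (hypothetical subclass, not part of catkit) ------------
# A minimal concrete Experiment: subclasses only fill in the abstract hooks,
# then start() runs experiment() in a child Process while the parent loops
# over the SafetyTest instances. Names below are illustrative only.
class NoOpSafetyTest(SafetyTest):
    name = "no-op check"
    def check(self):
        return True, "always safe"
class SleepExperiment(Experiment):
    name = "sleep_experiment"
    def experiment(self):
        time.sleep(2)
    def clear_cache(self):
        pass
    def init_experiment_path(self):
        self.output_path = self.output_path or "."
    def init_experiment_log(self):
        pass
# SleepExperiment(safety_tests=[NoOpSafetyTest], safety_check_interval=1).start()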
|
emergency_stop.py
|
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import os
import sys
import threading
from actionlib_msgs.msg import GoalID
from std_msgs.msg import Bool
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
from airbus_cobot_gui.alarm import Alarm
from airbus_cobot_gui.res import R
## @class EmergencyStopState
class EmergencyStopState:
LOCKED = True
UNLOCKED = False
## @class EmergencyStopButton
class EmergencyStopButton(QPushButton):
EMERGENCY_STOP_TOPIC_NAME = rospy.get_param('emergency_stop_topic_name','/emergency_stop/state')
def __init__(self, context):
"""! The constructor."""
QPushButton.__init__(self)
self._context = context
self._context.addLanguageEventListner(self.onTranslate)
self._context.addCloseEventListner(self.onDestroy)
self.setCheckable(True)
self.setFocusPolicy(Qt.NoFocus)
self.setStyleSheet(R.values.styles.transparent_background)
self.setIcon(R.getIconById("icon_pause"))
self.setIconSize(QSize(80,80))
self._button_state = EmergencyStopState.UNLOCKED
self._keep_running = True
self.connect(self,SIGNAL('clicked(bool)'),self._trigger_button)
self._estop_pub = rospy.Publisher(self.EMERGENCY_STOP_TOPIC_NAME,
Bool, latch=True, queue_size=1)
self._preempt_move_base_pub = rospy.Publisher("/move_base/cancel", GoalID, queue_size=1)
self._estop_pub_thread = threading.Thread(name='emergency_stop_publisher_loop',
target=self._emergency_stop_publisher_loop)
self._estop_pub_thread.start()
def _trigger_button(self, checked):
"""Called when user click on ermergency stop button.
@param checked: Button status (True/False).
@type checked: bool.
"""
self._context.resquestEmergencyStop(checked)
self._button_state = checked
if checked == EmergencyStopState.LOCKED:
lng = self._context.getLanguage()
self._context.sendAlarm(Alarm.CRITICAL, R.values.strings.emergency_stop(lng))
self.setIcon(R.getIconById("icon_play"))
else:
self.setIcon(R.getIconById("icon_pause"))
def _emergency_stop_publisher_loop(self):
"""Loop to publish the emergency stop status."""
r = rospy.Rate(10) # 10hz
while not rospy.is_shutdown() and self._keep_running:
if self._button_state == EmergencyStopState.UNLOCKED:
self._estop_pub.publish(Bool(True))
else:
self._preempt_move_base_pub.publish(GoalID())
self._estop_pub.publish(Bool(False))
r.sleep()
def onTranslate(self, lng):
pass
def onDestroy(self):
"""Called when appli closes."""
self._keep_running = False
##Unittest
if __name__ == "__main__":
from airbus_cobot_gui.context import Context
rospy.init_node('unittest_emergency_stop_2')
a = QApplication(sys.argv)
utt_appli = QMainWindow()
context = Context(utt_appli)
context.requestNewLanguage('fr')
estop = EmergencyStopButton(context)
estop.setIconSize(QSize(80,80))
utt_appli.setCentralWidget(estop)
utt_appli.show()
a.exec_()
estop.onDestroy()
#End of file
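# --- Editor's sketch (hypothetical consumer, not part of airbus_cobot_gui) --
# The button publishes a latched std_msgs/Bool on EMERGENCY_STOP_TOPIC_NAME:
# True while unlocked, False (plus a move_base cancel) while locked. Any other
# node can react with a plain subscriber; the callback below is illustrative:
#
#     def on_estop_state(msg):
#         if not msg.data:
#             rospy.logwarn("Emergency stop engaged, halting motion")
#
#     rospy.Subscriber('/emergency_stop/state', Bool, on_estop_state)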
|
master.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2014-6-12
@author: chine
'''
import os
import time
import threading
try:
import cPickle as pickle
except ImportError:
import pickle
import tempfile
import shutil
from cola.functions.counter import CounterServer
from cola.functions.budget import BudgetApplyServer, ALLFINISHED
from cola.functions.speed import SpeedControlServer
from cola.cluster.tracker import WorkerTracker, JobTracker
from cola.cluster.stage import Stage
from cola.core.rpc import FileTransportServer, FileTransportClient, \
client_call
from cola.core.zip import ZipHandler
from cola.core.utils import import_job_desc
from cola.core.logs import get_logger, LogRecordSocketReceiver
RUNNING, HANGUP, STOPPED = range(3)
STATUSES = ['RUNNING', 'HANGUP', 'STOPPED']
CONTINOUS_HEARTBEAT = 90
HEARTBEAT_INTERVAL = 20
HEARTBEAT_CHECK_INTERVAL = 3*HEARTBEAT_INTERVAL
JOB_CHECK_INTERVAL = 5
JOB_META_STATUS_FILENAME = 'job.meta.status'
class JobMaster(object):
def __init__(self, ctx, job_name, job_desc, workers):
self.working_dir = os.path.join(ctx.working_dir, 'master',
'tracker', job_name)
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
self.job_name = job_name
self.job_desc = job_desc
self.settings = job_desc.settings
self.rpc_server = ctx.master_rpc_server
self.inited = False
self.lock = threading.Lock()
self.init()
self.workers = workers
def _init_counter_server(self):
counter_dir = os.path.join(self.working_dir, 'counter')
self.counter_server = CounterServer(counter_dir, self.settings,
rpc_server=self.rpc_server,
app_name=self.job_name)
def _init_budget_server(self):
budget_dir = os.path.join(self.working_dir, 'budget')
self.budget_server = BudgetApplyServer(budget_dir, self.settings,
rpc_server=self.rpc_server,
app_name=self.job_name)
def _init_speed_server(self):
speed_dir = os.path.join(self.working_dir, 'speed')
self.speed_server = SpeedControlServer(speed_dir, self.settings,
rpc_server=self.rpc_server,
app_name=self.job_name)
def init(self):
with self.lock:
if self.inited:
return
self._init_counter_server()
self._init_budget_server()
self._init_speed_server()
self.inited = True
def remove_worker(self, worker):
if worker not in self.workers:
return
# rpc call the other workers to remove this worker
self.workers.remove(worker)
for node in self.workers:
client_call(node, 'remove_node', worker)
def add_worker(self, worker):
if worker in self.workers:
return
# rpc call the other workers to add this worker
for node in self.workers:
client_call(node, 'add_node', worker)
self.workers.append(worker)
def has_worker(self, worker):
return worker in self.workers
def shutdown(self):
with self.lock:
if not self.inited:
return
self.counter_server.shutdown()
self.budget_server.shutdown()
self.speed_server.shutdown()
self.inited = False
class Master(object):
def __init__(self, ctx):
self.ctx = ctx
self.rpc_server = self.ctx.master_rpc_server
assert self.rpc_server is not None
self.working_dir = os.path.join(self.ctx.working_dir, 'master')
self.zip_dir = os.path.join(self.working_dir, 'zip')
self.job_dir = os.path.join(self.working_dir, 'jobs')
if not os.path.exists(self.zip_dir):
os.makedirs(self.zip_dir)
if not os.path.exists(self.job_dir):
os.makedirs(self.job_dir)
self.worker_tracker = WorkerTracker()
self.job_tracker = JobTracker()
self.black_list = []
self.stopped = threading.Event()
self.logger = get_logger("cola_master")
self._init_log_server(self.logger)
self._register_rpc()
self.load()
FileTransportServer(self.rpc_server, self.zip_dir)
def load(self):
self.runned_job_metas = {}
job_meta_file = os.path.join(self.working_dir, JOB_META_STATUS_FILENAME)
if os.path.exists(job_meta_file) and \
os.path.getsize(job_meta_file) > 0:
try:
with open(job_meta_file) as f:
self.runned_job_metas = pickle.load(f)
except:
pass
def save(self):
job_meta_file = os.path.join(self.working_dir, JOB_META_STATUS_FILENAME)
with open(job_meta_file, 'w') as f:
pickle.dump(self.runned_job_metas, f)
def _register_rpc(self):
self.rpc_server.register_function(self.run_job, 'run_job')
self.rpc_server.register_function(self.stop_job, 'stop_job')
self.rpc_server.register_function(self.pack_job_error, 'pack_job_error')
self.rpc_server.register_function(self.list_runnable_jobs,
'runnable_jobs')
self.rpc_server.register_function(lambda: self.job_tracker.running_jobs.keys(),
'running_jobs')
self.rpc_server.register_function(self.list_workers,
'list_workers')
self.rpc_server.register_function(self.shutdown, 'shutdown')
self.rpc_server.register_function(self.register_heartbeat,
'register_heartbeat')
def register_heartbeat(self, worker):
need_to_add_worker_nodes = self.worker_tracker.register_worker(worker)
if need_to_add_worker_nodes is not None:
for node in need_to_add_worker_nodes:
client_call(node, 'add_node', worker)
return self.worker_tracker.workers.keys()
def _init_log_server(self, logger):
self.log_server = LogRecordSocketReceiver(host=self.ctx.ip,
logger=self.logger)
self.log_t = threading.Thread(target=self.log_server.serve_forever)
self.log_t.start()
def _shutdown_log_server(self):
if hasattr(self, 'log_server'):
self.log_server.shutdown()
self.log_t.join()
def _check_workers(self):
while not self.stopped.is_set():
for worker, info in self.worker_tracker.workers.iteritems():
# if loose connection
if int(time.time()) - info.last_update \
> HEARTBEAT_CHECK_INTERVAL:
info.continous_register = 0
if info.status == RUNNING:
info.status = HANGUP
elif info.status == HANGUP:
info.status = STOPPED
self.black_list.append(worker)
for job in self.job_tracker.running_jobs:
self.job_tracker.remove_worker(job, worker)
# if continously connect for more than 10 min
elif info.continous_register >= CONTINOUS_HEARTBEAT:
if info.status != RUNNING:
info.status = RUNNING
if worker in self.black_list:
self.black_list.remove(worker)
for job in self.job_tracker.running_jobs:
if not client_call(worker, 'has_job'):
client_call(worker, 'prepare', job)
client_call(worker, 'run_job', job)
self.job_tracker.add_worker(job, worker)
self.stopped.wait(HEARTBEAT_CHECK_INTERVAL)
def _check_jobs(self):
while not self.stopped.is_set():
for job_master in self.job_tracker.running_jobs.values():
if job_master.budget_server.get_status() == ALLFINISHED:
self.stop_job(job_master.job_name)
self.job_tracker.remove_job(job_master.job_name)
self.stopped.wait(JOB_CHECK_INTERVAL)
def _unzip(self, job_name):
zip_file = os.path.join(self.zip_dir, job_name+'.zip')
job_path = os.path.join(self.job_dir, job_name)
if os.path.exists(job_path):
shutil.rmtree(job_path)
if os.path.exists(zip_file):
ZipHandler.uncompress(zip_file, self.job_dir)
def _register_runned_job(self, job_name, job_desc):
self.runned_job_metas[job_name] = {'job_name': job_desc.name,
'created': time.time()}
def run(self):
self._worker_t = threading.Thread(target=self._check_workers)
self._worker_t.start()
self._job_t = threading.Thread(target=self._check_jobs)
self._job_t.start()
def run_job(self, job_name, unzip=False,
wait_for_workers=False):
if wait_for_workers:
while not self.stopped.is_set():
if len(self.worker_tracker.workers) > 0:
break
stopped = self.stopped.wait(3)
if stopped:
return
if unzip:
self._unzip(job_name)
job_path = os.path.join(self.job_dir, job_name)
job_desc = import_job_desc(job_path)
job_master = JobMaster(self.ctx, job_name, job_desc,
self.worker_tracker.workers.keys())
job_master.init()
self.job_tracker.register_job(job_name, job_master)
self._register_runned_job(job_name, job_desc)
zip_file = os.path.join(self.zip_dir, job_name+'.zip')
for worker in job_master.workers:
FileTransportClient(worker, zip_file).send_file()
self.logger.debug(
'entering the master prepare stage, job id: %s' % job_name)
self.logger.debug(
'job available workers: %s' % job_master.workers)
stage = Stage(job_master.workers, 'prepare', logger=self.logger)
prepared_ok = stage.barrier(True, job_name)
if not prepared_ok:
self.logger.error("prepare for running failed")
return
self.logger.debug(
'entering the master run_job stage, job id: %s' % job_name)
stage = Stage(job_master.workers, 'run_job', logger=self.logger)
run_ok = stage.barrier(True, job_name)
if not run_ok:
self.logger.error("run job failed, job id: %s" % job_name)
def stop_job(self, job_name):
job_master = self.job_tracker.get_job_master(job_name)
stage = Stage(job_master.workers, 'stop_job')
stage.barrier(True, job_name)
stage = Stage(job_master.workers, 'clear_job')
stage.barrier(True, job_name)
self.job_tracker.remove_job(job_name)
self.logger.debug('stop job: %s' % job_name)
def pack_job_error(self, job_name):
job_master = self.job_tracker.get_job_master(job_name)
stage = Stage(job_master.workers, 'pack_job_error')
stage.barrier(True, job_name)
error_dir = os.path.join(self.working_dir, 'errors')
if not os.path.exists(error_dir):
os.makedirs(error_dir)
error_filename = os.path.join(error_dir, '%s_errors.zip'%job_name)
suffix = '%s_errors.zip' % job_name
temp_dir = tempfile.mkdtemp()
try:
for name in os.listdir(self.zip_dir):
if name.endswith(suffix):
shutil.move(os.path.join(self.zip_dir, name), temp_dir)
ZipHandler.compress(error_filename, temp_dir)
finally:
shutil.rmtree(temp_dir)
return error_filename
def list_runnable_jobs(self):
job_dirs = filter(lambda s: os.path.isdir(os.path.join(self.job_dir, s)),
os.listdir(self.job_dir))
jobs = {}
for job_dir in job_dirs:
desc = import_job_desc(os.path.join(self.job_dir, job_dir))
jobs[job_dir] = desc.name
return jobs
def has_running_jobs(self):
return len(self.job_tracker.running_jobs) > 0
def list_workers(self):
return [(worker, STATUSES[worker_info.status]) for worker, worker_info \
in self.worker_tracker.workers.iteritems()]
def _stop_all_jobs(self):
for job_name in self.job_tracker.running_jobs.keys():
self.stop_job(job_name)
def _shutdown_all_workers(self):
stage = Stage(self.worker_tracker.workers.keys(), 'shutdown')
stage.barrier(True)
def shutdown(self):
if not hasattr(self, '_worker_t'):
return
if not hasattr(self, '_job_t'):
return
self.logger.debug('master starts to shutdown')
self.stopped.set()
self._stop_all_jobs()
self._shutdown_all_workers()
self._worker_t.join()
self._job_t.join()
self.save()
self.rpc_server.shutdown()
self.logger.debug('master shutdown finished')
self._shutdown_log_server()
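# --- Editor's sketch (illustrative, not part of cola) ------------------------
# _check_workers() implements a small per-worker state machine driven by
# heartbeats: missing a heartbeat for HEARTBEAT_CHECK_INTERVAL seconds moves a
# worker RUNNING -> HANGUP -> STOPPED (and onto the black list), while
# CONTINOUS_HEARTBEAT consecutive registrations promote it back to RUNNING and
# re-assign it the running jobs. The transition logic in isolation:
def _next_worker_status(status, heartbeat_timed_out, continuous_registers):
    if heartbeat_timed_out:
        return HANGUP if status == RUNNING else STOPPED
    if continuous_registers >= CONTINOUS_HEARTBEAT:
        return RUNNING
    return status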
|
beam_worker_pool_service.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import atexit
import functools
import logging
import os
import sys
import threading
import traceback
import grpc
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import ProfilingOptions
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import endpoints_pb2
from apache_beam.portability.api.beam_provision_api_pb2 import GetProvisionInfoRequest
from apache_beam.portability.api.beam_provision_api_pb2_grpc import ProvisionServiceStub
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.utils import thread_pool_executor, profiler
from google.protobuf import json_format
from pyflink.fn_execution.beam import beam_sdk_worker_main # noqa # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
class BeamFnLoopbackWorkerPoolServicer(beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolServicer):
"""
Worker pool entry point.
The worker pool exposes an RPC service that is used in MiniCluster to start and stop the Python
SDK workers.
The worker pool uses child threads for parallelism
"""
def __init__(self):
self._worker_server = None
self._parse_param_lock = threading.Lock()
def start(self):
worker_server = grpc.server(
thread_pool_executor.shared_unbounded_instance())
worker_address = 'localhost:%s' % worker_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnExternalWorkerPoolServicer_to_server(self, worker_server)
worker_server.start()
_LOGGER.info('Listening for workers at %s', worker_address)
self._worker_server = worker_server
atexit.register(functools.partial(worker_server.stop, 1))
return worker_address
def StartWorker(self,
start_worker_request: beam_fn_api_pb2.StartWorkerRequest,
unused_context):
try:
worker_thread = threading.Thread(
name='run_worker_%s' % start_worker_request.worker_id,
target=functools.partial(self._start_sdk_worker_main, start_worker_request))
worker_thread.daemon = True
worker_thread.start()
return beam_fn_api_pb2.StartWorkerResponse()
except Exception:
return beam_fn_api_pb2.StartWorkerResponse(error=traceback.format_exc())
def StopWorker(self,
stop_worker_request: beam_fn_api_pb2.StopWorkerRequest,
unused_context):
pass
def _start_sdk_worker_main(self, start_worker_request: beam_fn_api_pb2.StartWorkerRequest):
params = start_worker_request.params
self._parse_param_lock.acquire()
if 'PYTHONPATH' in params:
python_path_list = params['PYTHONPATH'].split(':')
python_path_list.reverse()
for path in python_path_list:
sys.path.insert(0, path)
if '_PYTHON_WORKING_DIR' in params:
os.chdir(params['_PYTHON_WORKING_DIR'])
os.environ.update(params)
self._parse_param_lock.release()
# read job information from provision stub
metadata = [("worker_id", start_worker_request.worker_id)]
provision_endpoint = start_worker_request.provision_endpoint.url
with grpc.insecure_channel(provision_endpoint) as channel:
client = ProvisionServiceStub(channel=channel)
info = client.GetProvisionInfo(GetProvisionInfoRequest(), metadata=metadata).info
options = json_format.MessageToJson(info.pipeline_options)
logging_endpoint = info.logging_endpoint.url
control_endpoint = info.control_endpoint.url
try:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor(url=logging_endpoint)
# Send all logs to the runner.
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger().addHandler(fn_log_handler)
except Exception:
_LOGGER.error(
"Failed to set up logging handler, continuing without.",
exc_info=True)
fn_log_handler = None
sdk_pipeline_options = sdk_worker_main._parse_pipeline_options(options)
_worker_id = start_worker_request.worker_id
try:
control_service_descriptor = endpoints_pb2.ApiServiceDescriptor(url=control_endpoint)
status_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
experiments = sdk_pipeline_options.view_as(DebugOptions).experiments or []
enable_heap_dump = 'enable_heap_dump' in experiments
SdkHarness(
control_address=control_service_descriptor.url,
status_address=status_service_descriptor.url,
worker_id=_worker_id,
state_cache_size=sdk_worker_main._get_state_cache_size(experiments),
data_buffer_time_limit_ms=sdk_worker_main._get_data_buffer_time_limit_ms(
experiments),
profiler_factory=profiler.Profile.factory_from_options(
sdk_pipeline_options.view_as(ProfilingOptions)),
enable_heap_dump=enable_heap_dump).run()
except: # pylint: disable=broad-except
_LOGGER.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close()
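# --- Editor's sketch (hypothetical usage, not part of pyflink) ---------------
# The pool is started in-process and hands back the address that the runner
# should point its external environment at; StartWorker() then spawns one
# daemon thread per worker_id it receives:
#
#     worker_pool = BeamFnLoopbackWorkerPoolServicer()
#     loopback_address = worker_pool.start()   # e.g. 'localhost:54321'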
|
run_all.py
|
"""Run all test cases.
Run each file in a separate process to avoid GPU memory conflicts.
Usages:
python3 run_all.py
python3 run_all.py --filter pipeline
python3 run_all.py --filter auto_sharding
"""
import argparse
import glob
import multiprocessing
import os
import time
from typing import Sequence
import unittest
slow_testcases = set([
"test_pipeline_stage_construction.py",
])
def run_unittest_files(files, args):
"""Run unit test files one by one in separates processes."""
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = str(args.xla_client_mem_fraction)
for filename in files:
if not filename.startswith("test"):
continue
if args.filter is not None and args.filter not in filename:
continue
if not args.enable_slow_tests and filename in slow_testcases:
continue
def func():
ret = unittest.main(module=None, argv=["", "-vb"] + [filename])
p = multiprocessing.Process(target=func)
p.start()
p.join()
if p.exitcode != 0:
return False
return True
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--filter",
type=str,
default=None,
help="Run test cases whose names contain the filter string")
arg_parser.add_argument(
"--enable_slow_tests",
action="store_true",
help="Run test cases including profiling, which takes a long time")
arg_parser.add_argument(
"--xla_client_mem_fraction",
type=float,
default=0.2,
help="The fraction of GPU memory used to run unit tests")
args = arg_parser.parse_args()
files = glob.glob("*.py")
files.sort()
tic = time.time()
success = run_unittest_files(files, args)
if success:
print(f"Success. Time elapsed: {time.time() - tic:.2f}s")
else:
print(f"Fail. Time elapsed: {time.time() - tic:.2f}s")
exit(0 if success else -1)
|
stream_create_and_transcribe.py
|
#!/usr/bin/env python
# Copyright 2021 Hyperia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from hyperia import Hyperia
import json
import sys
import websocket
import time
import threading
import json
import requests
def sender_thread(ws, file):
print("Starting sender loop.")
bytes = file.read(640)
while bytes:
ws.send_binary(bytes)
bytes = file.read(640)
time.sleep(0.02)
print("Finished sending")
def receiver_thread(ws):
print("Starting receiver loop.")
while True:
message = ws.recv()
print(message)
print("")
print("Receiver exiting")
def open_websockets(socket_id, audio_socket, event_socket, file_path):
print(f"Connencting to socket {socket_id}")
ws_send = websocket.WebSocket()
socket_url = audio_socket
print(socket_url)
ws_send.connect(socket_url)
ws_recv = websocket.WebSocket()
socket_url = event_socket
print(socket_url)
ws_recv.connect(socket_url)
print("Connected..")
file = open(file_path, "rb")
send_thread = threading.Thread(target=sender_thread, args=(ws_send, file))
recv_thread = threading.Thread(target=receiver_thread, args=(ws_recv,))
print("Starting receiver.")
recv_thread.start()
print("Starting sender.")
send_thread.start()
send_thread.join()
if len(sys.argv) < 2:
print("Error! Usage: stream_create_and_transcribe.py <FILENAME")
exit(-1)
file_path = sys.argv[1]
# Create the Hyperia Object
hyperia = Hyperia()
print('')
print('')
print('############################################')
print('# Create Streaming ASR Example #')
print('############################################')
print('')
print('')
response = hyperia.stream_create()
print('## Response Object ##')
print(json.dumps(response, indent=4))
print('')
print('')
print('')
stream_id = response['result']['stream_id']
print(f"Created stream {stream_id}")
time.sleep(1)
audio_socket = response['result']['audio_socket']
event_socket = response['result']['event_socket']
open_websockets(stream_id, audio_socket, event_socket, file_path)
|
rocket.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from multiprocessing import Process, Queue
import time
class Rocket(object):
"""
Flies around
"""
home_departure_template = u"Departing to {}"
target_approach_template = u"Approaching {}"
target_landed_template = u"Landed on {}"
target_departure_template = u"Now flying back to {}"
home_approach_template = u"Approaching {}"
home_landed_template = u"Landed, ready for next mission"
explore_begin_template = u"Exploring {}, this is interesting"
explore_begin_file = "http://www.dummysoftware.com/mars/lander.jpg"
explore_end_template = u"End of the exploration"
blast_begin_template = u"Blasting {}, nobody will survive"
blast_end_template = u"{} has been entirely blasted"
blast_end_file = "http://blogs.discovermagazine.com/badastronomy/files/2012/07/nuke_castleromeo.jpg"
counter = 0
def __init__(self, bot, inbox=None):
"""
Flies around
:param bot: the bot associated to this rocket
:type bot: ShellBot
:param inbox: queue to get commands
:type inbox: Queue
"""
self.bot = bot
self.inbox = inbox if inbox else Queue()
def go(self, action, planet):
"""
Engages a new mission
"""
if not self.bot.recall('rocket.busy', False):
self.bot.say(u"Ok, working on it")
else:
self.bot.say(u"Ok, will work on it as soon as possible")
self.inbox.put((action, planet))
def start(self):
"""
Starts the working process
:return: either the process that has been started, or None
This function starts a separate daemonic process to work
in the background.
"""
p = Process(target=self.run)
p.daemon = True
p.start()
return p
def run(self):
"""
Continuously processes commands
This function is looping on items received from the queue, and
is handling them one by one in the background.
Processing should be handled in the background, like
in the following example::
rocket = Rocket(bot=my_bot)
handle = rocket.start()
...
handle.join()
The recommended way for stopping the process is to change the
parameter ``general.switch`` in the context. For example::
engine.set('general.switch', 'off')
Alternatively, the loop is also broken when an exception is pushed
to the queue. For example::
inbox.put(None)
"""
logging.info(u"Starting rocket")
self.counter = 0
self.bot.remember('rocket.busy', False)
try:
while self.bot.engine.get('general.switch', 'on') == 'on':
if self.inbox.empty():
time.sleep(0.005)
continue
try:
item = self.inbox.get(True, 0.1)
if item is None:
break
self.bot.remember('rocket.busy', True)
self.process(item)
self.bot.remember('rocket.busy', False)
except Exception as feedback:
logging.exception(feedback)
except KeyboardInterrupt:
pass
finally:
logging.info(u"Rocket has been stopped")
def process(self, item):
"""
Processes one action
:param item: the action to perform
:type item: list or tuple
Example actions::
rocket.process(item=('explore', 'Venus'))
rocket.process(item=('blast', 'Mars'))
"""
(verb, planet) = item
assert verb in ('explore', 'blast')
planet = planet.capitalize()
logging.debug(u"Rocket is working on '{} {}'".format(verb, planet))
items = self.bot.recall('planets', [])
if planet not in items:
self.bot.say(u"Planet '{}' is unknown".format(planet))
return
self.counter += 1
self.on_home_departure(planet)
self.on_target_approach(planet)
self.on_target_landing(planet)
if verb == 'blast':
self.on_target_blast(planet)
else:
self.on_target_explore(planet)
self.on_target_departure('Earth')
self.on_home_approach('Mother Earth')
self.on_home_landing('Mother Earth')
def on_home_departure(self, planet, duration=9):
self.bot.say(u"#{} - ".format(self.counter)
+ self.home_departure_template.format(planet))
time.sleep(duration)
def on_target_approach(self, planet, duration=3):
self.bot.say(u"#{} - ".format(self.counter)
+ self.target_approach_template.format(planet))
time.sleep(duration)
def on_target_landing(self, planet, duration=1):
self.bot.say(u"#{} - ".format(self.counter)
+ self.target_landed_template.format(planet))
time.sleep(duration)
def on_target_explore(self, planet, duration=2):
self.bot.say(u"#{} - ".format(self.counter)
+ self.explore_begin_template.format(planet),
file=self.explore_begin_file)
time.sleep(duration)
self.bot.say(u"#{} - ".format(self.counter)
+ self.explore_end_template.format(planet))
time.sleep(1)
def on_target_blast(self, planet, duration=2):
self.bot.say(u"#{} - ".format(self.counter)
+ self.blast_begin_template.format(planet))
time.sleep(duration)
items = self.bot.recall('planets', [])
items.remove(planet)
self.bot.remember('planets', items)
self.bot.say(u"#{} - ".format(self.counter)
+ self.blast_end_template.format(planet),
file=self.blast_end_file)
time.sleep(1)
def on_target_departure(self, planet, duration=9):
self.bot.say(u"#{} - ".format(self.counter)
+ self.target_departure_template.format(planet))
time.sleep(duration)
def on_home_approach(self, planet, duration=3):
self.bot.say(u"#{} - ".format(self.counter)
+ self.home_approach_template.format(planet))
time.sleep(duration)
def on_home_landing(self, planet, duration=1):
self.bot.say(u"#{} - ".format(self.counter)
+ self.home_landed_template.format(planet))
time.sleep(duration)
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
import traceback
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib # pylint: disable=unused-import
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
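# --- Editor's sketch (illustrative, not part of sanitycheck) -----------------
# from_line() turns one CMakeCache.txt line into a typed entry, for example:
#
#     CMakeCacheEntry.from_line('BOARD:STRING=qemu_x86', 1).value == 'qemu_x86'
#     CMakeCacheEntry.from_line('CONFIG_COVERAGE:BOOL=ON', 2).value == 1
#     CMakeCacheEntry.from_line('// a comment line', 3) is None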
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# Encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer versions of ninja don't seem to pass SIGTERM down to the
# children, so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial or i.get('serial_pty', None):
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None) or self.suite.west_runner
serial_pty = hardware.get('serial_pty', None)
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(serial_pty, stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware['serial']
logger.debug("Using serial device {}".format(serial_device))
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--snr")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process exection time to mostly simulate the time of guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
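# Worker thread: create the QEMU in/out FIFOs, poll QEMU's output pipe line by line,
# feed each line to the harness and translate the outcome into a handler state.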
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering; we don't want read() or poll()
# to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
# There's a possibility we polled nothing because the host
# didn't schedule enough CPU time to the QEMU process
# during p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a failure, make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, the test is making progress; we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code coverage is
# enabled since dumping this information can take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
if pid:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
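# Start the monitor thread, launch QEMU through the generator's 'run' target
# and reconcile the harness state with QEMU's return code.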
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU can't handle the SIGTERM signal correctly;
# in that case kill the QEMU process directly (SIGKILL) and let
# sanitycheck judge the test result from the console output
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
logger.debug(f"No timeout, return code from qemu: {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join()
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
logger.debug(f"return code from qemu: {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
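# Computes the RAM/ROM footprint of a built ELF by summing the sizes of the known
# allocatable, read/write and read-only sections reported by 'objdump -h'.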
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP cannot be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
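# Convert a raw YAML value to the Python type named by 'typestr':
# str, float, int, bool, list[:T], set[:T] or map.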
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % value)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
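# Scan a C source file for a ztest_test_suite()/ztest_run_test_suite() pair and return
# the unit test names declared between them, plus any scan warnings.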
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared on the same
# line, as we only search starting at the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "", 1) for match in _matches]
return matches, warnings
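# Collect sub-test names from the C sources under <path>/src (where scan warnings are fatal)
# and directly under <path> (where they are only logged).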
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
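# Build the fully qualified case names (<id>.<subcase>); fall back to the bare test id
# when no ztest subcases were found.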
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.build_only = True
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
# Global testsuite parameters
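# Decide whether this instance can actually be run (unit test, native or simulated platform,
# or --device-testing) or must stay build-only; harness and fixture requirements can also force build-only.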
def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=[]):
# right now we only support building on Windows; running is still a
# work in progress.
if os.name == 'nt':
self.build_only = True
self.run = False
return
_build_only = True
# we asked for build-only on the command line
if build_only or self.testcase.build_only:
self.build_only = True
self.run = False
return
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
self.build_only = True
self.run = False
return
runnable = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb", "nsim", "renode", "qemu"] or \
device_testing)
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
runnable = False
if self.platform.simulation == "mdb":
if not find_executable("mdb"):
runnable = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
runnable = False
# console harness allows us to run the test and capture data.
if self.testcase.harness in [ 'console', 'ztest']:
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = self.testcase.harness_config.get('fixture')
if fixture:
if fixture in fixtures:
_build_only = False
else:
_build_only = True
else:
_build_only = False
elif self.testcase.harness:
_build_only = True
else:
_build_only = False
self.build_only = not (not _build_only and runnable)
self.run = not self.build_only
return
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance level status
to the test cases inside. Useful when the whole instance is skipped
and the info is required also at the test cases level for reporting.
Should be used with caution, e.g. should not be used
to fill all results with passes
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
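# Overridden in FilterBuilder; the base class has nothing to parse.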
def parse_generated(self):
self.defconfig = {}
return {}
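# Run the build through cmake, capture its log and map the result onto the instance status:
# passed, skipped on FLASH/RAM/SRAM overflow, or error.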
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred; classify it as a skip (region overflow) or a build failure
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
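# Configure the build: assemble EXTRA_{C,A,LD}FLAGS, the board define and any extra arguments,
# invoke cmake and parse the generated configuration on success.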
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
else:
ldflags = cflags = aflags = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
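# CMake subclass used for the filter stage: it parses the generated .config, the CMake cache
# and the devicetree pickle so testcase filter expressions can be evaluated against them.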
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
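# Drives one test instance through the pipeline: cmake -> build -> run -> report -> cleanup.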
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
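# Pipeline state machine: handle one queued operation and enqueue the follow-up step for this instance.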
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process: call cmake, then build with the configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
self.suite.build_filtered_tests += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.status}")
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
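# Remove everything from the build directory except a small allow-list of logs and the generated .config.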
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
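# Update the suite counters and print either a verbose per-test line or the compact progress summary.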
def report_out(self):
total_tests_width = len(str(self.suite.total_to_do))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
self.suite.total_errors += 1
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_to_do,
Fore.RESET,
int((float(self.suite.total_done) / self.suite.total_to_do) * 100),
Fore.YELLOW if self.suite.build_filtered_tests > 0 else Fore.RESET,
self.suite.build_filtered_tests,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan or self.ubsan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
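# Top-level container: loads board and testcase configurations, builds the list of test instances,
# applies the command-line filters and produces the reports.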
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_skipped_cases = 0 # number of skipped test cases
self.total_to_do = 0 # number of test instances to be run
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.build_filtered_tests = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
# run integration tests only
self.integration = False
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self):
self.total_tests = len(self.instances)
self.total_cases = 0
self.total_skipped = 0
self.total_skipped_cases = 0
self.total_passed = 0
for instance in self.instances.values():
self.total_cases += len(instance.testcase.cases)
if instance.status == 'skipped':
self.total_skipped += 1
self.total_skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
self.total_passed += 1
for res in instance.results.values():
if res == 'SKIP':
self.total_skipped_cases += 1
self.total_to_do = self.total_tests - self.total_skipped
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_passed) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_passed,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases - self.total_skipped_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run - self.total_skipped}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
device_testing_filter = kwargs.get('device_testing')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if device_testing_filter:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.build_only = False
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if device_testing_filter and instance.build_only:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = discards.get(instance, "Not a default test platform")
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
for instance in self.discards:
instance.reason = self.discards[instance]
instance.status = "skipped"
instance.fill_results_by_status()
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped', 'error']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch the next work item from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception:' % (test,))
for line in traceback.format_exc().splitlines():
logger.error(line)
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
total = 0
if platform:
selected = [platform]
else:
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and not instance.build_only:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
logger.error(f"Unknown status {instance.status}")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(instance.build_only and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
log_dir = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_dir, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (instance.build_only and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' \
or (instance.build_only and instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
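# Illustrative usage sketch (not part of the original code; names below are
# assumptions): a caller picks a tool via the factory, points it at the gcov
# binary and the source root, then generates the report.
#
#   tool = CoverageTool.factory("lcov")
#   tool.gcov_tool = "gcov"            # assumed name/path of the gcov executable
#   tool.base_dir = ZEPHYR_BASE        # root of the sources being measured
#   tool.add_ignore_file("generated")  # optional ignore pattern
#   tool.generate(outdir)              # writes coverage/ and coverage.log under outdir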
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# If kobject_hash appears in the coverage data, gcovr fails,
# so skip it. This problem only occurs in gcovr v4.1.
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added so genhtml does not exit
# on sources it cannot find under samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
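# For reference, _interleave_list prefixes every entry with the given flag;
# this is how the gcovr exclude/tracefile argument lists are built below, e.g.:
#   Gcovr._interleave_list("-e", ["foo/.*", "bar/.*"])
#   -> ["-e", "foo/.*", "-e", "bar/.*"]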
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform, pre_script, is_pty):
device = {
"serial": None,
"platform": platform,
"serial_pty": None,
"counter": 0,
"available": True,
"connected": True,
"pre_script": pre_script
}
if is_pty:
device['serial_pty'] = serial
else:
device['serial'] = serial
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
self.detected.sort(key=lambda x: x['serial'] or '')
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
utils.py
|
#----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import division
import os
import sys
import numpy as np
from six import text_type, binary_type, integer_types
import mmdnn.conversion.common.IR.graph_pb2 as graph_pb2
__all__ = ["assign_IRnode_values", "convert_onnx_pad_to_tf", 'convert_tf_pad_to_onnx',
'compute_tf_same_padding', 'is_valid_padding', 'download_file',
'shape_to_list', 'list_to_shape']
def assign_attr_value(attr, val):
'''Assign value to AttrValue proto according to data type.'''
from mmdnn.conversion.common.IR.graph_pb2 import TensorShape
if isinstance(val, bool):
attr.b = val
elif isinstance(val, integer_types):
attr.i = val
elif isinstance(val, float):
attr.f = val
elif isinstance(val, binary_type) or isinstance(val, text_type):
if hasattr(val, 'encode'):
val = val.encode()
attr.s = val
elif isinstance(val, TensorShape):
attr.shape.MergeFromString(val.SerializeToString())
elif isinstance(val, list):
if not val: return
if isinstance(val[0], integer_types):
attr.list.i.extend(val)
elif isinstance(val[0], TensorShape):
attr.list.shape.extend(val)
elif isinstance(val[0], float):
attr.list.f.extend(val)
else:
raise NotImplementedError('AttrValue cannot be of list[{}].'.format(val[0]))
elif isinstance(val, np.ndarray):
assign_attr_value(attr, val.tolist())
else:
raise NotImplementedError('AttrValue cannot be of %s' % type(val))
def assign_IRnode_values(IR_node, val_dict):
for name, val in val_dict.items():
assign_attr_value(IR_node.attr[name], val)
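# Illustrative example (hypothetical node and attribute names): assigning a
# mixed dict of values to an IR node dispatches on the Python type of each
# value via assign_attr_value above.
#
#   assign_IRnode_values(IR_node, {
#       'strides': [1, 2, 2, 1],   # list of ints -> attr.list.i
#       'use_bias': True,          # bool         -> attr.b
#       'epsilon': 1e-5,           # float        -> attr.f
#       'padding': 'SAME',         # str          -> attr.s (encoded to bytes)
#   })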
# For padding
def convert_tf_pad_to_onnx(pads):
pads = np.reshape(pads, -1).tolist()
dims = len(pads)
assert dims % 2 == 0
ret = []
for idx in range(0, dims, 2):
ret.append(pads[idx])
for idx in range(1, dims, 2):
ret.append(pads[idx])
return ret
def convert_onnx_pad_to_tf(pads):
return np.transpose(np.array(pads).reshape([2, -1])).reshape(-1, 2).tolist()
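# Worked example of the two pad layouts handled above:
#   ONNX orders pads as [x1_begin, x2_begin, ..., x1_end, x2_end, ...], while
#   TensorFlow orders them as [[x1_begin, x1_end], [x2_begin, x2_end], ...].
#   convert_onnx_pad_to_tf([1, 2, 3, 4]) -> [[1, 3], [2, 4]]
#   convert_tf_pad_to_onnx([[1, 3], [2, 4]]) -> [1, 2, 3, 4]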
def is_valid_padding(pads):
return sum(np.reshape(pads, -1)) == 0
def shape_to_list(shape):
return [dim.size for dim in shape.dim]
def list_to_shape(shape):
ret = graph_pb2.TensorShape()
for dim in shape:
new_dim = ret.dim.add()
new_dim.size = dim
return ret
def compute_tf_same_padding(source_node, input_shape, kernel_shape, strides, data_format='NHWC'):
""" Convert [SAME] padding in tensorflow, keras to onnx pads,
i.e. [x1_begin, x2_begin...x1_end, x2_end,...] """
# print (input_shape)
# print (kernel_shape)
# print (strides)
if data_format.startswith('NC'):
# Not tested
input_shape = input_shape[2:]
remove_dim = len(strides) - len(input_shape)
if remove_dim > 0:
strides = strides[remove_dim::]
else:
input_shape = input_shape[1:-1]
remove_dim = len(input_shape) - len(strides) + 1
if remove_dim < 0:
strides = strides[1:remove_dim]
# print (input_shape)
# print (kernel_shape)
# print (strides)
up_list = [0]
down_list = [0]
if source_node.type == 'Conv2DBackpropInput':
up_list += [0, 0]
down_list += [0, 0]
else:
for idx in range(0, len(input_shape)):
# kernel_shape[idx] = (kernel_shape[idx] - 1) * dilation_rate + 1
output_shape = (input_shape[idx] + strides[idx] - 1) // strides[idx]
this_padding = (output_shape - 1) * strides[idx] + kernel_shape[idx] - input_shape[idx]
this_padding = max(0, this_padding)
up_list.append(this_padding // 2)
down_list.append(this_padding - this_padding // 2)
# print ([0] + up_list + [0] + down_list if data_format.startswith('NC') else up_list + [0] + down_list + [0])
# print ('-----------------------------------------------------')
return [0] + up_list + [0] + down_list if data_format.startswith('NC') else up_list + [0] + down_list + [0]
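# Worked example (values chosen for illustration): an NHWC input with spatial
# shape 5x5, kernel_shape=[3, 3] and strides=[1, 2, 2, 1] on an ordinary
# convolution node (not 'Conv2DBackpropInput') gives
# output_shape = ceil(5 / 2) = 3 per spatial dim, so this_padding = 2 and the
# returned ONNX-style pads are [0, 1, 1, 0, 0, 1, 1, 0]
# (i.e. [N_begin, H_begin, W_begin, C_begin, N_end, H_end, W_end, C_end]).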
# network library
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def _progress_check(count, block_size, total_size):
read_size = count * block_size
read_size_str = sizeof_fmt(read_size)
if total_size > 0:
percent = int(count * block_size * 100 / total_size)
percent = min(percent, 100)
sys.stdout.write("\rprogress: {} downloaded, {}%.".format(read_size_str, percent))
if read_size >= total_size:
sys.stdout.write("\n")
else:
sys.stdout.write("\rprogress: {} downloaded.".format(read_size_str))
sys.stdout.flush()
def _single_thread_download(url, file_name):
from six.moves import urllib
result, _ = urllib.request.urlretrieve(url, file_name, _progress_check)
return result
def _downloader(start, end, url, filename):
import requests
headers = {'Range': 'bytes=%d-%d' % (start, end)}
r = requests.get(url, headers=headers, stream=True)
with open(filename, "r+b") as fp:
fp.seek(start)
var = fp.tell()
fp.write(r.content)
def _multi_thread_download(url, file_name, file_size, thread_count):
import threading
fp = open(file_name, "wb")
fp.truncate(file_size)
fp.close()
part = file_size // thread_count
for i in range(thread_count):
start = part * i
if i == thread_count - 1:
end = file_size
else:
end = start + part
t = threading.Thread(target=_downloader, kwargs={'start': start, 'end': end, 'url': url, 'filename': file_name})
t.daemon = True
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
return file_name
def download_file(url, directory='./', local_fname=None, force_write=False, auto_unzip=False, compre_type=''):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not os.path.isdir(directory):
os.mkdir(directory)
if not local_fname:
k = url.rfind('/')
local_fname = url[k + 1:]
local_fname = os.path.join(directory, local_fname)
if os.path.exists(local_fname) and not force_write:
print ("File [{}] existed!".format(local_fname))
return local_fname
else:
print ("Downloading file [{}] from [{}]".format(local_fname, url))
try:
import wget
ret = wget.download(url, local_fname)
print ("")
except:
ret = _single_thread_download(url, local_fname)
if auto_unzip:
if ret.endswith(".tar.gz") or ret.endswith(".tgz"):
try:
import tarfile
tar = tarfile.open(ret)
for name in tar.getnames():
if not (os.path.realpath(os.path.join(directory, name))+ os.sep).startswith(os.path.realpath(directory) + os.sep):
raise ValueError('The decompression path does not match the current path. For more info: https://docs.python.org/3/library/tarfile.html#tarfile.TarFile.extractall')
tar.extractall(directory)
tar.close()
except ValueError:
raise
except:
print("Unzip file [{}] failed.".format(ret))
elif ret.endswith('.zip'):
try:
import zipfile
zip_ref = zipfile.ZipFile(ret, 'r')
for name in zip_ref.namelist():
if not (os.path.realpath(os.path.join(directory, name))+ os.sep).startswith(os.path.realpath(directory) + os.sep):
raise ValueError('The decompression path does not match the current path. For more info: https://docs.python.org/3/library/zipfile.html?highlight=zipfile#zipfile.ZipFile.extractall')
zip_ref.extractall(directory)
zip_ref.close()
except ValueError:
raise
except:
print("Unzip file [{}] failed.".format(ret))
return ret
"""
r = requests.head(url)
try:
file_size = int(r.headers['content-length'])
return _multi_thread_download(url, local_fname, file_size, 5)
except:
# not support multi-threads download
return _single_thread_download(url, local_fname)
return result
"""
|
test_run_function.py
|
import unittest
import dolphindb as ddb
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_frame_equal, assert_series_equal
from setup import HOST, PORT, WORK_DIR, DATA_DIR
import time
import threading
class TestRunFunction(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
@classmethod
def tearDownClass(cls):
pass
def test_run_bool_scalar(self):
re = self.s.run("true")
self.assertEqual(re, True)
re = self.s.run("bool()")
self.assertIsNone(re, True)
def test_run_char_scalar(self):
re = self.s.run("'a'")
self.assertEqual(re, 97)
re = self.s.run("char()")
self.assertIsNone(re, True)
def test_run_short_scalar(self):
re = self.s.run("22h")
self.assertEqual(re, 22)
re = self.s.run("short()")
self.assertIsNone(re, True)
def test_run_int_scalar(self):
re = self.s.run("22")
self.assertEqual(re, 22)
re = self.s.run("int()")
self.assertIsNone(re, True)
def test_run_long_scalar(self):
re = self.s.run("22l")
self.assertEqual(re, 22)
re = self.s.run("long()")
self.assertIsNone(re, True)
def test_run_date_scalar(self):
re = self.s.run("2012.06.12")
self.assertEqual(re, np.datetime64("2012-06-12"))
re = self.s.run("date()")
self.assertIsNone(re, True)
def test_run_month_scalar(self):
re = self.s.run("2012.06M")
self.assertEqual(re, np.datetime64("2012-06"))
re = self.s.run("month()")
self.assertIsNone(re, True)
def test_run_time_scalar(self):
re = self.s.run("13:30:10.008")
self.assertEqual(re, np.datetime64("1970-01-01T13:30:10.008"))
re = self.s.run("time()")
self.assertIsNone(re, True)
def test_run_minute_scalar(self):
re = self.s.run("13:30m")
self.assertEqual(re, np.datetime64("1970-01-01T13:30"))
re = self.s.run("minute()")
self.assertIsNone(re, True)
def test_run_second_scalar(self):
re = self.s.run("13:30:10")
self.assertEqual(re, np.datetime64("1970-01-01T13:30:10"))
re = self.s.run("second()")
self.assertIsNone(re, True)
def test_run_datetime_scalar(self):
re = self.s.run("2012.06.13T13:30:10")
self.assertEqual(re, np.datetime64("2012-06-13T13:30:10"))
re = self.s.run("datetime()")
self.assertIsNone(re, True)
def test_run_timestamp_scalar(self):
re = self.s.run("2012.06.13 13:30:10.008")
self.assertEqual(re, np.datetime64("2012-06-13 13:30:10.008"))
re = self.s.run("timestamp()")
self.assertIsNone(re, True)
def test_run_nanotime_scalar(self):
re = self.s.run("13:30:10.008007006")
self.assertEqual(re, np.datetime64("1970-01-01T13:30:10.008007006"))
re = self.s.run("nanotime()")
self.assertIsNone(re, True)
def test_run_nanotimestamp_scalar(self):
re = self.s.run("2012.06.13T13:30:10.008007006")
self.assertEqual(re, np.datetime64("2012-06-13T13:30:10.008007006"))
re = self.s.run("nanotimestamp()")
self.assertIsNone(re, True)
def test_run_float_scalar(self):
re = self.s.run("2.1f")
self.assertAlmostEqual(re, 2.1, places=2)
re = self.s.run("float()")
self.assertIsNone(re, True)
def test_run_double_scalar(self):
re = self.s.run("2.1")
self.assertAlmostEqual(re, 2.1, places=2)
re = self.s.run("double()")
self.assertIsNone(re, True)
def test_run_string_scalar(self):
re = self.s.run("`aaaa")
self.assertEqual(re, "aaaa")
re = self.s.run("string()")
self.assertIsNone(re, True)
def test_run_uuid_scalar(self):
re = self.s.run("uuid('5d212a78-cc48-e3b1-4235-b4d91473ee87')")
self.assertEqual(re, "5d212a78-cc48-e3b1-4235-b4d91473ee87")
re = self.s.run("uuid()")
self.assertIsNone(re, True)
def test_run_ipaddr_scalar(self):
re = self.s.run("ipaddr('192.168.1.135')")
self.assertEqual(re, "192.168.1.135")
re = self.s.run("ipaddr()")
self.assertIsNone(re, True)
def test_run_int128_scalar(self):
re = self.s.run("int128('e1671797c52e15f763380b45e841ec32')")
self.assertEqual(re, "e1671797c52e15f763380b45e841ec32")
re = self.s.run("int128()")
self.assertIsNone(re, True)
def test_run_bool_vector(self):
re = self.s.run("true false false true")
assert_array_equal(re, [True, False, False, True])
#re = self.s.run("take(bool(), 5)")
#self.assertIsNone(re, True)
def test_run_char_vector(self):
re = self.s.run("['a', 'b', 'c']")
assert_array_equal(re, [97, 98, 99])
re = self.s.run("take(char(), 5)")
self.assertTrue(np.isnan(re).all())
def test_run_short_vector(self):
re = self.s.run("[10h, 20h, 30h, 40h]")
assert_array_equal(re, [10, 20, 30, 40])
def test_run_int_vector(self):
re = self.s.run("1..5")
assert_array_equal(re, [1, 2, 3, 4, 5])
def test_run_long_vector(self):
re = self.s.run("long(11..15)")
assert_array_equal(re, [11, 12, 13, 14, 15])
def test_run_date_vector(self):
re = self.s.run("2012.06.01..2012.06.05")
assert_array_equal(re, np.array(["2012-06-01", "2012-06-02", "2012-06-03", "2012-06-04", "2012-06-05"], dtype="datetime64[D]"))
def test_run_month_vector(self):
re = self.s.run("2012.06M..2012.10M")
assert_array_equal(re, np.array(["2012-06", "2012-07", "2012-08", "2012-09", "2012-10"], dtype="datetime64[M]"))
def test_run_time_vector(self):
re = self.s.run("13:30:10.001 13:30:10.002")
assert_array_equal(re, np.array(["1970-01-01T13:30:10.001", "1970-01-01T13:30:10.002"], dtype="datetime64[ms]"))
def test_run_minute_vector(self):
re = self.s.run("13:30m 13:31m")
assert_array_equal(re, np.array(["1970-01-01T13:30", "1970-01-01T13:31"], dtype="datetime64[m]"))
def test_run_second_vector(self):
re = self.s.run("13:30:10 13:30:11")
assert_array_equal(re, np.array(["1970-01-01T13:30:10", "1970-01-01T13:30:11"], dtype="datetime64[s]"))
def test_run_datetime_vector(self):
re = self.s.run(" 2012.06.13T13:30:10 2012.06.13T13:30:11")
assert_array_equal(re, np.array([" 2012-06-13T13:30:10", " 2012-06-13T13:30:11"], dtype="datetime64[s]"))
def test_run_timestamp_vector(self):
re = self.s.run("2012.06.13T13:30:10.008 2012.06.13T13:30:10.009")
assert_array_equal(re, np.array(["2012-06-13T13:30:10.008", "2012-06-13T13:30:10.009"], dtype="datetime64[ms]"))
def test_run_nanotime_vector(self):
re = self.s.run("13:30:10.008007006 13:30:10.008007007")
assert_array_equal(re, np.array(["1970-01-01T13:30:10.008007006", "1970-01-01T13:30:10.008007007"], dtype="datetime64[ns]"))
def test_run_nanotimestamp_vector(self):
re = self.s.run("2012.06.13T13:30:10.008007006 2012.06.13T13:30:10.008007007")
assert_array_equal(re, np.array(["2012-06-13T13:30:10.008007006", "2012-06-13T13:30:10.008007007"], dtype="datetime64[ns]"))
def test_run_float_vector(self):
re = self.s.run("float(2.1 2.2)")
assert_array_almost_equal(re, [2.1, 2.2], decimal=1)
def test_run_double_vector(self):
re = self.s.run("2.1 2.1")
assert_array_almost_equal(re, [2.1, 2.2], decimal=1)
def test_run_string_vector(self):
re = self.s.run("`a`b`c")
assert_array_equal(re, ["a", "b", "c"])
def test_run_symbol_vector(self):
re = self.s.run("symbol(`a`b`c)")
assert_array_equal(re, ["a", "b", "c"])
def test_run_int_set(self):
re = self.s.run("set(1..5)")
self.assertSetEqual(re, set([1,2,3,4,5]))
def test_run_int_matrix(self):
re = self.s.run("1..4$2:2")
assert_array_equal(re[0], [[1,3], [2, 4]])
def test_run_tuple(self):
re = self.s.run("[1, `a, 2]")
assert_array_equal(re, ["1","a","2"])
def test_run_vector_vector(self):
re = self.s.run("[[1,2,3],`a`b]")
assert_array_equal(re[0], [1,2,3])
assert_array_equal(re[1], ["a","b"])
def test_run_dict_value_scalar(self):
re = self.s.run("dict(`a`b`c,1 2 3)")
self.assertDictEqual(re, {'b': 2, 'c': 3, 'a': 1})
def test_run_dict_value_vector(self):
re = self.s.run("dict(`a`b`c, [1..3, 4..6, 7..9])")
assert_array_equal(re["a"], [1, 2, 3])
assert_array_equal(re["b"], [4, 5, 6])
assert_array_equal(re["c"], [7, 8, 9])
def test_run_table(self):
re = self.s.run("table(`AAPL`MS`C`IBM as sym, 45 48 52 56 as vol)")
tmp = {"sym": ['AAPL', 'MS', 'C', 'IBM'],
"vol": np.array([45, 48, 52, 56], dtype="int32")}
assert_frame_equal(re, pd.DataFrame(tmp))
def test_function_add_int(self):
re = self.s.run('add', 3, 4)
self.assertEqual(re, 7)
def test_function_add_string(self):
re = self.s.run('add', 'hello', 'world')
self.assertMultiLineEqual(re, 'helloworld')
def test_function_sum_list(self):
re = self.s.run('sum', [1.0, 2.0, 3.0])
self.assertAlmostEqual(re, 6.0)
def test_function_sum_numpy_array_int32(self):
re = self.s.run('sum', np.array([100000, 200000, 300000]))
self.assertEqual(re, 600000)
def test_function_sum_numpy_array_int64(self):
pass
# re=self.s.run('sum',np.int64([1e15, 2e15, 3e15])
def test_function_sum_numpy_array_float64(self):
re = self.s.run('sum', np.array([100000.0, 200000.0, 300000.0]))
self.assertAlmostEqual(re, 600000.0)
def test_function_reverse_str_array(self):
re = self.s.run('reverse', np.array(["1", "2", "3"], dtype="str"))
self.assertMultiLineEqual(re[0], '3')
self.assertMultiLineEqual(re[1], '2')
self.assertMultiLineEqual(re[2], '1')
def test_function_flatten_matrix(self):
re = self.s.run('flatten', np.int32([[1, 2, 3], [4, 5, 6]]))
self.assertEqual((re == np.array([1, 4, 2, 5, 3, 6])).all(), True)
def test_function_case_matrix(self):
pass
# TODO: matrix bug
# self.s.run("cast", np.double([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),[2,3])
def test_function_wavg(self):
col1 = [100, 30, 300]
col2 = [1.0, 1.5, 2.0]
re = self.s.run("wavg", col1, col2)
self.assertAlmostEqual(re, 165.5556, places=4)
def test_function_wsum(self):
col1 = [100, 30, 300]
col2 = [1.0, 1.5, 2.0]
re = self.s.run("wsum", col1, col2)
self.assertAlmostEqual(re, 745.0, places=1)
def test_function_wavg_partial(self):
col1 = [100, 30, 300]
re = self.s.run("wavg{, [1, 1.5, 2]}", col1)
self.assertAlmostEqual(re, 165.5556, places=4)
def test_user_defined_function(self):
re = self.s.run("login('admin','123456')")
self.s.run("def foo(a,b){return a+b-1}")
re = self.s.run('foo', 3, 4)
self.assertEqual(re, 6)
def test_clear_variable(self):
self.s.run('''t = table(1..10 as id,rand(10,10) as val1)
select * from t''',clearMemory = True)
def secondRun():
self.s.run("t")
time.sleep(10)
self.assertRaises(RuntimeError, secondRun)
def test_BlockReader_Table(self):
self.s.run('''
rows=10000;
testblock=table(take(1..rows,rows) as id,take(`A,rows) as symbol,take(2020.08.01..2020.10.01,rows) as date, rand(50,rows) as size,rand(50.5,rows) as price)''')
br = self.s.run("select * from testblock",fetchSize=10000)
self.assertTrue(br.hasNext())
re = br.read()
self.assertFalse(br.hasNext())
expected = self.s.run("select * from testblock")
assert_frame_equal(re,expected)
re = br.read()
self.assertIsNone(re)
br = self.s.run("select * from testblock",fetchSize=8200)
self.assertTrue(br.hasNext())
tem = br.read()
self.assertTrue(br.hasNext())
self.assertEqual(len(tem),8200)
re = br.read()
self.assertFalse(br.hasNext())
expected = self.s.run("select * from testblock where id>8200")
assert_frame_equal(re,expected)
br = self.s.run("select * from testblock",fetchSize=10001)
self.assertTrue(br.hasNext())
re = br.read()
self.assertFalse(br.hasNext())
expected = self.s.run("select * from testblock")
assert_frame_equal(re,expected)
def errFetchSize():
self.s.run("select * from testblock",fetchSize=8191)
self.assertRaises(RuntimeError, errFetchSize)
def fetchSizeZero():
self.s.run("select * from testblock",fetchSize=0)
self.assertRaises(RuntimeError, fetchSizeZero)
def test_Block_Reader_DFStable(self):
self.s.run('''
n = 10000
t = table(take(1..n,n) as id,take(2010.01.01,n) as date,rand(30,n) as price)
dbPath = "dfs://TEST_BLOCK"
if(existsDatabase(dbPath)){
dropDatabase(dbPath)
}
db = database(dbPath,VALUE,2010.01.01..2010.01.30)
pt = db.createPartitionedTable(t,`pt,`date)
pt.append!(t)
'''
)
br = self.s.run("select * from loadTable(dbPath,`pt)",fetchSize=10001)
self.assertTrue(br.hasNext())
re = br.read()
expected = self.s.run("select * from loadTable(dbPath,`pt)")
assert_frame_equal(re,expected)
self.assertFalse(br.hasNext())
re = br.read()
self.assertIsNone(re)
br = self.s.run("select * from loadTable(dbPath,`pt)",fetchSize=8200)
temp = br.read()
self.assertTrue(br.hasNext())
self.assertEqual(len(temp),8200)
re = br.read()
self.assertFalse(br.hasNext())
expected = self.s.run("select * from loadTable(dbPath,`pt) where id>8200")
assert_frame_equal(re,expected)
def errFetchSize():
self.s.run("select * from loadTable(dbPath,`pt)",fetchSize=8191)
self.assertRaises(RuntimeError, errFetchSize)
def fetchSizeZero():
self.s.run("select * from loadTable(dbPath,`pt)",fetchSize=0)
self.assertRaises(RuntimeError, fetchSizeZero)
def test_Block_Reader_skipALL(self):
br = self.s.run('''select * from loadTable("dfs://TEST_BLOCK",`pt)''',fetchSize=10001)
br.skipAll()
re = br.read()
re = br.read()
self.assertIsNone(re)
self.s.run('''
rows=10000
testblock=table(take(1..rows,rows) as id,take(`A,rows) as symbol,take(2020.08.01..2020.10.01,rows) as date, rand(50,rows) as size,rand(50.5,rows) as price)''')
br = self.s.run("select * from testblock",fetchSize=10000)
self.assertTrue(br.hasNext())
br.skipAll()
self.assertFalse(br.hasNext())
self.assertIsNone(re)
def test_Block_Reader_huge_table(self):
self.s.run('''
rows = 20000000
testblock=table(1..rows as id,take(string('A'..'Z'),rows) as symbol,take(2020.08.01..2020.10.01,rows) as date, rand(50,rows) as size,rand(50.5,rows) as price)
''')
fetchSize=10000000
br = self.s.run("select * from testblock",fetchSize=fetchSize)
temp = br.read()
self.assertEqual(len(temp),fetchSize)
self.assertTrue(br.hasNext())
re = br.read()
self.assertEqual(len(temp),fetchSize)
self.assertFalse(br.hasNext())
def test_Block_Reader_huge_Dfs(self):
self.s.run('''
n = 20000000
t = table(1..n as id,take(2010.01.01,n) as date,take(string('A'..'Z'),n) as symbol,rand(30,n) as price)
dbPath = "dfs://Test_Huge_Block"
if(existsDatabase(dbPath)){
dropDatabase(dbPath)
}
db = database(dbPath,VALUE,2010.01.01..2010.01.30)
pt = db.createPartitionedTable(t,`pt,`date)
pt.append!(t)
''')
fetchSize=10000000
br = self.s.run("select * from loadTable(dbPath,`pt)",fetchSize=fetchSize)
temp = br.read()
self.assertTrue(br.hasNext())
self.assertEqual(len(temp),fetchSize)
re = br.read()
self.assertFalse(br.hasNext())
self.assertEqual(len(temp),fetchSize)
def test_Run_multithread(self):
def job1():
tmp = ddb.session()
tmp.connect(HOST, PORT, "admin", "123456")
tmp.run("1+1;sleep(2000)")
def job2():
tmp = ddb.session()
tmp.connect(HOST, PORT, "admin", "123456")
tmp.run("1+1;sleep(2000)")
def job3():
tmp = ddb.session()
tmp.connect(HOST, PORT, "admin", "123456")
tmp.run("1+1;sleep(2000)")
def job4():
tmp = ddb.session()
tmp.connect(HOST, PORT, "admin", "123456")
tmp.run("1+1;sleep(2000)")
startTime = time.time()
job1_thread = threading.Thread(target=job1)
job2_thread = threading.Thread(target=job2)
job3_thread = threading.Thread(target=job3)
job4_thread = threading.Thread(target=job4)
job1_thread.start()
job2_thread.start()
job3_thread.start()
job4_thread.start()
job1_thread.join()
job2_thread.join()
job3_thread.join()
job4_thread.join()
endTime = time.time()
re = endTime-startTime
print(re)
self.assertTrue(re>2)
self.assertTrue(re<8)
if __name__ == '__main__':
unittest.main()
|
MiniMainWindow.py
|
"""
MIT License
Copyright (c) 2021 Pedro Correia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Python libraries
import time
import serial
from threading import Thread, Event
# Qt libraries
from PyQt5 import QtCore, QtGui, QtWidgets
# Local libraries
from src.settings import Settings, Observer
from src.unit import Unit
from src.utils import COMUtils
from src.language import Language
from src.assets import Assets
from .MainDialogs import InfoDialog, UnitsDialog, HorizontalLine, PreferencesDialog, EditDialog
BIG_FONT: QtGui.QFont = QtGui.QFont()
BIG_FONT.setPointSize(64)
BIG_FONT.setBold(True)
MEDIUM_FONT: QtGui.QFont = QtGui.QFont()
MEDIUM_FONT.setPointSize(24)
MEDIUM_FONT.setBold(True)
BIG_SIZE: int = 64
MEDIUM_SIZE: int = 32
CALIBRATION_FILENAME: str = "Calibration"
class CentralWidget(QtWidgets.QWidget):
def __init__(self, parent=None, settings:Settings=None, language:Language=None, unit:Unit=None, observer:Observer=None, assets:Assets=None):
QtWidgets.QWidget.__init__(self, parent)
self._settings: Settings = settings
self._language: Language = language
self._assets: Assets = assets
self._unit: Unit = unit
self._observer: Observer = observer
self._observer.Signal.ValuePressureChanged.connect(self._onPressureChange)
self._observer.Signal.SendInfo.connect(self._onInfo)
if len(self._settings.calibrationCurves()) == 0:
self._settings.createCurve(CALIBRATION_FILENAME)
self._pressure_label: QtWidgets.QLabel = QtWidgets.QLabel(self._language.get(self._language.Pressure) + ": ", self)
self._pressure_label.setFont(BIG_FONT)
self._pressure_edit: QtWidgets.QLineEdit = QtWidgets.QLineEdit("0.0 kPa", self)
self._pressure_edit.setReadOnly(True)
self._pressure_edit.setFont(BIG_FONT)
self._info_label: QtWidgets.QLabel = QtWidgets.QLabel("...", self)
self._info_label.setFont(MEDIUM_FONT)
self._info_label.setStyleSheet("""QLabel {
color: red;
border: 3px solid red;
border-radius: 5px;
}""")
self._info_label.setAlignment(QtCore.Qt.AlignCenter)
hbox_top: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox_top.addWidget(self._pressure_label)
hbox_top.addWidget(self._pressure_edit)
# NOTE: tank
self._line0: HorizontalLine = HorizontalLine()
self._empty_tank_button: QtWidgets.QPushButton = QtWidgets.QPushButton(self._language.get(self._language.EmptyTank))
self._fill_tank_button: QtWidgets.QPushButton = QtWidgets.QPushButton(self._language.get(self._language.FillTank))
self._empty_tank_button.clicked.connect(self._onEmptyTank)
self._fill_tank_button.clicked.connect(self._onFillTank)
self._empty_tank_button.setFont(MEDIUM_FONT)
self._fill_tank_button.setFont(MEDIUM_FONT)
hbox_tank: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox_tank.addWidget(self._empty_tank_button)
hbox_tank.addWidget(self._fill_tank_button)
# NOTE: calibration
self._line1: HorizontalLine = HorizontalLine()
self._calibration_label: QtWidgets.QLabel = QtWidgets.QLabel(self._language.get(self._language.CalibrationTitle) + ": ", self)
self._calibration_import_button: QtWidgets.QPushButton = QtWidgets.QPushButton(self._assets.get("import"), self._language.get(self._language.Import))
self._calibration_edit_button: QtWidgets.QPushButton = QtWidgets.QPushButton(self._assets.get("edit"), self._language.get(self._language.Edit))
self._calibration_label.setFont(MEDIUM_FONT)
self._calibration_import_button.setFont(MEDIUM_FONT)
self._calibration_edit_button.setFont(MEDIUM_FONT)
self._calibration_import_button.setIconSize(QtCore.QSize(MEDIUM_SIZE, MEDIUM_SIZE))
self._calibration_edit_button.setIconSize(QtCore.QSize(MEDIUM_SIZE, MEDIUM_SIZE))
self._calibration_import_button.clicked.connect(self._onImportCalibration)
self._calibration_edit_button.clicked.connect(self._onEditCalibration)
hbox_calibration: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox_calibration.addWidget(self._calibration_label)
hbox_calibration.addWidget(self._calibration_import_button)
hbox_calibration.addWidget(self._calibration_edit_button)
# NOTE: pressure validation
self._line2: HorizontalLine = HorizontalLine()
self._target_label: QtWidgets.QLabel = QtWidgets.QLabel(self._language.get(self._language.TargetPressure) + ":", self)
self._target_value: QtWidgets.QDoubleSpinBox = QtWidgets.QDoubleSpinBox(self)
self._target_value.setSuffix(self._unit.getSuffix(self._unit.UnitPressure, add_space=True))
self._target_value.setRange(*self._unit.getRange(self._unit.UnitPressure))
self._target_value.setDecimals(self._unit.getPrecision(self._unit.UnitPressure))
self._target_button: QtWidgets.QPushButton = QtWidgets.QPushButton(self._assets.get("check"), self._language.get(self._language.Validate), self)
self._target_button.clicked.connect(self._onTargetPressure)
self._target_label.setFont(MEDIUM_FONT)
self._target_value.setFont(MEDIUM_FONT)
self._target_button.setFont(MEDIUM_FONT)
self._target_button.setIconSize(QtCore.QSize(MEDIUM_SIZE, MEDIUM_SIZE))
hbox_target: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox_target.addWidget(self._target_label)
hbox_target.addWidget(self._target_value)
hbox_target.addWidget(self._target_button)
# NOTE: COM port
self._line3: HorizontalLine = HorizontalLine()
self._comport_label: QtWidgets.QLabel = QtWidgets.QLabel(self._language.get(self._language.ComPort) + ": ", self)
self._comport_value: QtWidgets.QComboBox = QtWidgets.QComboBox(self)
for port in COMUtils.getAllCOMPortDevices():
self._comport_value.addItem(port)
port = self._settings.getProperty(self._settings.ComPort)
if port != self._settings.NullString and port in COMUtils.getAllCOMPortDevices():
self._comport_value.setCurrentText(port)
else:
self._settings.setProperty(self._settings.ComPort, self._comport_value.currentText())
self._comport_description: QtWidgets.QLabel = QtWidgets.QLabel("-> " + COMUtils.getDescription(self._comport_value.currentText()), self)
self._comport_label.setFont(MEDIUM_FONT)
self._comport_value.setFont(MEDIUM_FONT)
self._comport_description.setFont(MEDIUM_FONT)
self._comport_value.currentTextChanged.connect(self._onComPortChanged)
# NOTE: start button
self._connect_button: QtWidgets.QPushButton = QtWidgets.QPushButton(self._assets.get("play"), self._language.get(self._language.Connect), self)
self._connect_button.clicked.connect(self._onConnect)
self._connect_button.setFont(BIG_FONT)
self._connect_button.setIconSize(QtCore.QSize(BIG_SIZE, BIG_SIZE))
hbox_comport: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()
hbox_comport.addWidget(self._comport_label)
hbox_comport.addWidget(self._comport_value)
hbox_comport.addWidget(self._comport_description)
layout: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()
layout.addLayout(hbox_top)
layout.addWidget(self._info_label)
layout.addStretch()
layout.addWidget(self._line0)
layout.addLayout(hbox_tank)
layout.addWidget(self._line1)
layout.addLayout(hbox_calibration)
layout.addWidget(self._line2)
layout.addLayout(hbox_target)
layout.addWidget(self._line3)
layout.addLayout(hbox_comport)
layout.addWidget(self._connect_button)
layout.setSpacing(MEDIUM_SIZE)
self.setLayout(layout)
def _onImportCalibration(self) -> None:
        # getOpenFileName returns a (path, selected_filter) tuple; keep only the path.
        fic, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Open File", "", "Text Files (*.csv *.txt)")
        if not fic:
            return
        try:
            x, y = self._settings.loadCurveFromPath(fic)
            self._settings.saveCurve(CALIBRATION_FILENAME, [x, y])
        except ValueError:
            QtWidgets.QMessageBox.warning(self, self._language.get(self._language.FileProblem), self._language.get(self._language.UnableToOpenFile))
def _onEditCalibration(self) -> None:
edit_dialog = EditDialog(self.parent(), target=CALIBRATION_FILENAME, settings=self._settings, language=self._language, unit=self._unit, observer=self._observer, assets=self._assets)
edit_dialog.show()
def _onInfo(self, text:str) -> None:
self._info_label.setText(text)
def _onEmptyTank(self) -> None:
self._observer.Signal.EmptyTank.emit()
def _onFillTank(self) -> None:
self._observer.Signal.FillTank.emit()
def _onTargetPressure(self) -> None:
value = self._target_value.value()
self._observer.Signal.NewTargetPressure.emit(value)
def _onComPortChanged(self) -> None:
port = self._comport_value.currentText()
self._settings.setProperty(self._settings.ComPort, port)
self._comport_description.setText("-> " + COMUtils.getDescription(self._comport_value.currentText()))
def _onPressureChange(self, pressure:float) -> None:
self._pressure_edit.setText("{:.2f} kPa".format(pressure))
def setConnectionButtonState(self, flag:bool) -> None:
if not flag:
self._connect_button.setIcon(self._assets.get("play"))
self._connect_button.setText(self._language.get(self._language.Connect))
else:
self._connect_button.setIcon(self._assets.get("stop"))
self._connect_button.setText(self._language.get(self._language.Disconnect))
def _onConnect(self) -> None:
self._observer.Signal.Connect.emit()
class MiniMainWindow(QtWidgets.QMainWindow):
"""
This is the Main Window for Prime. It stores the permanent data and
initializes all the manager objects.
"""
def __init__(self, name:str, version:str, user_folder:str):
QtWidgets.QMainWindow.__init__(self)
self._name: str = name
self._version: str = version
self._user_folder: str = user_folder
self.setWindowTitle(self._name + " " + self._version)
self._settings: Settings = Settings(user_folder=user_folder, name=name, version=version)
self._language: Language = Language(settings=self._settings)
self._observer: Observer = Observer()
self._observer.Signal.Connect.connect(self._onConnection)
self._observer.Signal.NewTargetPressure.connect(self._onNewTargetPressure)
self._observer.Signal.FillTank.connect(self._fillTank)
self._observer.Signal.EmptyTank.connect(self._emptyTank)
# NOTE: units object (for conversion)
self._unit: Unit = Unit(settings=self._settings)
# NOTE: assets object
self._assets: Assets = Assets()
self._central_widget: CentralWidget = CentralWidget(self, settings=self._settings, language=self._language, unit=self._unit, observer=self._observer, assets=self._assets)
self.setCentralWidget(self._central_widget)
        self._connection_thread_flag: bool = False
        self._connection_thread_event: Event = None
        self._connection_thread: Thread = None
        # No serial connection is open at startup; the tank/target handlers check for None.
        self._serial: serial.Serial = None
# NOTE: setting up window icon
self.setWindowIcon(self._assets.get("logo"))
# NOTE: building menu.
self._buildMenuBar()
# NOTE: Maximizing on startup
self.showMaximized()
def _buildMenuBar(self):
"""
Adding the top menu to this application.
"""
self._file_menu:QtWidgets.QMenu = self.menuBar().addMenu(self._language.get(self._language.File))
self._settings_menu: QtWidgets.QMenu = self.menuBar().addMenu(self._language.get(self._language.Settings))
self._about_menu: QtWidgets.QMenu = self.menuBar().addMenu(self._language.get(self._language.About))
self._quit_action: QtWidgets.QAction = QtWidgets.QAction(self._assets.get("close"), self._language.get(self._language.Quit), self)
self._quit_action.triggered.connect(self._onExit)
self._file_menu.addSeparator()
self._file_menu.addAction(self._quit_action)
self._preferences_action: QtWidgets.QAction = QtWidgets.QAction(self._assets.get("gear"), self._language.get(self._language.Preferences), self)
self._preferences_action.triggered.connect(self._onPreferences)
self._settings_menu.addAction(self._preferences_action)
self._information_action: QtWidgets.QAction = QtWidgets.QAction(self._assets.get("info"), self._language.get(self._language.Information), self)
self._information_action.triggered.connect(self._onInformation)
self._license_action: QtWidgets.QAction = QtWidgets.QAction(self._assets.get("license"), self._language.get(self._language.License), self)
self._license_action.triggered.connect(self._onLicense)
self._help_action: QtWidgets.QAction = QtWidgets.QAction(self._assets.get("help"), self._language.get(self._language.Help), self)
self._help_action.triggered.connect(self._onHelp)
self._about_menu.addAction(self._information_action)
self._about_menu.addAction(self._license_action)
self._about_menu.addSeparator()
self._about_menu.addAction(self._help_action)
def _onPreferences(self) -> None:
preferences_dialog = PreferencesDialog(self, settings=self._settings, language=self._language, unit=self._unit, observer=self._observer, assets=self._assets)
preferences_dialog.show()
def _onInformation(self) -> None:
info_dialog = InfoDialog(self, self._assets.get("info"), text=self._language.get(self._language.InfoMessage), title=self._language.get(self._language.Information) + " " + self._name + " " + self._version, close_text=self._language.get(self._language.Close))
info_dialog.show()
def _onLicense(self) -> None:
info_dialog = InfoDialog(self, self._assets.get("license"), text=self._language.get(self._language.LicenseMessage), title=self._language.get(self._language.License), close_text=self._language.get(self._language.Close))
info_dialog.show()
def _onHelp(self) -> None:
info_dialog = InfoDialog(self, self._assets.get("help"), text=self._language.get(self._language.HelpMessage), title=self._language.get(self._language.Help), close_text=self._language.get(self._language.Close))
info_dialog.show()
    def _emptyTank(self) -> None:
        if self._serial is not None:
self._observer.Signal.SendInfo.emit(self._language.get(self._language.EmptyingTank))
self._serial.write(str("X").encode())
print("MiniMainWindow::_emptyTank :", str("Ok"))
else:
QtWidgets.QMessageBox.warning(self, self._language.get(self._language.NoConnection), self._language.get(self._language.YouMustOpenAConnection))
    def _fillTank(self) -> None:
        if self._serial is not None:
self._observer.Signal.SendInfo.emit(self._language.get(self._language.FillingTank))
self._serial.write(str("Y").encode())
print("MiniMainWindow::_fillTank :", str("Ok"))
else:
QtWidgets.QMessageBox.warning(self, self._language.get(self._language.NoConnection), self._language.get(self._language.YouMustOpenAConnection))
def _onNewTargetPressure(self, value:float) -> None:
if self._serial is not None:
self._serial.write(str(int(value)).encode())
print("MiniMainWindow::_onNewTargetPressure : new target pressure ->", str(value))
def _onExit(self) -> None:
self.close()
def closeEvent(self, event) -> None:
reply = QtWidgets.QMessageBox.question(self, self._language.get(self._language.Quit) + " " + self._name + "?", 'Are you sure you want to quit?', QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
self._settings.save()
print("MiniMainWindow::closeEvent : quitting software at: ", time.asctime())
QtWidgets.QApplication.instance().quit()
else:
event.ignore()
def _onConnection(self) -> None:
if not self._connection_thread_flag:
try:
x, y = self._settings.loadCurve(CALIBRATION_FILENAME)
if len(x) > 2:
z = np.polyfit(x, y, 2)
self._p = np.poly1d(z)
else:
self._p = None
port = self._settings.getProperty(self._settings.ComPort)
self._serial: serial.Serial = serial.Serial(port, 9600)
self._thread_flag: bool = True
self._connection_thread_event = Event()
self._connection_thread = Thread(target=self._onReadPressureThread)
self._connection_thread.start()
self._connection_thread_flag = True
self._central_widget.setConnectionButtonState(True)
except OSError as err:
self._central_widget.setConnectionButtonState(False)
QtWidgets.QMessageBox.warning(self, self._language.get(self._language.UnableToConnect), self._language.get(self._language.UnableToOpenPort))
else:
self._serial.close()
self._serial = None
self._connection_thread_flag = False
self._connection_thread_event.set()
self._connection_thread.join()
self._central_widget.setConnectionButtonState(False)
    def _onReadPressureThread(self) -> None:
        while self._connection_thread_flag:
            value = self._serial.readline()
            try:
                # Decode and strip the raw serial line (e.g. b"123\r\n" -> "123").
                val = value.decode(errors="ignore").strip()
                if val == "IC_H":
                    self._observer.Signal.SendInfo.emit(self._language.get(self._language.ReachedBeginning))
                elif val == "FC_H":
                    self._observer.Signal.SendInfo.emit(self._language.get(self._language.ReachedEnd))
                elif val in ("FC_L", "IC_L"):
                    # Other status markers from the device are ignored.
                    pass
                else:
                    # Apply the fitted calibration polynomial when one is available.
                    if self._p is not None:
                        self._observer.Signal.ValuePressureChanged.emit(self._p(float(val)))
                    else:
                        self._observer.Signal.ValuePressureChanged.emit(float(val))
            except ValueError as error:
                print(error)
            self._connection_thread_event.wait(0.2)
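# Minimal launch sketch for this window (illustrative; the entry-point guard and the
# name/version/user_folder values below are assumptions, not part of this module):
#
#     if __name__ == "__main__":
#         import os
#         import sys
#         app = QtWidgets.QApplication(sys.argv)
#         window = MiniMainWindow(name="Prime", version="1.0", user_folder=os.path.expanduser("~"))
#         sys.exit(app.exec_())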
|
server.py
|
import asyncio
import time
import os
from multiprocessing import Process,Lock,Value,Manager
import queue
import sqlite3
import threading
import collections
import platform
from ctypes import c_bool
import copy
"""
这里使用的是异步触发式服务器,因为在IO上同时需要保证效率,
所以代码会比较冗长,希望能提出宝贵建议
"""
__author__ = "chriskaliX"
class Receive:
    _dict = dict()      # shared with the parent process
    _dict_tmp = dict()  # process-local working copy
class EchoServerProtocol:
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
if len(data) > 100:
Receive.counter(Receive._dict_tmp, addr[0])
def error_received(self,data,addr):
pass
@staticmethod
async def start_datagram_proxy(ip,port):
loop = asyncio.get_event_loop()
return await loop.create_datagram_endpoint(
lambda: Receive.EchoServerProtocol(),
local_addr=(ip, port))
@staticmethod
def run(ip,port,_dict,signal):
        # Point Receive._dict at the dict shared with the parent process
Receive._dict = _dict
        # On Linux, use uvloop for a faster event loop
if platform.system() == "Linux":
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
def getvalue(signal):
while True:
time.sleep(1)
if signal.value:
Receive._dict.update(Receive._dict_tmp)
Receive._dict_tmp.clear()
signal.value = False
# loop.call_soon_threadsafe(loop.stop)
# break
threading.Thread(target=getvalue,args=(signal,)).start()
coro = Receive.start_datagram_proxy(ip,port)
transport, _ = loop.run_until_complete(coro)
loop.run_forever()
@staticmethod
def counter(_dict,key):
        _dict[key] = _dict.get(key, 0) + 1
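    # Quick sanity sketch of counter() semantics (illustrative only, not executed):
    #   counts = {}
    #   Receive.counter(counts, "1.2.3.4")   # counts == {"1.2.3.4": 1}
    #   Receive.counter(counts, "1.2.3.4")   # counts == {"1.2.3.4": 2}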
# Example of how the Receive class is started and used
if __name__ == '__main__':
    # Dict used to exchange results with the child process
    _dict = Manager().dict()
    # Signal flag used to ask the child process to publish its dict
    signal = Manager().Value(c_bool,False)
    # Performance
    #
    # Q&A:
    # Q: Why not operate on the Manager dict directly?
    # A: It is slow; see
    #    https://stackoverflow.com/questions/10721915/shared-memory-objects-in-multiprocessing
    #    https://www.codesd.com/item/python-manager-dict-is-very-slow-compared-to-dict.html
    #
    # Q: Why keep a per-IP packet count?
    # A: A vulnerable server does not necessarily give a good amplification effect. The reply
    #    sizes of NTP monlist, SSDP and similar services are fairly fixed, so packets per second
    #    matter more; recording each IP together with the number of packets it returns is a rough
    #    filter on the quality of the vulnerable servers.
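    #
    # For contrast, a sketch of the pattern argued against above (illustrative, not used
    # here): incrementing the Manager proxy directly from datagram_received() would cost
    # one inter-process round-trip per packet, e.g.
    #
    #     def datagram_received(self, data, addr):
    #         if len(data) > 100:
    #             Receive.counter(Receive._dict, addr[0])  # proxy access per packet -> slow
    #
    # Keeping a plain local dict (_dict_tmp) and syncing it only when signal.value is set
    # avoids that per-packet overhead.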
    # Start the child process and begin listening
pro = Process(target=Receive.run,args=('127.0.0.1',9999,_dict,signal))
pro.start()
time.sleep(20)
    # Set signal.value to True to have the child publish its counters into _dict
signal.value = True
while True:
print(_dict)
print(pro.is_alive())
time.sleep(1)
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if grad_req != 'null':
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
else:
assert(mod1.get_input_grads()[0] == None)
assert(mod2.get_input_grads()[0] == None)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
T, N, I, H = 5, 20, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
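# Illustrative sanity check for np_softmax (not part of the test suite): each row
# sums to 1, and a larger temperature flattens the distribution, e.g.
#   np_softmax(np.array([[1., 2., 3.]]))                    ~ [[0.090, 0.245, 0.665]]
#   np_softmax(np.array([[1., 2., 3.]]), temperature=10.0)  ~ [[0.301, 0.332, 0.367]]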
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
out1 = exec1.outputs[0].asnumpy()
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2;
npout_grad = out_grad.asnumpy()
npout_grad = 0;
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2;
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
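# Standard bilinear-interpolation kernel: weight(x, y) = (1 - |x/f - c|) * (1 - |y/f - c|) with
# center offset c = (2f - 1 - f % 2) / (2f), where f is the upsampling factor.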
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
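# Reference gradients derived from y = gamma * (x - mean) / sqrt(var + eps) + beta: dvar and dmean
# differentiate through the batch statistics, dX combines the three chain-rule terms, and dW/db
# are the usual scale and shift gradients.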
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out.reshape(data.shape), mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
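# NumPy reference gradients: beta_grad = sum(ograd) and gamma_grad = sum(x_hat * ograd) over the
# (N, spatial) axes per group, while data_grad = (gamma / std) * (ograd - mean(ograd) - x_hat * mean(ograd * x_hat))
# within each group, i.e. the standard normalization backward.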
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-5, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-5, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x, in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
# Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
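# The gradient of a broadcast binary op must be summed back over the broadcast axes so that each
# input's gradient matches that input's shape; reduce_op below performs that reduction.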
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
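# Power-rule gradients used below: d(a**b)/da = b * a**(b - 1) and d(a**b)/db = a**b * ln(a).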
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
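# Placing y strictly above (or below) every element of x keeps all perturbed points on one side
# of the max/min switch, so the finite-difference gradient remains well defined.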
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
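# Reshape's special shape codes (per MXNet): 0 copies the corresponding input dim, -1 infers one
# dim from the remaining size, -2 copies all remaining dims, -3 merges two consecutive dims, and
# -4 splits one dim into the two values that follow; reverse=True applies the codes right-to-left.
# e.g. (2, 3, 5, 5) with shape=(0, -1) -> (2, 75).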
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
# Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
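# Each reduction is validated against its numpy counterpart plus its analytic gradient broadcast
# back to the input shape: sum/mean spread out_grad (mean divides by the reduction size), prod
# multiplies by outdata/data, nansum/nanprod zero the gradient at NaNs, max/min use an equality
# mask, and norm (L2) uses data/outdata.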
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
# Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience, the third and fourth input dims are of the form 4k + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
pass
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
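# Worked example (added for clarity, not executed by the tests): with a 10x10 input,
# pad_size=4, kernel_size=1, max_displacement=4, stride1=1 and stride2=1,
# paddedbottomwidth = 10 + 2*4 = 18, kernel_radius = 0, border_size = 4,
# so top_width = top_height = (18 - 2*4) // 1 = 10 and
# top_channels = (2*(4 // 1) + 1)**2 = 81.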
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected: %s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
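# Note (added): the zip(*[iter(list(pad_width))] * 2) idiom above pairs up consecutive
# entries of MXNet's flat pad_width tuple into the (before, after) pairs that numpy.pad
# expects, e.g. (0, 0, 0, 0, 1, 2, 3, 4) -> [(0, 0), (0, 0), (1, 2), (3, 4)].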
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
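# Note (added): np_instance_norm normalizes every (sample, channel) slice over its
# spatial dimensions and then applies the per-channel weight/bias, i.e. for data of
# shape (N, C, H, W) the mean and variance are taken over the H*W values of each
# (n, c) pair.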
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0] // s[1], axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
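# Reference for the numpy gradient above (a sketch of the derivation, added for
# clarity): with x_hat = (data - mean) / std and w = out_grad * gamma / std,
#   dL/d data  = w - mean_axis(w) - x_hat * mean_axis(w * x_hat)
#   dL/d gamma = sum over the non-normalized axes of (out_grad * x_hat)
#   dL/d beta  = sum over the non-normalized axes of out_grad
# where mean_axis averages over the normalized axis only.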
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
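# Tiny worked example (added for illustration; none of this runs as part of the
# tests). With arr = [[1, 2, 3], [4, 5, 6]], axis=1 (the sequence axis) and
# lengths=[2, 1]:
#   sequence_mask_numpy(arr, [2, 1], axis=1, value=0) -> [[1, 2, 0], [4, 0, 0]]
#   sequence_reverse_numpy(arr, [2, 1], axis=1)       -> [[2, 1, 3], [4, 5, 6]]
#   sequence_last_numpy(arr, [2, 1], axis=1)          -> [2, 4]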
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
# print(name)
# print(arr_grad)
# print(npout_grad)
assert_almost_equal(arr_grad, npout_grad)
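# How mathematical_core is used (a short note added for clarity): each call below
# passes the MXNet symbol, the matching numpy forward function, and the analytic
# derivative; the helper runs the forward pass on a constant array, compares it
# against numpy, then backpropagates a constant gradient and compares against
# grad_init * derivative(data_init). For example, the "tan" entry below checks
# d/dx tan(x) = 1 + tan(x)**2.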
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
        print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())
for mode in ['clip', 'wrap']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
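        # The generated grid is in normalized [-1, 1] coordinates; map it back to pixel indices
        # before comparing with meshgrid.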
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
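        # For the affine transform the grid is the 2x3 matrix times the base grid [x_norm; y_norm; 1],
        # so dL/d(affine) = out_grad . base^T; tmp below holds the three base-grid rows.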
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
assert(expect_out.all() == out.all())
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
nparr = ndarr.asnumpy()
assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
dtype_softmax_np = dtype_softmax.asnumpy()
ref_softmax_np = ref_softmax.asnumpy()
assert_almost_equal(dtype_softmax_np, ref_softmax_np, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
dtype_grad_np = dtype_input.grad.asnumpy()
ref_grad_np = ref_input.grad.asnumpy()
assert_almost_equal(dtype_grad_np, ref_grad_np, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
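    # MXNET_SAFE_ACCUMULATION=1 makes the softmax reductions accumulate in higher precision,
    # which the strict low-precision comparisons below assume; on Windows they only run if
    # the variable was already set in the environment.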
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
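        # Reference implementation: softmax over the first length[i, j] entries along axis 1;
        # masked positions stay zero.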
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)], rtol=1e-2, atol=1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
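            # Build a numpy advanced-indexing tuple: the picked indices along `axis` and
            # broadcastable aranges on every other axis, so data[exp] is the reference output.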
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # forward with grad calc (is_train=True)
    exe.forward(is_train=True)
    out_train = exe.outputs[0]
    # forward without grad calc (is_train=False)
    exe.forward(is_train=False)
    out_test = exe.outputs[0]
    # make sure losses calculated in both modes are the same
    assert_almost_equal(out_train.asnumpy(), out_test.asnumpy())
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_train.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # forward with grad calc (is_train=True)
    exe.forward(is_train=True)
    out_train = exe.outputs[0]
    # forward without grad calc (is_train=False)
    exe.forward(is_train=False)
    out_test = exe.outputs[0]
    # make sure losses calculated in both modes are the same
    assert_almost_equal(out_train.asnumpy(), out_test.asnumpy())
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_train.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
    # Test 1: the two identical batch entries must yield the same loss; also compare against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
check_contrib_ctc_loss(acts, labels, true_loss)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
check_contrib_ctc_loss(acts2, labels2, true_loss)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels3, true_loss)
check_contrib_ctc_loss(acts2, labels3, true_loss)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss.asnumpy(), expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
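        # Append two all-NaN time steps; they must not affect the loss or gradient
        # because data_lengths below is 5 for both sequences.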
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
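        # Append two all-NaN time steps; they must not affect the loss or gradient
        # because data_lengths below is 5 for both sequences.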
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
check_contrib_ctc_loss_grad('first')
check_contrib_ctc_loss_grad('last')
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
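    # Hand-computed references for symmetric int8 quantization over the range [0, 1]:
    # qa_real = round(a * 127) and a_real = qa_real / 127.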
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
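    # With grad_req='add' the gradients of all T iterations accumulate, so the expected gradient
    # is the row mask (bi + ci) broadcast across columns and scaled by T.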
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# Currently only a GPU implementation exists
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
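# The helper below re-draws the random offsets for deformable PSROIPooling whenever a
# bilinear sampling location falls (almost) exactly on an integer grid point. Bilinear
# interpolation is not differentiable at those points, so the numeric-gradient check in
# test_deformable_psroipooling would otherwise compare against an ill-defined derivative.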
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable,
# so we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points the bilinear interpolation function may be non-differentiable;
# to avoid this, we check that the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# Currently only a GPU implementation exists
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check a different axis describing the matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
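# _make_triangle_symm (below) builds a lower-triangular mask of ones row by row and
# multiplies it into the input. For m = 3 the loop produces
#   [[1, 0, 0],
#    [1, 1, 0],
#    [1, 1, 1]]
# (transposed to an upper-triangular mask when lower=False), so only the requested
# triangle of `a` survives.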
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
low_trian = trian
if not lower:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
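# For an LQ factorization A = L * Q with orthonormal rows in Q, the two outputs above
# should equal the identity matrix (Q * Q^T) and the original input (L * Q); the forward
# checks in test_laop_2 compare against exactly those targets.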
# NOTE: If we leave the unused output dangling, things break for dtype=np.float64:
# the backward gradient for the unused output then has dtype np.float32.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
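# For a symmetric input A = U^T * diag(lam) * U (rows of U are the eigenvectors), the two
# outputs above should equal the identity (U * U^T) and the reconstructed input;
# test_laop_3 checks both against np.eye(n) and the symmetrized random matrix.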
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as they need CUDA 8
# and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
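# _syevd_backward (below) implements the eigendecomposition gradient for A = U^T diag(l) U
# as used here: with T = grad_u * U^T, it forms a symmetric matrix whose diagonal is grad_l
# and whose off-diagonal entries (i > j) are (T[i,j] - T[j,i]) / (2 * (l[i] - l[j])), placed
# symmetrically, and maps it back via U^T (.) U. This is the reference against which the
# operator's backward pass is compared.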
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8
# and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8
# and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 10):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test extraction of a triangle by doing a full roundtrip, as the intermediate extracted
# triangle uses a different ordering than NumPy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1 (matrix determinant lemma), so it's always invertible;
## the determinant is bounded away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# Expect the relative error to be within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
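# The two helpers below form a generic comparison harness for unary math operators: the
# tested op is wrapped in broadcast_add(op(x), zeros_like(x)), presumably just to give the
# composite a descriptive name for failure messages (adding zeros leaves both the result and
# the gradient unchanged), and the output is compared against a NumPy forward/backward reference.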
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
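# Quick sanity values for the reference above with sigma = 1: np_smooth_l1(0.5, 1) = 0.125
# (quadratic branch, 0.5 * 0.5**2) and np_smooth_l1(2.0, 1) = 1.5 (linear branch, |x| - 0.5);
# the corresponding gradients are 0.5 and 1.0.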
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,):  # as an exception, an all-ones shape is squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
date_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
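    # Slice out sample i while keeping a leading batch dimension of 1.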
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
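        # Run Proposal on each sample individually and check it matches MultiProposal on the full batch.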
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
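        # Backward of MultiProposal is expected to produce zero gradients for every input.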
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(),expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
        # Windows doesn't support setting environment variables on the fly, so disable it for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
        # Windows doesn't support setting environment variables on the fly, so disable it for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA is sometimes unable to determine the number
        # of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
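    # Bilinear interpolation at (y, x) on a 2D map; also returns the four (row, col, weight) taps used by the backward pass.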
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
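    # NumPy reference for ROIAlign forward and backward (optionally position-sensitive): average the bilinear samples in each output bin.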
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
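    # NumPy reference for rotated ROIAlign forward: sample a grid rotated by the ROI angle around its center and average per output bin.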
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k == 0
r = mx.nd.diag(a)
assert_almost_equal(r.asnumpy(), np.diag(a_np))
# k == 1
k = 1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# k == -1
k = -1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# random k
k = np.random.randint(-min(h,w) + 1, min(h,w))
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
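    # Reference depth_to_space: move a blocksize x blocksize factor out of the channel dimension into the spatial dimensions.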
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
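    # Reference space_to_depth: fold each blocksize x blocksize spatial patch into the channel dimension.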
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
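    # Reference loss: total cross-entropy between softmax probabilities and one-hot labels, summed over the batch.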
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
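    # index_array returns, for every element of the input, its coordinates along each axis (optionally only selected axes).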
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
if __name__ == '__main__':
import nose
nose.runmodule()
|
stream_test.py
|
import httmock
import os
import sys
import threading
import unittest
from . import captureOutput
from girder_worker.core.io import (make_stream_push_adapter,
make_stream_fetch_adapter)
from girder_worker.core.utils import run_process
from six.moves import BaseHTTPServer, socketserver
_iscript = os.path.join(os.path.dirname(__file__), 'stream_input.py')
_oscript = os.path.join(os.path.dirname(__file__), 'stream_output.py')
_pipepath = os.path.join(os.path.dirname(__file__), 'namedpipe')
_socket_port = int(os.environ.get('WORKER_TEST_SOCKET_PORT', 7941))
_server = None
_req_chunks = []
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
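    # Minimal handler that parses a chunked transfer-encoded PUT body and records each (length, data) chunk in _req_chunks.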
def do_PUT(self):
while True:
length = int(self.rfile.readline(), 16)
if length:
_req_chunks.append((length, self.rfile.read(length)))
else:
break
self.rfile.readline() # read the empty line between chunks
self.close_connection = True
self.send_response(200, message='received')
self.end_headers()
def log_message(self, *args, **kwargs):
pass # Override so we don't print to stderr
class ServerThread(object):
def __init__(self, port):
self.port = port
self.httpd = socketserver.TCPServer(('127.0.0.1', self.port), Handler)
self.thread = threading.Thread(target=self.start, args=(self.httpd,))
self.thread.start()
def start(self, httpd):
httpd.serve_forever()
def stop(self):
self.httpd.shutdown()
self.httpd.server_close()
self.thread.join(timeout=10)
def setUpModule():
global _server
_server = ServerThread(_socket_port)
def tearDownModule():
if _server:
_server.stop()
class TestStream(unittest.TestCase):
def setUp(self):
super(TestStream, self).setUp()
if os.path.exists(_pipepath):
os.unlink(_pipepath)
os.mkfifo(_pipepath)
def tearDown(self):
super(TestStream, self).tearDown()
if os.path.exists(_pipepath):
os.unlink(_pipepath)
def testOutputStreams(self):
output_spec = {
'mode': 'http',
'method': 'PUT',
'url': 'http://localhost:%d' % _socket_port
}
fd = os.open(_pipepath, os.O_RDONLY | os.O_NONBLOCK)
adapters = {
fd: make_stream_push_adapter(output_spec)
}
cmd = [sys.executable, _oscript, _pipepath]
try:
with captureOutput() as stdpipes:
run_process(cmd, adapters)
except Exception:
print('Stdout/stderr from exception: ')
print(stdpipes)
raise
self.assertEqual(stdpipes, ['start\ndone\n', ''])
self.assertEqual(len(_req_chunks), 1)
self.assertEqual(_req_chunks[0], (9, 'a message'))
def testInputStreams(self):
input_spec = {
'mode': 'http',
'method': 'GET',
'url': 'http://mockedhost'
}
@httmock.urlmatch(netloc='^mockedhost$', method='GET')
def mock_fetch(url, request):
return 'hello\nworld'
adapters = {
_pipepath: make_stream_fetch_adapter(input_spec)
}
cmd = [sys.executable, _iscript, _pipepath]
try:
with captureOutput() as stdpipes, httmock.HTTMock(mock_fetch):
run_process(cmd, input_pipes=adapters)
except Exception:
print('Stdout/stderr from exception: ')
print(stdpipes)
raise
self.assertEqual(stdpipes, ['olleh\ndlrow\n', ''])
|
ts2.py
|
# Anthony Tiongson (ast119) with assistance from Nicolas Gundersen (neg62)
# TS2 (a simplified top-level DNS server)
#
# resources:
# https://www.pythonforbeginners.com/system/python-sys-argv
import random, socket, sys, threading, time
# makeDNSTable function stores a DNS table from a file into a dictionary
def makeDNSTable(file, dictionary):
for line in file:
hostname, IPaddress, flag = line.split()
hostname = hostname.lower()
dictionary[hostname] = hostname + " " + IPaddress + " " + flag
file.close()
print("Created DNS dictionary: " + str(dictionary) + "\n")
# socketOpen function opens and returns a TCP socket for the port designated by the given label.
def socketOpen(label, port):
try:
socketOpen = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socketOpenPrompt = "Socket opened to connect to " + label + ": port " + str(port) + "\n"
print(socketOpenPrompt)
return socketOpen
    except socket.error as socketError:
        socketOpenError = label + ' socket already open, error: {} \n'.format(socketError)
print(socketOpenError)
exit()
# Print out the hostname and IP address for a server designated by a label
def serverInfo(label):
hostname = socket.gethostname()
print(label + " hostname: {}".format(hostname))
localhostIP = (socket.gethostbyname(hostname))
print(label + " IP address: {}".format(localhostIP))
# evaluate function accepts client connections on the given socket and checks each received query against the given dictionary
def evaluate(label, clientLabel, clientSocket, dictionary):
while True:
clientConnection, address = clientSocket.accept()
print("Received " + clientLabel + " connection request from: {}".format(address))
# Receive hostname query from the client
        queryFromClient = clientConnection.recv(256).decode('utf-8')
# The client is done querying
if queryFromClient == "shutdown" + label:
print("\nReceived shutdown command...\n")
clientConnection.close()
break
# If hostname is in dictionary, send hostname information
elif queryFromClient in dictionary:
clientConnection.send(str(dictionary[queryFromClient]).encode('utf-8'))
# Close the client socket connection
print("\nClosing " + clientLabel + " socket connection.\n")
clientConnection.close()
# Hostname not in dictionary, do not do anything
def server(label, clientLabel, file):
# Establish port via command-line argument
port = int(sys.argv[1])
# Initialize dictionary for DNS table
DNSTable = {}
# Store TS DNS table in file in the dictionary DNSTable
makeDNSTable(file, DNSTable)
# Open client socket and listen
client = socketOpen(clientLabel, port)
client.bind(('', port))
client.listen(1)
# Print out TS's hostname and its respective IP address
serverInfo(label)
    # Accept client socket connections and check received data against the DNSTable.
evaluate(label, clientLabel, client, DNSTable)
# Close the client socket and shutdown server
client.close()
exit()
if __name__ == "__main__":
label = "TS2Server"
clientLabel = "TS2Client"
# Create file object to read TS DNS table
file = open("PROJ2-DNSTS2.txt", "r")
thread = threading.Thread(name='server', target = server, args = (label, clientLabel, file,))
thread.start()
sleepTime = random.random() * 5
print("\n" + label + " thread executed, sleep time: " + str(sleepTime) + " sec\n")
time.sleep(sleepTime)
|
Processor.py
|
import time
from threading import Lock, Thread
from typing import Tuple, List, Callable, Dict
import numpy as np
from src.camera.Frame import Frame
from src.utils.PubSub import PubSub
from src.utils.injection.decorator import inject
class Processor:
"""
A convenience class that provides functionality to chain CV operations and stream results
"""
FRAME_DELAY = .1 # type: float
@inject
def __init__(self, processors: [List[Tuple[Callable[[np.ndarray, ...], np.ndarray], dict]],
List[Callable[[np.ndarray, ...], np.ndarray]]], pubsub: PubSub = None):
"""
Initialize Processor
:param processors: a list of middleware or tuple of [middleware, dictionary of parameters]
:param PubSub pubsub: injected pubsub
"""
self.pubsub = pubsub # type: PubSub
self.processors = [] # type: List[Tuple[Callable[[np.ndarray, ...], np.ndarray], dict]]
self._add_processors(processors)
self.image_cache = {} # type: Dict[str, np.ndarray]
self.image_lock = Lock() # type: Lock
self.processed = True # type: bool
        # Daemon publishing thread; runs until the process exits
self.publish_thread = Thread(target=self._publish_loop, daemon=True)
self.publish_thread.start()
def execute(self, data: [np.ndarray, Frame, bytes]) -> np.ndarray:
"""
Execute the chain of processing logic on a piece of data
:param [np.ndarray, Frame, bytes] data: an image for processing
:return: processed image
"""
image = Processor.data_to_bgr(data) # type: np.ndarray
for processor in self.processors:
image = processor[0](image, **processor[1])
if 'channel' in processor[1]:
with self.image_lock:
self.image_cache[processor[1]['channel']] = image
self.processed = True
return image
@staticmethod
def data_to_bgr(data: [np.ndarray, Frame, bytes]) -> np.ndarray:
"""
Convert an image into a numpy array
:param [np.ndarray, Frame, bytes] data: image
:return:
"""
img = data
if type(data) is Frame:
img = data.to_cv2_bgr()
elif type(data) is bytes:
img = Frame.from_bytes(data).to_cv2_bgr()
return img.copy()
def _add_processors(self, processors: [Tuple[callable, dict], callable]):
"""
Add processors in a uniform format to the chain
:param processors: a list of middleware or tuple of [middleware, dictionary of parameters]
:return:
"""
for element in processors:
if type(element) == tuple:
processor, params = element
else:
processor = element
params = {}
self.processors.append((processor, params))
def _publish(self, channel: str, image: np.ndarray):
"""
Publish a frame to PubSub
:param str channel: channel name
:param np.ndarray image: image array
:return:
"""
self.pubsub.publish(channel, Frame.from_cv2_bgr(image).to_bytes())
def subscribe(self, channel: str):
"""
Dynamically process data from a PubSub channel
:param str channel: channel name
:return:
"""
self.pubsub.subscribe(channel, self._receive_data)
def _receive_data(self, data: bytes):
"""
Convert image data to a numpy array
:param bytes data: image data
:return:
"""
self.img = Frame.from_bytes(data).to_cv2_bgr()
if self.processed:
self.processed = False
self.execute(self.img)
def _publish_loop(self):
"""
Periodically publish last processed frame
:return:
"""
while True:
with self.image_lock:
for channel in self.image_cache:
self._publish(channel, self.image_cache[channel])
time.sleep(Processor.FRAME_DELAY)
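# Usage sketch (illustrative only): ``swap_channels`` below is a hypothetical middleware.
# Each middleware receives the current image plus its parameter dict as keyword arguments;
# when a 'channel' parameter is present, the intermediate result is cached and periodically
# re-published on that PubSub channel by _publish_loop.
#
#   def swap_channels(image, channel=None):
#       return image[:, :, ::-1]
#
#   processor = Processor([(swap_channels, {'channel': 'frames/swapped'})])
#   processor.subscribe('frames/raw')  # frames arriving on 'frames/raw' get processed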
|
main.py
|
# coding=utf-8
import classify
import preprocess
import pandas as pd
import numpy as np
import csv
import codecs
import multiprocessing
import time
def input(trainname):
"""
load the text file
:param trainname: path of the input file
:return:list
"""
traindata = []
with open(trainname, 'rb') as f:
reader = csv.reader(f)
count = 0
for line in reader:
try:
traindata.append(line[0])
count += 1
except:
print "error:", line, count
traindata.append("1 ")
return traindata
def output(filename, ID, age, gender, education):
"""
generate the submit file
:param filename: path of the submit file
:param ID: user ID
:param age:predicted age
:param gender:predicted gender
:param education:predicted education
:return:submit file
"""
print ID.shape, age.shape, gender.shape, education.shape
with codecs.open(filename, 'w', encoding='gbk') as f:
count=0
for i in range(len(ID)):
# if count>=1000:
# break
f.write(str(ID[i]) + ' ' + str(age[i]) + ' ' + str(gender[i]) + ' ' + str(education[i]) + '\n')
count+=1
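# e.g. each line written by output() is a space-separated record of the form:
# "<ID> <predicted age> <predicted gender> <predicted education>"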
if __name__ == '__main__':
"""
the main function
    Note: adjust the file paths below
"""
start=time.time()
# order='predict' #execute predict function
order='test' #execute 2-fold validation function
    print 'order is', order
print '----------start----------'
#loading
trainname = 'jieba_train_cut.csv'
testname = 'jieba_test_cut.csv'
traindata = input(trainname)
testdata = input(testname)
label_genderfile_path = 'train_gender.csv'
label_agefile_path = 'train_age.csv'
label_edufile_path = 'train_education.csv'
genderdata = np.loadtxt(open(label_genderfile_path, 'r')).astype(int)
agedata = np.loadtxt(open(label_agefile_path, 'r')).astype(int)
educationdata = np.loadtxt(open(label_edufile_path, 'r')).astype(int)
# ---------------------------------
    print 'preprocessing..'
preprocessob = preprocess.preprocess()
#remove label missed samples
gender_traindatas, genderlabel = preprocessob.removezero(traindata, genderdata)
age_traindatas, agelabel = preprocessob.removezero(traindata, agedata)
edu_traindatas, edulabel = preprocessob.removezero(traindata, educationdata)
    # fill in the path to your word2vec (wv) vectors
w2vtrain = np.load('wv300_win100.train.npy')
w2vtest = np.load('wv300_win100.test.npy')
wv_gender_traindatas, genderlabel = preprocessob.removezero(w2vtrain, genderdata)
wv_age_traindatas, agelabel = preprocessob.removezero(w2vtrain, agedata)
wv_edu_traindatas, edulabel = preprocessob.removezero(w2vtrain, educationdata)
if order=='test':
termob1 = classify.term()
termob2 = classify.term()
termob3 = classify.term()
p1 = multiprocessing.Process(target=termob1.validation,
args=(gender_traindatas, genderlabel, wv_gender_traindatas, 'gender',))
p2=multiprocessing.Process(target=termob2.validation,args=(age_traindatas, agelabel, wv_age_traindatas, 'age',))
p3=multiprocessing.Process(target=termob3.validation,args=(edu_traindatas, edulabel, wv_edu_traindatas, 'edu',))
p1.start()
p2.start()
p3.start()
p1.join()
p2.join()
p3.join()
elif order=='predict':
termob = classify.term()
gender=termob.predict(gender_traindatas, genderlabel, testdata, wv_gender_traindatas, w2vtest, 'gender')
age=termob.predict(age_traindatas, agelabel, testdata, wv_age_traindatas, w2vtest, 'age')
edu=termob.predict(edu_traindatas, edulabel, testdata, wv_edu_traindatas, w2vtest, 'edu')
ID = pd.read_csv('user_tag_query.10W.TEST.csv').ID
output('submit.csv', ID, age, gender, edu)
end=time.time()
print 'total time is', end-start
|
process_pool.py
|
# Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This pool is different from standard Python pool implementations by the fact that the workers are spawned
without using fork. Some issues with using jvm based HDFS driver were observed when the process was forked
(could not access HDFS from the forked worker if the driver was already used in the parent process)"""
import logging
import pickle
import sys
import os
from time import sleep, time
from traceback import format_exc
from threading import Thread
from psutil import process_iter
import zmq
from zmq import ZMQBaseError
from petastorm.reader_impl.pickle_serializer import PickleSerializer
from petastorm.workers_pool import EmptyResultError, VentilatedItemProcessedMessage
from petastorm.workers_pool.exec_in_new_process import exec_in_new_process
# When _CONTROL_FINISHED is passed via control socket to a worker, the worker will terminate
_CONTROL_FINISHED = "FINISHED"
# This is the number of seconds we will wait for all processes to be created. We throw an error if we can not start
# them in time
_WORKERS_STARTED_TIMEOUT_S = 20
_SOCKET_LINGER_MS = 1000
_KEEP_TRYING_WHILE_ZMQ_AGAIN_IS_RAIZED_TIMEOUT_S = 20
# Amount of time we will wait on the queue to get the next result. If no results are received by then, we will
# recheck if no more items are expected to be ventilated
_VERIFY_END_OF_VENTILATION_PERIOD = 0.1
_WORKER_STARTED_INDICATOR = 'worker started indicator'
logger = logging.getLogger(__name__)
#
# ---------------- ------------------
# | | --- _ventilator_send (push) --> | |
# | main process | --- _control_sender (pub) --> | worker process |
# | | <-- _results_receiver (pull) -- | |
# ---------------- ------------------
#
# 1. When ProcessPool start is called, it creates _ventilator_send, _control_sender and _result_receiver
# sockets.
# 2. After initialization is done, worker process sends _WORKER_STARTED_INDICATOR
# 3. Once ProcessPool receives _WORKER_STARTED_INDICATOR from all workers, the ProcessPool
# is ready to start ventilating.
#
# 4. Each ventilated message is picked up by one of the workers.
# 5. Worker process would send 0..n responses for each ventilated message. Each response
# is a tuple of (data payload, control). Data payload is serialized using
# _serializer instance. Control is always pickled.
# 6. After the last response to a single ventilated item is transmitted, an instance of VentilatedItemProcessedMessage
# is transmitted as a control. This control message is needed to count how many ventilated
# items are being processed at each time.
#
# 7. Workers are terminated by broadcasting _CONTROL_FINISHED message.
#
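# A minimal usage sketch (illustrative only; ``EchoWorker`` stands for any class implementing
# the WorkerBase protocol, i.e. it exposes process() and publishes results via its publish function):
#
#   pool = ProcessPool(workers_count=2)
#   pool.start(EchoWorker)             # blocks until every worker reports _WORKER_STARTED_INDICATOR
#   for i in range(10):
#       pool.ventilate(i)              # each call ends up as EchoWorker.process(i) in some worker
#   try:
#       while True:
#           print(pool.get_results())  # yields whatever the workers published
#   except EmptyResultError:
#       pass                           # all ventilated items were processed
#   finally:
#       pool.stop()
#       pool.join()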
def _keep_retrying_while_zmq_again(timeout, func, allowed_failures=3):
"""Will keep executing func() as long as zmq.Again is being thrown.
Usage example:
>>> _keep_retrying_while_zmq_again(
>>> _KEEP_TRYING_WHILE_ZMQ_AGAIN_IS_RAIZED_TIMEOUT_S,
>>> lambda: self._ventilator_send.send_pyobj(
>>> (args, kargs),
>>> flags=zmq.constants.NOBLOCK))
    :param timeout: A :class:`RuntimeError` is raised if ``func()`` could not be executed without getting a
:class:`zmq.Again` within this timeout. The timeout is defined in seconds.
:param func: The function will be executed (as ``func()``)
:return: None
"""
now = time()
failures = 0
while time() < now + timeout:
try:
return func()
except zmq.Again:
logger.debug('zmq.Again exception caught. Will try again')
sleep(0.1)
continue
except ZMQBaseError as e:
            # There are race conditions while setting up the zmq socket, so you can get unexpected errors
            # for the first bit of time. We therefore allow a few unknown failures while the sockets
            # are warming up before propagating them as a true problem.
sleep(0.1)
failures += 1
logger.debug('Unexpected ZMQ error \'%s\' received. Failures %d/%d', str(e), failures, allowed_failures)
if failures > allowed_failures:
raise
raise RuntimeError('Timeout ({} [sec]) has elapsed while keep getting \'zmq.Again\''.format(timeout))
class ProcessPool(object):
def __init__(self, workers_count, serializer=None, zmq_copy_buffers=True):
"""Initializes a ProcessPool.
This pool is different from standard Python pool implementations by the fact that the workers are spawned
without using fork. Some issues with using jvm based HDFS driver were observed when the process was forked
(could not access HDFS from the forked worker if the driver was already used in the parent process).
:param workers_count: Number of processes to be spawned
:param serializer: An object that would be used for data payload serialization when sending data from a worker
process to the main process. ``PickleSerializer`` is used by default. May use
:class:`petastorm.reader_impl.ArrowTableSerializer` (should be used together with
:class:`petastorm.reader.ArrowReader`)
:param zmq_copy_buffers: When set to False, we will use a zero-memory-copy feature of recv_multipart.
A downside of using this zero memory copy feature is that it does not play nice with Python GC and cases
were observed when it resulted in wild memory footprint swings. Having the buffers copied is typically a
safer alternative.
"""
self._workers = []
self._ventilator_send = None
self._control_sender = None
self.workers_count = workers_count
self._results_receiver_poller = None
self._results_receiver = None
self._ventilated_items = 0
self._ventilated_items_processed = 0
self._ventilator = None
self._serializer = serializer or PickleSerializer()
self._zmq_copy_buffers = zmq_copy_buffers
def _create_local_socket_on_random_port(self, context, socket_type):
"""Creates a zmq socket on a random port.
:param context: zmq context
:param socket_type: zmq socket type
:return: A tuple: ``(zmq_socket, endpoint_address)``
"""
LOCALHOST = 'tcp://127.0.0.1'
socket = context.socket(socket_type)
# There are race conditions where the socket can close when messages are still trying to be sent by zmq.
# This can end up causing zmq to block indefinitely when sending objects or shutting down. Having the socket
# linger on close helps prevent this.
socket.linger = _SOCKET_LINGER_MS
port = socket.bind_to_random_port(LOCALHOST)
return socket, '{}:{}'.format(LOCALHOST, port)
def start(self, worker_class, worker_setup_args=None, ventilator=None):
"""Starts worker processes.
        Will block until all processes subscribe to the worker queue (messages are distributed by zmq on write,
        so if only one out of many workers is up at the time of 'ventilation', the initial load won't be balanced
        between workers). If the workers can not be started in a timely fashion, an exception is raised.
        :param worker_class: The worker class. It will be instantiated in the worker process and
            must implement the :class:`.WorkerBase` protocol.
:param worker_setup_args: Argument that will be passed to 'args' property of the instantiated
:class:`.WorkerBase`.
:param ventilator: Optional ventilator to handle ventilating items to the process pool. Process pool needs
to know about the ventilator to know if it has completed ventilating items.
:return: ``None``
"""
# Initialize a zeromq context
self._context = zmq.Context()
# Ventilator socket used to send out tasks to workers
self._ventilator_send, worker_receiver_socket = self._create_local_socket_on_random_port(self._context,
zmq.PUSH)
# Control socket is used to signal termination of the pool
self._control_sender, control_socket = self._create_local_socket_on_random_port(self._context, zmq.PUB)
self._results_receiver, results_sender_socket = self._create_local_socket_on_random_port(self._context,
zmq.PULL)
# We need poller to be able to read results from workers in a non-blocking manner
self._results_receiver_poller = zmq.Poller()
self._results_receiver_poller.register(self._results_receiver, zmq.POLLIN)
# Start a bunch of processes
self._workers = [
exec_in_new_process(_worker_bootstrap, worker_class, worker_id, control_socket, worker_receiver_socket,
results_sender_socket, os.getpid(), self._serializer, worker_setup_args)
for worker_id in range(self.workers_count)]
# Block until we have get a _WORKER_STARTED_INDICATOR from all our workers
self._wait_for_workers_to_start()
if ventilator:
self._ventilator = ventilator
self._ventilator.start()
def _wait_for_workers_to_start(self):
"""Waits for all workers to start."""
for _ in range(self.workers_count):
started_indicator = _keep_retrying_while_zmq_again(
_KEEP_TRYING_WHILE_ZMQ_AGAIN_IS_RAIZED_TIMEOUT_S,
lambda: self._results_receiver.recv_pyobj(flags=zmq.constants.NOBLOCK))
assert _WORKER_STARTED_INDICATOR == started_indicator
def ventilate(self, *args, **kargs):
"""Sends a work item to a worker process. Will result in worker.process(...) call with arbitrary arguments."""
self._ventilated_items += 1
logger.debug('ventilate called. total ventilated items count %d', self._ventilated_items)
# There is a race condition when sending objects to zmq that if all workers have been killed, sending objects
# can block indefinitely. By using NOBLOCK, an exception is thrown stating that all resources have been
# exhausted which the user can decide how to handle instead of just having the process hang.
_keep_retrying_while_zmq_again(_KEEP_TRYING_WHILE_ZMQ_AGAIN_IS_RAIZED_TIMEOUT_S,
lambda: self._ventilator_send.send_pyobj((args, kargs),
flags=zmq.constants.NOBLOCK))
def get_results(self):
"""Returns results from worker pool
:param timeout: If None, will block forever, otherwise will raise :class:`.TimeoutWaitingForResultError`
exception if no data received within the timeout (in seconds)
:return: arguments passed to ``publish_func(...)`` by a worker. If no more results are anticipated,
:class:`.EmptyResultError` is raised.
"""
while True:
# If there is no more work to do, raise an EmptyResultError
logger.debug('ventilated_items=%d ventilated_items_processed=%d ventilator.completed=%s',
self._ventilated_items, self._ventilated_items_processed,
str(self._ventilator.completed()) if self._ventilator else 'N/A')
if self._ventilated_items == self._ventilated_items_processed:
# We also need to check if we are using a ventilator and if it is completed
if not self._ventilator or self._ventilator.completed():
logger.debug('ventilator reported it has completed. Reporting end of results')
raise EmptyResultError()
logger.debug('get_results polling on the next result')
socks = self._results_receiver_poller.poll(_VERIFY_END_OF_VENTILATION_PERIOD * 1e3)
if not socks:
continue
# Result message is a tuple containing data payload and possible exception (or None).
fast_serialized, pickle_serialized = self._results_receiver.recv_multipart(copy=self._zmq_copy_buffers)
pickle_serialized = pickle.loads(pickle_serialized)
if pickle_serialized:
logger.debug('get_results a pickled message %s', type(pickle_serialized))
if isinstance(pickle_serialized, VentilatedItemProcessedMessage):
self._ventilated_items_processed += 1
if self._ventilator:
self._ventilator.processed_item()
elif isinstance(pickle_serialized, Exception):
self.stop()
self.join()
raise pickle_serialized
else:
logger.debug('get_results received new results')
if self._zmq_copy_buffers:
deserialized_result = self._serializer.deserialize(fast_serialized)
else:
deserialized_result = self._serializer.deserialize(fast_serialized.buffer)
return deserialized_result
def stop(self):
"""Stops all workers (non-blocking)"""
logger.debug('stopping')
if self._ventilator:
self._ventilator.stop()
try:
self._control_sender.send_string(_CONTROL_FINISHED)
except ZMQBaseError as e:
            logger.warning('Stopping worker processes failed with \'%s\'. This does not necessarily indicate an '
                           'error. It can happen if worker processes were terminated due to an error raised in that '
                           'process. See the log for additional messages from the failed worker.', str(e))
def join(self):
"""Blocks until all workers are terminated."""
logger.debug('joining')
# Slow joiner problem with zeromq means that not all workers are guaranteed to have gotten
# the stop event. Therefore we will keep sending it until all workers are stopped to prevent
# a deadlock.
while any([w.poll() is None for w in self._workers]):
self.stop()
sleep(.1)
for w in self._workers:
w.wait()
self._ventilator_send.close()
self._control_sender.close()
self._results_receiver.close()
self._context.destroy()
@property
def diagnostics(self):
# items_produced is updated only when VentilatedItemProcessedMessage is received. This will happen only on the
        # next call to get_results, so its value may lag.
return {
'items_consumed': self._ventilated_items,
'items_produced': self._ventilated_items_processed,
'items_inprocess': self._ventilated_items - self._ventilated_items_processed,
'zmq_copy_buffers': self._zmq_copy_buffers
}
def _serialize_result_and_send(socket, serializer, data):
# Result message is a tuple containing data payload and possible exception (or None).
socket.send_multipart([serializer.serialize(data), pickle.dumps(None)])
def _monitor_thread_function(main_process_pid):
while True:
logger.debug('Monitor thread monitoring pid: %d', main_process_pid)
main_process_alive = any([process.pid for process in process_iter() if process.pid == main_process_pid])
if not main_process_alive:
logger.debug('Main process with pid %d is dead. Killing worker', main_process_pid)
os._exit(0)
sleep(1)
def _worker_bootstrap(worker_class, worker_id, control_socket, worker_receiver_socket, results_sender_socket,
main_process_pid, serializer, worker_args):
"""This is the root of the spawned worker processes.
:param worker_class: A class with worker implementation.
:param worker_id: An integer. Unique for each worker.
    :param control_socket: zmq socket used to control the worker (currently only the ``_CONTROL_FINISHED`` message is supported)
:param worker_receiver_socket: A zmq socket used to deliver tasks to the worker
:param results_sender_socket: A zmq socket used to deliver the work products to the consumer
:param serializer: A serializer object (with serialize/deserialize methods) or None.
:param worker_args: Application specific parameter passed to worker constructor
:return: ``None``
"""
logger.debug('Starting _worker_bootstrap')
context = zmq.Context()
logger.debug('Connecting sockets')
# Set up a channel to receive work from the ventilator
work_receiver = context.socket(zmq.PULL)
work_receiver.linger = 0
work_receiver.connect(worker_receiver_socket)
# Set up a channel to send result of work to the results reporter
results_sender = context.socket(zmq.PUSH)
results_sender.linger = 0
results_sender.connect(results_sender_socket)
# Set up a channel to receive control messages over
control_receiver = context.socket(zmq.SUB)
control_receiver.linger = 0
control_receiver.connect(control_socket)
_setsockopt(control_receiver, zmq.SUBSCRIBE, b"")
logger.debug('Setting up poller')
# Set up a poller to multiplex the work receiver and control receiver channels
poller = zmq.Poller()
poller.register(work_receiver, zmq.POLLIN)
poller.register(control_receiver, zmq.POLLIN)
results_sender.send_pyobj(_WORKER_STARTED_INDICATOR)
# Use this 'none_marker' as the first argument to send_multipart.
none_marker = bytes()
logger.debug('Instantiating a worker')
# Instantiate a worker
worker = worker_class(worker_id, lambda data: _serialize_result_and_send(results_sender, serializer, data),
worker_args)
logger.debug('Starting monitor loop')
thread = Thread(target=_monitor_thread_function, args=(main_process_pid,))
thread.daemon = True
thread.start()
# Loop and accept messages from both channels, acting accordingly
logger.debug('Entering worker loop')
while True:
logger.debug('Polling new message')
socks = dict(poller.poll())
# If the message came from work_receiver channel
if socks.get(work_receiver) == zmq.POLLIN:
try:
args, kargs = work_receiver.recv_pyobj()
logger.debug('Starting worker.process')
worker.process(*args, **kargs)
logger.debug('Finished worker.process')
results_sender.send_multipart([none_marker, pickle.dumps(VentilatedItemProcessedMessage())])
logger.debug('Sending result')
except Exception as e: # pylint: disable=broad-except
stderr_message = 'Worker %d terminated: unexpected exception:\n' % worker_id
stderr_message += format_exc()
logger.debug('worker.process failed with exception %s', stderr_message)
sys.stderr.write(stderr_message)
results_sender.send_multipart([none_marker, pickle.dumps(e)])
return
# If the message came over the control channel, shut down the worker.
if socks.get(control_receiver) == zmq.POLLIN:
control_message = control_receiver.recv_string()
logger.debug('Received control message %s', control_message)
if control_message == _CONTROL_FINISHED:
worker.shutdown()
break
def _setsockopt(sock, option, value):
"""
This wraps setting socket options since python2 vs python3 handles strings differently
and pyzmq requires a different call. See http://pyzmq.readthedocs.io/en/latest/unicode.html
"""
try:
sock.setsockopt(option, value)
except TypeError:
sock.setsockopt_string(option, value)
|
xmqp.py
|
""" XVM (c) www.modxvm.com 2013-2017 """
__all__ = ['start', 'stop', 'call']
# PUBLIC
import os
import threading
import simplejson
import traceback
import uuid
import BigWorld
from gui.shared import g_eventBus, events
import pika
from pika import exceptions as pika_exceptions
from xfw import *
from xvm_main.python.logger import *
import xvm_main.python.config as config
import xvm_main.python.minimap_circles as minimap_circles
import xvm_main.python.utils as utils
from xvm_main.python.consts import *
from xvm_main.python.xvm import g_xvm
from consts import *
XMQP_DEVELOPMENT = os.environ.get('XMQP_DEVELOPMENT') == '1'
_xmqp = None
_xmqp_thread = None
def is_active():
global _xmqp_thread, _xmqp
return _xmqp_thread and _xmqp.is_consuming
def start():
BigWorld.player().arena.onNewVehicleListReceived -= start
BigWorld.callback(0, _start)
def _start(e=None):
g_eventBus.removeListener(XVM_EVENT.XVM_SERVICES_INITIALIZED, _start)
if not g_xvm.xvmServicesInitialized:
g_eventBus.addListener(XVM_EVENT.XVM_SERVICES_INITIALIZED, _start)
return
if (config.networkServicesSettings.xmqp and not isReplay()) or XMQP_DEVELOPMENT:
token = config.token.token
if token:
players = []
player = BigWorld.player()
for (vehicleID, vData) in player.arena.vehicles.iteritems():
# ally team only
if vData['team'] == player.team:
players.append(vData['accountDBID'])
if XMQP_DEVELOPMENT:
accountDBID = utils.getAccountDBID()
if accountDBID not in players:
players.append(accountDBID)
#players.append(42)
#players.append(43)
# start
stop()
global _xmqp_thread, _xmqp
_xmqp = _XMQP(players)
_xmqp_thread = threading.Thread(target=_xmqp.start, name='xmqp')
_xmqp_thread.setDaemon(True)
_xmqp_thread.start()
debug('[XMQP] Thread started')
def stop():
global _xmqp_thread, _xmqp
if _xmqp_thread:
_xmqp.stop()
_xmqp_thread.join()
_xmqp_thread = None
debug('[XMQP] Thread stopped')
_xmqp = None
def call(message):
global _xmqp
if _xmqp:
_xmqp.call(message)
def getCapabilitiesData():
capabilities = {}
mcdata = minimap_circles.getMinimapCirclesData()
if mcdata:
capabilities['sixthSense'] = mcdata.get('commander_sixthSense', None)
#capabilities['sixthSense'] = True # for debug
return capabilities
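# Usage sketch (illustrative only): other XVM python modules broadcast data to allied
# players with call() and receive peers' messages by listening for the XMQP_MESSAGE
# event fired from on_message() below. The handler name is hypothetical and the event
# context is assumed to carry the decoded {'accountDBID': ..., 'data': ...} payload.
#
#   import xmqp
#   xmqp.call({'spotted': True})
#
#   def _on_xmqp_message(e=None):
#       payload = e.ctx
#       ...
#   g_eventBus.addListener(XVM_BATTLE_EVENT.XMQP_MESSAGE, _on_xmqp_message)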
players_capabilities = {}
# PRIVATE
class _XMQP(object):
"""This is an xmqp consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
def __init__(self, players):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
"""
self._players = players
self._consuming = False
self._closing = False
self._connection = None
self._channel = None
self._consumer_tag = None
self._exchange_name = None
self._queue_name = None
#self._correlation_id = None
self._exchange_correlation_id = None
self._reconnect_attempts = 0
global players_capabilities
players_capabilities = {}
@property
def is_consuming(self):
return self._consuming
def start(self):
"""Run the xmqp consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
debug('[XMQP] Starting')
self._connection = self.connect()
self.start_ioloop()
def stop(self):
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then close the channel and
connection. The IOLoop is started again because this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
"""
try:
debug('[XMQP] Stopping')
self._connection.ioloop.stop()
if self.is_consuming:
self.stop_consuming()
if self._channel and self._channel.is_open:
self.close_channel()
if self._connection and self._connection.is_open:
self.close_connection()
self._connection.ioloop.stop()
debug('[XMQP] Stopped')
except (pika_exceptions.ChannelClosed, pika_exceptions.ConnectionClosed):
debug(traceback.format_exc())
except Exception as ex:
err(traceback.format_exc())
def call(self, data):
if self.is_consuming and self._exchange_name is not None:
try:
#self._correlation_id = str(uuid.uuid4())
message = simplejson.dumps({'accountDBID': utils.getAccountDBID(), 'data': data})
debug('[XMQP] call: %s' % utils.hide_guid(message))
self._channel.basic_publish(
exchange=self._exchange_name,
routing_key='',
#properties=pika.BasicProperties(
# reply_to=self._queue_name,
# correlation_id=self._correlation_id),
body=message)
except Exception as ex:
err('_exchange_name=' + str(self._exchange_name))
err(traceback.format_exc())
# INTERNAL
def on_message(self, unused_channel, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, delivery tag and a redelivered flag
for the message. The properties passed in is an instance of
BasicProperties with the message properties and the body is the
message that was sent.
:param pika.channel.Channel unused_channel: The channel object
:param pika.Spec.Basic.Deliver: basic_deliver method
:param pika.Spec.BasicProperties: properties
:param str|unicode body: The message body
"""
if self._closing:
return
try:
#debug('[XMQP] Received message #%s: %s' % (basic_deliver.delivery_tag, body))
debug('[XMQP] recv: %s' % body)
#debug(basic_deliver)
#if body != 'ok':
# debug('[XMQP] Received message #%s: %s' % (basic_deliver.delivery_tag, body))
if self._exchange_correlation_id == properties.correlation_id:
response = simplejson.loads(body)
if 'exchange' in response:
self._exchange_name = response['exchange']
global players_capabilities
for accountDBID, data in response['users'].iteritems():
players_capabilities[int(accountDBID)] = simplejson.loads(data) if data else {}
self.bind_channel()
else:
log("[XMQP] ERROR: response='{}'".format(body))
self.stop()
else:
#elif basic_deliver.exchange:
#debug('[XMQP] recv: {} {}'.format(properties.headers.get('userId', None), body))
response = simplejson.loads(body)
g_eventBus.handleEvent(events.HasCtxEvent(XVM_BATTLE_EVENT.XMQP_MESSAGE, response))
except Exception as ex:
err(traceback.format_exc())
def start_ioloop(self):
try:
self._connection.ioloop.start()
except Exception as ex:
err(traceback.format_exc())
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
debug('[XMQP] Connecting')
credentials = pika.PlainCredentials('xvm', 'xvm')
params = pika.ConnectionParameters(
host=XVM.XMQP_SERVER,
#port=XVM.XMQP_SERVER_PORT,
virtual_host='xvm',
credentials=credentials,
#channel_max=None,
#frame_max=None,
#heartbeat=None,
#ssl=None,
#ssl_options=None,
connection_attempts=3,
retry_delay=3,
socket_timeout=1,
#locale=None,
#backpressure_detection=None,
blocked_connection_timeout=5)
#client_properties=_DEFAULT)
return pika.SelectConnection(
params,
on_open_error_callback=self.on_open_connection_error,
on_open_callback=self.on_connection_open,
stop_ioloop_on_close=False)
def on_open_connection_error(self, unused_connection, error_message=None):
err('[XMQP] on_open_connection_error %s' % repr(pika_exceptions.AMQPConnectionError(error_message or
self._connection.params.connection_attempts)))
self._connection.ioloop.stop()
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
debug('[XMQP] Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._consuming = False
self._channel = None
if self._closing:
self._connection.ioloop.stop()
elif self._reconnect_attempts >= 3:
debug('[XMQP] Connection closed, maximum reopen attempts reached')
self._connection.ioloop.stop()
else:
debug('[XMQP] Connection closed, reopening in 5 seconds: (%s) %s' % (reply_code, reply_text))
self._connection.add_timeout(5, self.reconnect)
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
debug('[XMQP] Reconnecting')
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
self._reconnect_attempts += 1
self._connection = self.connect()
self.start_ioloop()
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
debug('[XMQP] Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
if self._closing:
return
debug('[XMQP] Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_queue()
def setup_queue(self):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
"""
debug('[XMQP] Declaring queue')
self._channel.queue_declare(self.on_queue_declareok, exclusive=True)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange by issuing the Queue.Bind RPC command.
When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
if self._closing:
return
self._queue_name = method_frame.method.queue
debug('[XMQP] queue: %s' % (self._queue_name))
self.start_consuming()
self.get_exchange_name()
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
debug('[XMQP] Issuing consumer related RPC commands')
self._consuming = True
self.add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self.on_message, self._queue_name, no_ack=True)
def get_exchange_name(self):
debug('[XMQP] Getting exchange name')
self._exchange_correlation_id = str(uuid.uuid4())
message = simplejson.dumps({
'token': config.token.token,
'players': self._players,
'capabilities': simplejson.dumps(getCapabilitiesData())})
debug('[XMQP] %s' % utils.hide_guid(message))
self._channel.basic_publish(
exchange=XVM.XMQP_LOBBY_EXCHANGE,
routing_key=XVM.XMQP_LOBBY_ROUTING_KEY,
properties=pika.BasicProperties(
reply_to=self._queue_name,
correlation_id=self._exchange_correlation_id,
),
body=message)
def bind_channel(self):
debug('[XMQP] Binding %s to %s' % (self._exchange_name, self._queue_name))
self._channel.queue_bind(self.on_bindok, self._queue_name, self._exchange_name)
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
"""
debug('[XMQP] Queue bound')
self._reconnect_attempts = 0
g_eventBus.handleEvent(events.HasCtxEvent(XVM_BATTLE_EVENT.XMQP_CONNECTED))
# service methods
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
debug('[XMQP] Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
debug('[XMQP] Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
debug('[XMQP] Channel %i was closed: (%s) %s' % (channel, reply_code, reply_text))
self._connection.close()
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
debug('[XMQP] Adding consumer cancellation callback')
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
debug('[XMQP] Consumer was cancelled remotely, shutting down: %r' % (method_frame))
if self._channel:
self._channel.close()
def stop_consuming(self):
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
self._consuming = False
self._closing = True
if self._channel:
debug('[XMQP] Sending a Basic.Cancel RPC command to RabbitMQ')
self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
def on_cancelok(self, unused_frame):
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
        closed, which will in turn close the connection.
:param pika.frame.Method unused_frame: The Basic.CancelOk frame
"""
debug('[XMQP] RabbitMQ acknowledged the cancellation of the consumer')
self.close_channel()
def close_channel(self):
debug('[XMQP] Closing the channel')
self._consuming = False
self._closing = True
if self._channel is not None:
self._channel.close()
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
debug('[XMQP] Closing connection')
self._consuming = False
self._closing = True
self._connection.close()
|
NarrativeIndexerServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from configparser import ConfigParser
from Utils import log
import requests as _requests
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'NarrativeIndexer'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from NarrativeIndexer.NarrativeIndexerImpl import NarrativeIndexer # noqa @IgnorePep8
impl_NarrativeIndexer = NarrativeIndexer(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
            message = getattr(e, 'message', None)
            if message is None:
                newerr.data = repr(e)
            elif isinstance(message, str):
                newerr.data = message
            else:
                # Some exceptions embed other exceptions as the message
                newerr.data = repr(message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]: # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'NarrativeIndexer'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.rpc_service.add(impl_NarrativeIndexer.status,
name='NarrativeIndexer.status',
types=[dict])
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print(("Listening on port %s" % port))
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
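# Usage sketch (illustrative only): run the server in a child process, call the
# 'NarrativeIndexer.status' method registered above over JSON-RPC, then stop it.
#
#   port = start_server(newprocess=True)
#   body = json.dumps({'method': 'NarrativeIndexer.status', 'params': [],
#                      'version': '1.1', 'id': '42'})
#   resp = _requests.post('http://localhost:%s' % port, data=body)
#   print(resp.json()['result'])
#   stop_server()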
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print((str(err))) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print(("Host set to %s" % host))
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
Touchscreendisp (for use on the pi).py
|
from tkinter import *
from tkinter import messagebox, _setit, ttk
from PIL import Image, ImageTk
from ttkthemes import ThemedStyle
from Keyboard import create_keyboard, keyboard_on, function_maker
import Wifi_file as wf
import VideoSetting as vs
import pickle
import os
import threading
import cv2
import StreamSetting
# See README for information about this
starting = 0
audio_connection = 0
video_connection = 0
cap = cv2.VideoCapture(0)
cap.set(3,480)
cap.set(4,640)
class Customise_button(ttk.Button):
def __init__(self, parent, text, command):
self.parent = parent
if type(text).__name__ == 'PhotoImage':
self.name = 'arrow'
super().__init__(stream, image=text, command=command)
else:
self.name = text
super().__init__(stream, text=text, command=command)
def __repr__(self):
return (self.name)
class Customise_window():
def __init__(self, name):
self.name = name
self.button_list = []
def add_button(self, button):
if button.parent == self.name:
self.button_list.append(button)
def create_window(self):
if len(self.button_list) == 1:
self.button_list[0].grid(column=0, row=3, columnspan=2)
if len(self.button_list) == 2:
self.button_list[0].grid(column=0, row=3, columnspan=2)
self.button_list[1].grid(column=2, row=1, rowspan=2)
if len(self.button_list) == 4:
self.button_list[0].grid(column=0, row=3)
self.button_list[1].grid(column=1, row=3)
self.button_list[2].grid(column=2, row=1)
self.button_list[3].grid(column=2, row=2)
def close_window(self):
for i in self.button_list:
i.grid_forget()
def __repr__(self):
return self.button_list.__repr__()
# Functions for resetting back to the default settings
def set_defaults():
settings_file = open('settings_file', 'wb')
settings_dic = {'bit_rate': 30, 'frame_rate': 30, 'audioless': False, 'audio_delay': 200}
pickle.dump(settings_dic, settings_file)
settings_file.close()
# Function for reading the settings from settings_file
def inital_settings():
global frame_rate, delay_value, chk_state, bit_rate
previous_settings = open('settings_file', 'rb')
saved_values = pickle.load(previous_settings)
chk_state = IntVar(value=saved_values['audioless'])
bit_rate = DoubleVar(value=saved_values['bit_rate'])
frame_rate = DoubleVar(value=saved_values['frame_rate'])
delay_value = DoubleVar(value=saved_values['audio_delay'])
previous_settings.close()
# Function for updating file every 5 seconds with new settings
def update_settings(*args):
global frame_rate, delay_value, chk_state, bit_rate
settings_dic = {'bit_rate': bit_rate.get(), 'frame_rate': frame_rate.get(), 'audioless': chk_state.get(),
'audio_delay': delay_value.get()}
settings_file = open('settings_file', 'wb')
pickle.dump(settings_dic, settings_file)
settings_file.close()
threading.Timer(5, update_settings).start()
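# The settings file is simply a pickled dict, e.g.
#   {'bit_rate': 30, 'frame_rate': 30, 'audioless': False, 'audio_delay': 200}
# inital_settings() loads it into Tk variables and update_settings() re-pickles
# the current values every 5 seconds via threading.Timer.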
app = Tk()
app.title('Streaming on a prayer')
app.geometry('480x320') # swap to fullscreen when using touchscreen
#app.attributes('-fullscreen', True)
# initializing style
style = ThemedStyle(app)
style.set_theme("equilux")
# background colour from theme equilux
bg = style.lookup('TFrame', 'background')
fg = style.lookup('TFrame', 'foreground')
app.configure(bg=style.lookup('TFrame', 'background'))
# app.attributes('-fullscreen',True)
# Setting up windows for application (main menu, settings, stream)
style.configure('TNotebook.Tab', width=app.winfo_screenwidth())
style.configure("Tab", focuscolor=style.configure(".")["background"])
style.configure('TNotebook.Tab', anchor=CENTER, activebackground='#00ff00')
note = ttk.Notebook(app)
note.pack()
stream = ttk.Frame(note)
settings = ttk.Frame(note)
wifi_login = ttk.Frame(note)
tutorial = ttk.Frame(note)
note.add(stream, text="STREAM")
note.add(settings, text="SETTINGS")
note.add(wifi_login, text="WIFI")
note.add(tutorial, text="TUTORIAL")
# Settings display------------------------------------------------------------------------------------------------------
# Configuring grid layout for settings window
settings.grid_columnconfigure(0, weight=2)
settings.grid_columnconfigure((1, 2), weight=1)
settings.grid_rowconfigure((0, 1, 2, 3, 4, 5, 6, 7, 8), weight=1)
# Wifi login and code ----------------------------------------------------------------------
def password_space_wifi(*args):
connect_btn.grid_forget()
wifi_label.grid_forget()
wifi_connected.grid_forget()
keyboard_frame3.grid(column=0, row=4, columnspan=2, rowspan=2)
keyboard_on(keyboard_frame3.children['!frame'])
def password_keyboard_off(current_frame):
current_frame.grid_forget()
wifi_label.grid(column=0, row=0)
wifi_connected.grid(column=1, row=0)
search_label.grid(column=0, row=1)
search_networks.grid(column=1, row=1)
password_entr.grid(column=1, row=2)
password_lbl.grid(column=0, row=2)
connect_btn.grid(column=1, row=3)
keyboard_frame3.grid_forget()
# username_entr.grid(column=1, row=2)
# username_lbl.grid(column=0, row=2)
wifi_login.grid_rowconfigure((0, 1, 2, 3, 4), weight=1)
wifi_login.grid_columnconfigure((0, 1), weight=1)
# Code for Wifi connection
wifi_label = ttk.Label(wifi_login, text='WiFi')
wifi_label.grid(column=0, row=0)
wifi_connected = ttk.Label(wifi_login, text='Unconnected')
wifi_connected.grid(column=1, row=0)
search_label = ttk.Label(wifi_login, text='Nearby Networks')
search_label.grid(column=0, row=1)
def password_filler(*args):
global candidate_network
saved = 0
for networks in wf.saved_networks:
if networks.SSID == args[0]:
password_text.set(networks.password)
saved = 1
for networks in search_list:
if networks.SSID == args[0]:
candidate_network = networks
if saved == 0:
password_text.set('')
candidate_network = 'none'
def connect():
global candidate_network
candidate_network.password = password_text.get()
wf.save_conf(candidate_network)
wf.dump()
search_list = wf.scan()
#search_list = ['list of networks', 'Glide0028763-5G', 'Glide0028763-2G']
option_menu_list = []
for networks in search_list:
option_menu_list.append(networks.SSID)
current_network = StringVar(value='network 3')
search_networks = ttk.OptionMenu(wifi_login, current_network, *option_menu_list, command=password_filler)
search_networks.grid(column=1, row=1)
password_lbl = ttk.Label(wifi_login, text='PASSWORD:')
password_lbl.grid(column=0, row=2)
password_text = StringVar()
password_entr = ttk.Entry(wifi_login, textvariable=password_text)
password_entr.grid(column=1, row=2)
password_entr.bind("<Button>", password_space_wifi)
connect_btn = ttk.Button(wifi_login, text='CONNECT/SAVE', command=connect)
connect_btn.grid(columnspan=2, row=3)
keyboard_frame3 = Frame(wifi_login)
keyboard_frame3.configure(bg=style.lookup('TFrame', 'background'))
keyboard_frame3.grid(column=0, row=4, columnspan=2, rowspan=2)
keyboard_frame3.rowconfigure(0, weight=1)
keyboard_frame3.columnconfigure(0, weight=1)
keyboard_frame3 = create_keyboard(keyboard_frame3, password_entr, password_text, style, password_keyboard_off)
# Settings frame--------------------------------------------------------------------------------------
# importing default settings -------------------------------------------------------------------------
if starting == 0:
inital_settings()
update_settings()
starting = 1
# code for saving and importing streams
file_name = 'stream_codes'
# reading the stream codes from memory
stored_codes2 = open(file_name, 'rb')
code_list = pickle.load(stored_codes2)
stored_codes2.close()
# code for entering a new key
def enter_code():
global code_list
input_code = stream_code.get()
# checks if this is the first key entered and, if so, deletes the '' that was in its place
if code_list[0] == '':
code_list.remove('')
existing_codes['menu'].delete(0)
code_list.insert(0, input_code)
# adds the new key to a file
stored_codes1 = open(file_name, 'wb')
pickle.dump(code_list, stored_codes1)
stored_codes1.close()
# redoes the label displaying the current code
current_code['text'] = input_code
# clears the key entry
stream_code.delete(0, 'end')
# adds the new key to the list of keys
value.set(input_code)
existing_codes['menu'].add_command(label=input_code, command=_setit(value, input_code))
# Program to clear stream keys from memory+screen
def clear_code():
global code_list, stream_code
# Clearing stream keys from memory
code_list = ['']
stored_codes = open(file_name, 'wb')
pickle.dump(code_list, stored_codes)
stored_codes.close()
# Clearing stream keys from GUI
current_code['text'] = ''
stream_code.delete(0, 'end')
value.set('')
existing_codes['menu'].delete(0, 'end')
# Program to select new stream code from existing ones
def change_code():
global existing_codes
chosen_code = value.get()
current_code['text'] = chosen_code
# check whether this is the first run (the list in the file may be empty; if so, show nothing)
current_codetext = code_list[0]
# the current stream key should be the last used stream key (if any)
stream_label = ttk.Label(settings, text='Current stream key:')
stream_label.grid(column=0, row=0)
current_code = ttk.Label(settings, text=current_codetext)
current_code.grid(column=1, row=0)
# function to clear space for keyboard
def keyboard_space_settings(*args):
clear_label.grid_forget()
clr_lbl_bck.grid_forget()
clear_button.grid_forget()
audio_chklbl.grid_forget()
audio_chk.grid_forget()
delay_lbl.grid_forget()
BckLbl.grid_forget()
delay.grid_forget()
frame_rate_label.grid_forget()
bit_rate_label.grid_forget()
frame_rate_scroller.grid_forget()
bit_rate_scroller.grid_forget()
keyboard_on(keyboard_frame1.children['!frame'])
def reset_page(current_frame):
current_frame.grid_forget()
clear_label.grid(column=0, row=3)
clr_lbl_bck.grid(column=1, row=3, columnspan=2)
audio_chklbl.grid(column=0, row=4)
audio_chk.grid(column=1, row=4, columnspan=2)
clear_button.grid(column=1, row=3, columnspan=2)
delay_lbl.grid(column=0, row=5)
BckLbl.grid(column=1, row=4, columnspan=2)
delay.grid(column=1, row=5, columnspan=2)
frame_rate_label.grid(column=0, row=6)
bit_rate_label.grid(column=0, row=7)
frame_rate_scroller.grid(column=1, row=6, columnspan=2)
bit_rate_scroller.grid(column=1, row=7, columnspan=2)
# user to input stream key
stream_inputlabel = ttk.Label(settings, text='Enter key:')
stream_inputlabel.grid(column=0, row=1)
stream_text = StringVar()
stream_code = ttk.Entry(settings, textvariable=stream_text)
stream_code.bind("<Button>", keyboard_space_settings)
stream_code.grid(column=1, row=1)
stream_enter = ttk.Button(settings, text='Use key', command=enter_code)
stream_enter.grid(column=2, row=1)
keyboard_frame1 = Frame(settings)
keyboard_frame1.configure(bg=style.lookup('TFrame', 'background'))
keyboard_frame1.grid(column=0, row=4, columnspan=3, rowspan=4)
keyboard_frame1.rowconfigure(0, weight=1)
keyboard_frame1.columnconfigure(0, weight=1)
keyboard_frame1 = create_keyboard(keyboard_frame1, stream_code, stream_text, style, reset_page)
# User to choose stream key (should appear in order of last used)
stream_p_label = ttk.Label(settings, text="Saved keys:")
stream_p_label.grid(column=0, row=2)
value = StringVar()
value.set(current_codetext) # Setting the key (should be last key used)
existing_codes = ttk.OptionMenu(settings, value, *code_list)
existing_codes.grid(column=1, row=2)
stream_p_enter = ttk.Button(settings, text="Use key", command=change_code)
stream_p_enter.grid(column=2, row=2)
# Clearing list of stream codes
clear_label = ttk.Label(settings, text='Clear keys:')
clear_label.grid(column=0, row=3)
clr_lbl_bck = ttk.Label(settings, text='')
clr_lbl_bck.grid(column=1, row=3, columnspan=2)
clear_button = ttk.Button(settings, text='Clear keys', command=clear_code)
clear_button.grid(column=1, row=3, columnspan=2)
# Allow stream w_out audio?
audio_chklbl = ttk.Label(settings, text='Audioless streaming:')
audio_chklbl.grid(column=0, row=4)
audio_chk = ttk.Checkbutton(settings, var=chk_state)
audio_chk.grid(column=1, row=4, columnspan=2)
# Code for delay_option
delay_lbl = ttk.Label(settings, text="Audio-video delay:")
delay_lbl.grid(column=0, row=5)
BckLbl = ttk.Label(settings, text='')
BckLbl.grid(column=1, row=5, columnspan=2)
# initial value
delay = ttk.Spinbox(settings, from_=-5000, to=5000, increment=20, textvariable=delay_value)
delay.grid(column=1, row=5, columnspan=2)
frame_rate_label = ttk.Label(settings, text='Frame Rate:')
frame_rate_scroller = ttk.Spinbox(settings, from_=0, to=100, textvariable=frame_rate)
frame_rate_scroller.grid(column=1, row=6, columnspan=2)
frame_rate_label.grid(column=0, row=6)
bit_rate_label = ttk.Label(settings, text='Bit Rate: ')
bit_rate_scroller = ttk.Spinbox(settings, from_=0, to=100, textvariable=bit_rate)
bit_rate_label.grid(column=0, row=7)
bit_rate_scroller.grid(column=1, row=7, columnspan=2)
# More settings ---------------------------------------------------------------------------------------------
# Touchscreen calibration
calibration = function_maker(os.system, "/bin/sh -c xinput_calibrator; cat | tail -6 > /etc/X11/xorg.conf.d/99-calibration.conf")
def touchscreen_calibration():
threading.Thread(target = calibration).start()
screen_calib = ttk.Button(settings, text="Calibrate screen", command=touchscreen_calibration)
screen_calib.grid(column=2, row=0)
# Stream display--------------------------------------------------------------------------------------------------------
stream.grid_rowconfigure((0, 3), weight=2)
stream.grid_rowconfigure((1, 2), weight=3)
stream.grid_columnconfigure((0, 1), weight=1)
stream.grid_rowconfigure(1, weight=4)
def start_stream():
# insert code here for checking WiFi connection, stream code (if it is in the correct format), camera + audio.
#
#
#
#
#
#
if wifi_connected['text'] != "Connected":
messagebox.showwarning("Wifi warning", "Please connect to wifi to stream")
elif current_code['text'] == "AHAHAHA":
messagebox.showwarning("Stream code warning", "Please input a valid stream code")
elif audio_connection == 0:
messagebox.showwarning("Audio warning", "No audio input detected")
elif video_connection == 0:
messagebox.showwarning("Video warning", "No video input detected")
else:
messagebox.showinfo("Full speed ahead", "Checks complete: We're good to go")
# code for turning button red
go_btn.configure(text='Stop', bg='red', command=stop_stream)
go_btn.pack()
# Here is the section where the code to start the stream should go
#
def stop_stream():
# insert code for stopping stream here
#
go_btn.configure(text='Go', bg='green', command=start_stream)
def start_camera_stream():
vs.stop_stream()
cap.release()
threading.Thread(target = StreamSetting.STREAM_CAMERA_COMMAND).start()
def start_screen_stream():
threading.Thread(target = StreamSetting.STREAM_SCREEN_COMMAND).start()
# Go button
StreamButtons = Frame(stream)
stream_btn = ttk.Button(StreamButtons, text='HQ Stream', command=start_camera_stream)
stream_btn.grid(column=0, row=0)
stream_btn = ttk.Button(StreamButtons, text='LQ Stream', command=start_screen_stream)
stream_btn.grid(column=0, row=1)
stream_btn = ttk.Button(StreamButtons, text='Stop', command=StreamSetting.STOP)
stream_btn.grid(column=0, row=2)
StreamButtons.grid(column=2, row=3)
# Button to select between video options
arrow_width = 40
arrow_height = 40
uparrow = Image.open(
"/home/pi/touchscreen-main/Touchscreen_photos/UpArrow.png") # needs to be
# whatever your directory is
up_per = (arrow_width / float(uparrow.size[0]))
height = int((float(uparrow.size[1]) * float(up_per)))
uparrow = uparrow.resize((arrow_width, height))
uparrowrender = ImageTk.PhotoImage(uparrow)
downarrow = Image.open(
"/home/pi/touchscreen-main/Touchscreen_photos/DownArrow.png") # needs to be
# whatever your directory is
down_per = (arrow_width / float(downarrow.size[0]))
height = int((float(downarrow.size[1]) * float(down_per)))
downarrow = downarrow.resize((arrow_width, height))
downarrowrender = ImageTk.PhotoImage(downarrow)
leftarrow = Image.open(
"/home/pi/touchscreen-main/Touchscreen_photos/LeftArrow.png") # needs to be
# whatever your directory is
left_per = (arrow_height / float(leftarrow.size[0]))
height = int((float(leftarrow.size[1]) * float(left_per)))
leftarrow = leftarrow.resize((arrow_height, height))
leftarrowrender = ImageTk.PhotoImage(leftarrow)
rightarrow = Image.open(
"/home/pi/touchscreen-main/Touchscreen_photos/RightArrow.png") # needs to be
# whatever your directory is
right_per = (arrow_height / float(rightarrow.size[0]))
height = int((float(rightarrow.size[1]) * float(right_per)))
rightarrow = rightarrow.resize((arrow_height, height))
rightarrowrender = ImageTk.PhotoImage(rightarrow)
customise_names = [['Reset', vs.make_normal, 'Reset'], ['Make Grey', vs.make_grey, 'Grey'],
['Brightness up', vs.make_bright, 'Brightness'], ['Brightness down', vs.make_dark, 'Brightness'],
['Blur', vs.make_blur, 'Blur/Sharpen'], ['Sharpen', vs.make_sharpen, 'Blur/Sharpen'],
['Rotate clock', vs.make_clockwise_rotate, 'Rotate'],
['Rotate anticlock', vs.make_anticlockwise_rotate
, 'Rotate'], ['Zoom in', vs.make_zoom_in, 'Zoom'],
['Zoom out', vs.make_zoom_out, 'Zoom'], [leftarrowrender, vs.make_pan_left, 'Pan'],
[rightarrowrender, vs.make_pan_right, 'Pan'], [uparrowrender, vs.make_pan_up, 'Pan'],
[downarrowrender, vs.make_pan_down, 'Pan'], ['Emboss', vs.make_emboss, 'Emboss'],
['Outline', vs.make_edge_detection, 'Outline'], ['Sepia', vs.make_sepia, 'Sepia'],
['Face Detection', vs.detect_face, 'Face Detection']]
windows_names = ['Reset', 'Grey', 'Brightness', 'Blur/Sharpen', 'Rotate', 'Zoom', 'Pan', 'Emboss', 'Outline', 'Sepia',
'Face Detection']
windows = list(range(len(windows_names)))
buttons = list(range(len(customise_names)))
for i in range(len(buttons)):
buttons[i] = Customise_button(customise_names[i][2], customise_names[i][0], customise_names[i][1])
for i in range(len(windows)):
windows[i] = Customise_window(windows_names[i])
for j in buttons:
windows[i].add_button(j)
windows_dic = {'Reset': windows[0], 'Grey': windows[1], 'Brightness': windows[2], 'Blur/Sharpen': windows[3],
'Rotate': windows[4], 'Zoom': windows[5], 'Pan': windows[6], 'Emboss': windows[7], 'Outline': windows[8],
'Sepia': windows[9], 'Face Detection': windows[10]}
current_window = windows_dic['Pan']
current_window.create_window()
# function to change the thing being customized
def change_mode(new_window):
global current_window, windows_dic
current_window.close_window()
windows_dic[new_window].create_window()
current_window = windows_dic[new_window]
# button for dropdown list where user can change video type
customise = StringVar()
customise.set('Customise')
labelscus = ['Customise']
labelscus.extend(windows_names)
video_customise = ttk.OptionMenu(stream, customise, *labelscus, command=change_mode)
video_customise.grid(column=2, row=0)
# displaying preview of stream
# 400 for big, 300 for small
stock_height = 250
stock_width = int(1.33333 * stock_height)
stock = Label(stream, bg=style.lookup('TFrame', 'background'))
stock.grid(column=0, row=0, columnspan=2, rowspan=3, sticky='nw')
vs.show_frame(stock, stock_height, stock_width, cap)
# Tutorial section
rick_roll = ttk.Label(tutorial, text="""Guide to using this touchscreen: explains what a stream key
is, how the process works (i.e. that you need wifi, a video and audio device connected, and a valid stream key),
what the delay between audio and video is and why it is necessary,
along with other necessary details...""")
rick_roll.grid(column=0, row=1)
app.mainloop()
|
apparea.py
|
#!/usr/bin/env python3
# Wrapper for apparea.
import argparse
import getpass
import glob
import json
import subprocess
import sys
import threading
import time
import os
from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer
CONFIG_FILE = os.path.expanduser("~/.apparea.json")
SITE = None
PORT = None
USERNAME = None
KEY_FILE = None
def main():
configure()
parser = argparse.ArgumentParser(description="Client helper script to forward ports using AppArea")
parser.add_argument("--verbose", "-v", action="store_true", help="enable verbose output")
subparsers = parser.add_subparsers()
http_parser = subparsers.add_parser("http", help="proxy a http port")
http_parser.add_argument("port", type=int, help="target port to proxy")
http_parser.add_argument("--subdomain", "-s", help="target domain to proxy to")
http_parser.set_defaults(func=http)
http_parser = subparsers.add_parser("https", help="proxy a https port")
http_parser.add_argument("port", type=int, help="target port to proxy")
http_parser.add_argument("--subdomain", "-s", help="target domain to proxy to")
http_parser.set_defaults(func=https)
tcp_parser = subparsers.add_parser("tcp", help="proxy a raw tcp port")
tcp_parser.add_argument("ports", nargs="+", type=int, help="target ports to proxy")
tcp_parser.set_defaults(func=tcp)
http_parser = subparsers.add_parser("serve-http", help="serve the current directory and proxy it")
http_parser.add_argument("--subdomain", "-s", help="target domain to proxy to")
http_parser.set_defaults(func=serve_http)
args = parser.parse_args()
if hasattr(args, "func"):
args.func(args)
else:
parser.print_usage()
def exponential_backoff(f):
def wrapper(*args, **kwargs):
delay = 1
while True:
start = time.time()
res = f(*args, **kwargs)
end = time.time()
if res:
break
if end - start > 2:
# HACK: assume that if the process was running for longer than
# 2 seconds, it successfully established a connection
delay = 1
time.sleep(delay)
delay *= 2
if delay > 60:
delay = 60
return wrapper
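# Behaviour sketch for the decorator above (assumption: the wrapped function returns
# True to stop retrying and False to retry). Retries are spaced 1, 2, 4, 8, ... seconds
# apart, capped at 60 s, and the delay resets to 1 s whenever a run lasts longer than
# 2 seconds (taken as evidence that a connection was established). For example:
#
#   @exponential_backoff
#   def flaky():
#       print("trying...")
#       return False   # keep retrying with growing delays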
def http(args):
username = craft_username(args.subdomain)
forward(80, [args.port], username=username, verbose=args.verbose)
def https(args):
username = craft_username(args.subdomain)
forward(443, [args.port], username=username, verbose=args.verbose)
def tcp(args):
forward(0, args.ports, verbose=args.verbose)
class CustomHandler(SimpleHTTPRequestHandler):
server_version = "AppArea"
sys_version = ""
def log_message(self, *args):
pass
def serve_http(args):
address = ("localhost", 0)
httpd = ThreadingHTTPServer(address, CustomHandler)
port = httpd.server_address[1]
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.start()
username = craft_username(args.subdomain)
forward(80, [port], username=username, verbose=args.verbose)
httpd.shutdown()
httpd_thread.join()
def craft_username(subdomain):
if subdomain:
username = subdomain.split('.') + [USERNAME]
username.reverse()
username = ".".join(username)
else:
username = USERNAME
return username
def forward(dest, srcs, username=None, verbose=False):
if username is None:
username = USERNAME
forwards = [("-R", f"0.0.0.0:{dest}:localhost:{src}") for src in srcs]
forwards = [item for forward in forwards for item in forward]
command = [*forwards, "-T", "-i", KEY_FILE, "-p", str(PORT), f"{username}@{SITE}"]
if verbose:
command.append("-v")
run_ssh(command)
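# For illustration (hypothetical values): forward(80, [8000], username="alice") with
# PORT=22, SITE="apparea.dev" and KEY_FILE="~/.ssh/id_ed25519" ends up running roughly
#
#   ssh -R 0.0.0.0:80:localhost:8000 -T -i ~/.ssh/id_ed25519 -p 22 alice@apparea.dev
#
# i.e. a standard SSH reverse port forward from the server's port 80 to local port 8000.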
@exponential_backoff
def run_ssh(args):
proc = subprocess.Popen(
["ssh", *args],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE)
try:
while True:
line = proc.stdout.readline()
if not line:
break
if b"want to continue connecting" in line:
proc.communicate(input())
sys.stdout.buffer.write(line)
sys.stdout.buffer.flush()
proc.wait()
return False
except KeyboardInterrupt:
proc.terminate()
return True
def configure():
global SITE
global PORT
global USERNAME
global KEY_FILE
try:
with open(CONFIG_FILE) as f:
config = json.load(f)
SITE = config["site"]
PORT = config["port"]
USERNAME = config["username"]
KEY_FILE = config["keyfile"]
except FileNotFoundError:
print("Welcome to apparea!")
print("Since this is your first time, this helper will get you setup.")
print("Leave the fields blank for the default option.\n")
site = "apparea.dev"
new_site = input(f"Site [default={site}]: ")
if new_site:
site = new_site
port = 21
new_port = input(f"Port [default={port}]: ")
if new_port:
port = int(new_port)
username = getpass.getuser()
new_username = input(f"Username [default={username}]: ")
if new_username:
username = new_username
keyfiles = glob.iglob(os.path.abspath(os.path.expanduser("~/.ssh/id_*")))
keyfiles = list(filter(lambda s: ".pub" not in s, keyfiles))
keyfile = keyfiles[0]
new_keyfile = input(f"SSH Key [default={keyfile}]: ")
if new_keyfile:
keyfile = os.path.abspath(os.path.expanduser(new_keyfile))
print()
result = json.dumps({
"site": site,
"port": port,
"username": username,
"keyfile": keyfile,
}, indent=4)
print(result)
ok = input("Is this ok? [yes]/no: ")
if ok and ok[0].lower() == 'n':
print("Alright, quitting.")
sys.exit(1)
with open(CONFIG_FILE, "w") as f:
f.write(result)
print(f"Written config to {CONFIG_FILE}")
print()
install = input("Do you want to install this script to /usr/local/bin? [yes]/no: ")
if not install or install[0].lower() != 'n':
command = f"sudo cp {os.path.realpath(__file__)} /usr/local/bin/apparea"
print("$ " + command)
subprocess.run(command, shell=True)
print()
print("All done!")
print()
SITE = site
PORT = port
KEY_FILE = keyfile
USERNAME = username
if __name__ == "__main__":
main()
|
monitor_engine.py
|
# -*- coding: utf-8 -*-
u"""Monitor Engine module for SecureTea AntiVirus.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: Abhishek Sharma <abhishek_official@hotmail.com> , Jul 4 2019
Version: 1.4
Module: SecureTea
"""
from securetea.lib.antivirus.monitor.monitor_changes import MonitorChanges
from securetea.lib.antivirus.monitor.usb_monitor import USBMonitor
from securetea.lib.antivirus.antivirus_logger import AntiVirusLogger
from securetea.lib.antivirus.tools import utils
import multiprocessing
import sys
class MonitorEngine(object):
"""
MonitorEngine class.
"""
def __init__(self,
debug=False,
config_path=None,
vt_api_key=None,
monitor_changes=1,
monitor_usb=1):
"""
Initialize MonitorEngine.
Args:
debug (bool): Log on terminal or not
config_path (str): Configuration JSON file path
vt_api_key (str): VirusTotal API Key
monitor_changes (int): Monitor changes (1) or not (0)
monitor_usb (int): Monitor USB (1) or not (0)
Raises:
None
Returns:
None
"""
self.debug = debug
# Initialize logger
self.logger = AntiVirusLogger(
__name__,
debug=self.debug
)
if config_path:
self._CONFIG_PATH = config_path
else:
self.logger.log(
"Configuration file not found",
logtype="error"
)
sys.exit(0)
# Load Configuration
self.config_dict = utils.json_to_dict(self._CONFIG_PATH)
# Categorize OS
self.os_name = utils.categorize_os()
if self.os_name:
# Load malicious-file log path
self.changes_min_time = int(self.config_dict[self.os_name]["monitor"]["threshold_min"])
self.monitor_changes = int(monitor_changes)
self.monitor_usb = int(monitor_usb)
# Create a pool of process
self.process_pool = []
# Initialize VirusTotal API key
self.vt_api_key = vt_api_key
def kill_process(self):
"""
Kill running process.
Args:
None
Raises:
None
Returns:
None
"""
for process in self.process_pool:
process.terminate()
def create_process(self):
"""
Create specific process depending on the choice
of the user.
Args:
None
Raises:
None
Returns:
None
"""
if self.monitor_changes:
# Create MonitorChanges object
self.monitor_changes_obj = MonitorChanges(debug=self.debug,
config_path=self._CONFIG_PATH,
min_time=self.changes_min_time,
vt_api_key=self.vt_api_key)
monitor_changes_process = multiprocessing.Process(target=self.monitor_changes_obj.monitor)
# Add to process pool
self.process_pool.append(monitor_changes_process)
if self.monitor_usb:
# Create USBMonitor object
self.monitor_usb_obj = USBMonitor(debug=self.debug,
config_path=self._CONFIG_PATH,
vt_api_key=self.vt_api_key)
monitor_usb_process = multiprocessing.Process(target=self.monitor_usb_obj.monitor_usb_device)
# Add to process pool
self.process_pool.append(monitor_usb_process)
def start_monitor_engine(self):
"""
Start the monitor engine.
Args:
None
Raises:
None
Returns:
None
"""
# Create process based on user choice
self.create_process()
try:
if self.process_pool:
for process in self.process_pool:
process.start()
for process in self.process_pool:
process.join()
except KeyboardInterrupt:
self.logger.log(
"KeyboardInterrupt detected, quitting monitor engine",
logtype="info"
)
# Kill running process
self.kill_process()
except Exception as e:
self.logger.log(
"Error occurred: " + str(e),
logtype="error"
)
# Kill running process
self.kill_process()
|
ipcontrollerapp.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
The IPython controller application.
Authors:
* Brian Granger
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import with_statement
import json
import os
import socket
import stat
import sys
from multiprocessing import Process
from signal import signal, SIGINT, SIGABRT, SIGTERM
import zmq
from zmq.devices import ProcessMonitoredQueue
from zmq.log.handlers import PUBHandler
from IPython.core.profiledir import ProfileDir
from IPython.parallel.apps.baseapp import (
BaseParallelApplication,
base_aliases,
base_flags,
catch_config_error,
)
from IPython.utils.importstring import import_item
from IPython.utils.traitlets import Instance, Unicode, Bool, List, Dict, TraitError
from IPython.zmq.session import (
Session, session_aliases, session_flags, default_secure
)
from IPython.parallel.controller.heartmonitor import HeartMonitor
from IPython.parallel.controller.hub import HubFactory
from IPython.parallel.controller.scheduler import TaskScheduler,launch_scheduler
from IPython.parallel.controller.sqlitedb import SQLiteDB
from IPython.parallel.util import split_url, disambiguate_url
# conditional import of MongoDB backend class
try:
from IPython.parallel.controller.mongodb import MongoDB
except ImportError:
maybe_mongo = []
else:
maybe_mongo = [MongoDB]
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
#: The default config file name for this application
default_config_file_name = u'ipcontroller_config.py'
_description = """Start the IPython controller for parallel computing.
The IPython controller provides a gateway between the IPython engines and
clients. The controller needs to be started before the engines and can be
configured using command line options or using a cluster directory. Cluster
directories contain config, log and security files and are usually located in
your ipython directory and named as "profile_name". See the `profile`
and `profile-dir` options for details.
"""
_examples = """
ipcontroller --ip=192.168.0.1 --port=1000 # listen on ip, port for engines
ipcontroller --scheme=pure # use the pure zeromq scheduler
"""
#-----------------------------------------------------------------------------
# The main application
#-----------------------------------------------------------------------------
flags = {}
flags.update(base_flags)
flags.update({
'usethreads' : ( {'IPControllerApp' : {'use_threads' : True}},
'Use threads instead of processes for the schedulers'),
'sqlitedb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'}},
'use the SQLiteDB backend'),
'mongodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'}},
'use the MongoDB backend'),
'dictdb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.DictDB'}},
'use the in-memory DictDB backend'),
'nodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.NoDB'}},
"""use dummy DB backend, which doesn't store any information.
This is the default as of IPython 0.13.
To enable delayed or repeated retrieval of results from the Hub,
select one of the true db backends.
"""),
'reuse' : ({'IPControllerApp' : {'reuse_files' : True}},
'reuse existing json connection files'),
'restore' : ({'IPControllerApp' : {'restore_engines' : True, 'reuse_files' : True}},
'Attempt to restore engines from a JSON file. '
'For use when resuming a crashed controller'),
})
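# Each flag above expands to a config override; for example, `ipcontroller --sqlitedb`
# is equivalent to putting the following in ipcontroller_config.py:
#
#   c.HubFactory.db_class = 'IPython.parallel.controller.sqlitedb.SQLiteDB'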
flags.update(session_flags)
aliases = dict(
ssh = 'IPControllerApp.ssh_server',
enginessh = 'IPControllerApp.engine_ssh_server',
location = 'IPControllerApp.location',
url = 'HubFactory.url',
ip = 'HubFactory.ip',
transport = 'HubFactory.transport',
port = 'HubFactory.regport',
ping = 'HeartMonitor.period',
scheme = 'TaskScheduler.scheme_name',
hwm = 'TaskScheduler.hwm',
)
aliases.update(base_aliases)
aliases.update(session_aliases)
class IPControllerApp(BaseParallelApplication):
name = u'ipcontroller'
description = _description
examples = _examples
config_file_name = Unicode(default_config_file_name)
classes = [ProfileDir, Session, HubFactory, TaskScheduler, HeartMonitor, SQLiteDB] + maybe_mongo
# change default to True
auto_create = Bool(True, config=True,
help="""Whether to create profile dir if it doesn't exist.""")
reuse_files = Bool(False, config=True,
help="""Whether to reuse existing json connection files.
If False, connection files will be removed on a clean exit.
"""
)
restore_engines = Bool(False, config=True,
help="""Reload engine state from JSON file
"""
)
ssh_server = Unicode(u'', config=True,
help="""ssh url for clients to use when connecting to the Controller
processes. It should be of the form: [user@]server[:port]. The
Controller's listening addresses must be accessible from the ssh server""",
)
engine_ssh_server = Unicode(u'', config=True,
help="""ssh url for engines to use when connecting to the Controller
processes. It should be of the form: [user@]server[:port]. The
Controller's listening addresses must be accessible from the ssh server""",
)
location = Unicode(u'', config=True,
help="""The external IP or domain name of the Controller, used for disambiguating
engine and client connections.""",
)
import_statements = List([], config=True,
help="import statements to be run at startup. Necessary in some environments"
)
use_threads = Bool(False, config=True,
help='Use threads instead of processes for the schedulers',
)
engine_json_file = Unicode('ipcontroller-engine.json', config=True,
help="JSON filename where engine connection info will be stored.")
client_json_file = Unicode('ipcontroller-client.json', config=True,
help="JSON filename where client connection info will be stored.")
def _cluster_id_changed(self, name, old, new):
super(IPControllerApp, self)._cluster_id_changed(name, old, new)
self.engine_json_file = "%s-engine.json" % self.name
self.client_json_file = "%s-client.json" % self.name
# internal
children = List()
mq_class = Unicode('zmq.devices.ProcessMonitoredQueue')
def _use_threads_changed(self, name, old, new):
self.mq_class = 'zmq.devices.%sMonitoredQueue'%('Thread' if new else 'Process')
write_connection_files = Bool(True,
help="""Whether to write connection files to disk.
True in all cases other than runs with `reuse_files=True` *after the first*
"""
)
aliases = Dict(aliases)
flags = Dict(flags)
def save_connection_dict(self, fname, cdict):
"""save a connection dict to json file."""
c = self.config
url = cdict['registration']
location = cdict['location']
if not location:
try:
location = socket.gethostbyname_ex(socket.gethostname())[2][-1]
except (socket.gaierror, IndexError):
self.log.warn("Could not identify this machine's IP, assuming 127.0.0.1."
" You may need to specify '--location=<external_ip_address>' to help"
" IPython decide when to connect via loopback.")
location = '127.0.0.1'
cdict['location'] = location
fname = os.path.join(self.profile_dir.security_dir, fname)
self.log.info("writing connection info to %s", fname)
with open(fname, 'w') as f:
f.write(json.dumps(cdict, indent=2))
os.chmod(fname, stat.S_IRUSR|stat.S_IWUSR)
def load_config_from_json(self):
"""load config from existing json connector files."""
c = self.config
self.log.debug("loading config from JSON")
# load engine config
fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
ecfg = json.loads(f.read())
# json gives unicode, Session.key wants bytes
c.Session.key = ecfg['exec_key'].encode('ascii')
xport,ip = ecfg['interface'].split('://')
c.HubFactory.engine_ip = ip
c.HubFactory.engine_transport = xport
self.location = ecfg['location']
if not self.engine_ssh_server:
self.engine_ssh_server = ecfg['ssh']
# load client config
fname = os.path.join(self.profile_dir.security_dir, self.client_json_file)
self.log.info("loading connection info from %s", fname)
with open(fname) as f:
ccfg = json.loads(f.read())
for key in ('exec_key', 'registration', 'pack', 'unpack'):
assert ccfg[key] == ecfg[key], "mismatch between engine and client info: %r" % key
xport,addr = ccfg['interface'].split('://')
c.HubFactory.client_transport = xport
c.HubFactory.client_ip = ip
if not self.ssh_server:
self.ssh_server = ccfg['ssh']
# load port config:
c.HubFactory.regport = ecfg['registration']
c.HubFactory.hb = (ecfg['hb_ping'], ecfg['hb_pong'])
c.HubFactory.control = (ccfg['control'], ecfg['control'])
c.HubFactory.mux = (ccfg['mux'], ecfg['mux'])
c.HubFactory.task = (ccfg['task'], ecfg['task'])
c.HubFactory.iopub = (ccfg['iopub'], ecfg['iopub'])
c.HubFactory.notifier_port = ccfg['notification']
def cleanup_connection_files(self):
if self.reuse_files:
self.log.debug("leaving JSON connection files for reuse")
return
self.log.debug("cleaning up JSON connection files")
for f in (self.client_json_file, self.engine_json_file):
f = os.path.join(self.profile_dir.security_dir, f)
try:
os.remove(f)
except Exception as e:
self.log.error("Failed to cleanup connection file: %s", e)
else:
self.log.debug(u"removed %s", f)
def load_secondary_config(self):
"""secondary config, loading from JSON and setting defaults"""
if self.reuse_files:
try:
self.load_config_from_json()
except (AssertionError,IOError) as e:
self.log.error("Could not load config from JSON: %s" % e)
else:
# successfully loaded config from JSON, and reuse=True
# no need to write back the same file
self.write_connection_files = False
# switch Session.key default to secure
default_secure(self.config)
self.log.debug("Config changed")
self.log.debug(repr(self.config))
def init_hub(self):
c = self.config
self.do_import_statements()
try:
self.factory = HubFactory(config=c, log=self.log)
# self.start_logging()
self.factory.init_hub()
except TraitError:
raise
except Exception:
self.log.error("Couldn't construct the Controller", exc_info=True)
self.exit(1)
if self.write_connection_files:
# save to new json config files
f = self.factory
base = {
'exec_key' : f.session.key.decode('ascii'),
'location' : self.location,
'pack' : f.session.packer,
'unpack' : f.session.unpacker,
}
cdict = {'ssh' : self.ssh_server}
cdict.update(f.client_info)
cdict.update(base)
self.save_connection_dict(self.client_json_file, cdict)
edict = {'ssh' : self.engine_ssh_server}
edict.update(f.engine_info)
edict.update(base)
self.save_connection_dict(self.engine_json_file, edict)
fname = "engines%s.json" % self.cluster_id
self.factory.hub.engine_state_file = os.path.join(self.profile_dir.log_dir, fname)
if self.restore_engines:
self.factory.hub._load_engine_state()
def init_schedulers(self):
children = self.children
mq = import_item(str(self.mq_class))
f = self.factory
ident = f.session.bsession
# disambiguate url, in case of *
monitor_url = disambiguate_url(f.monitor_url)
# maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url
# IOPub relay (in a Process)
q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A',b'iopub')
q.bind_in(f.client_url('iopub'))
q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub")
q.bind_out(f.engine_url('iopub'))
q.setsockopt_out(zmq.SUBSCRIBE, b'')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
# Multiplexer Queue (in a Process)
q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out')
q.bind_in(f.client_url('mux'))
q.setsockopt_in(zmq.IDENTITY, b'mux_in')
q.bind_out(f.engine_url('mux'))
q.setsockopt_out(zmq.IDENTITY, b'mux_out')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
# Control Queue (in a Process)
q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol')
q.bind_in(f.client_url('control'))
q.setsockopt_in(zmq.IDENTITY, b'control_in')
q.bind_out(f.engine_url('control'))
q.setsockopt_out(zmq.IDENTITY, b'control_out')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
try:
scheme = self.config.TaskScheduler.scheme_name
except AttributeError:
scheme = TaskScheduler.scheme_name.get_default_value()
# Task Queue (in a Process)
if scheme == 'pure':
self.log.warn("task::using pure DEALER Task scheduler")
q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask')
# q.setsockopt_out(zmq.HWM, hub.hwm)
q.bind_in(f.client_url('task'))
q.setsockopt_in(zmq.IDENTITY, b'task_in')
q.bind_out(f.engine_url('task'))
q.setsockopt_out(zmq.IDENTITY, b'task_out')
q.connect_mon(monitor_url)
q.daemon=True
children.append(q)
elif scheme == 'none':
self.log.warn("task::using no Task scheduler")
else:
self.log.info("task::using Python %s Task scheduler"%scheme)
sargs = (f.client_url('task'), f.engine_url('task'),
monitor_url, disambiguate_url(f.client_url('notification')),
disambiguate_url(f.client_url('registration')),
)
kwargs = dict(logname='scheduler', loglevel=self.log_level,
log_url = self.log_url, config=dict(self.config))
if 'Process' in self.mq_class:
# run the Python scheduler in a Process
q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
q.daemon=True
children.append(q)
else:
# single-threaded Controller
kwargs['in_thread'] = True
launch_scheduler(*sargs, **kwargs)
def terminate_children(self):
child_procs = []
for child in self.children:
if isinstance(child, ProcessMonitoredQueue):
child_procs.append(child.launcher)
elif isinstance(child, Process):
child_procs.append(child)
if child_procs:
self.log.critical("terminating children...")
for child in child_procs:
try:
child.terminate()
except OSError:
# already dead
pass
def handle_signal(self, sig, frame):
self.log.critical("Received signal %i, shutting down", sig)
self.terminate_children()
self.loop.stop()
def init_signal(self):
for sig in (SIGINT, SIGABRT, SIGTERM):
signal(sig, self.handle_signal)
def do_import_statements(self):
statements = self.import_statements
for s in statements:
try:
self.log.msg("Executing statement: '%s'" % s)
exec s in globals(), locals()
except:
self.log.msg("Error running statement: %s" % s)
def forward_logging(self):
if self.log_url:
self.log.info("Forwarding logging to %s"%self.log_url)
context = zmq.Context.instance()
lsock = context.socket(zmq.PUB)
lsock.connect(self.log_url)
handler = PUBHandler(lsock)
handler.root_topic = 'controller'
handler.setLevel(self.log_level)
self.log.addHandler(handler)
@catch_config_error
def initialize(self, argv=None):
super(IPControllerApp, self).initialize(argv)
self.forward_logging()
self.load_secondary_config()
self.init_hub()
self.init_schedulers()
def start(self):
# Start the subprocesses:
self.factory.start()
# children must be started before signals are setup,
# otherwise signal-handling will fire multiple times
for child in self.children:
child.start()
self.init_signal()
self.write_pid_file(overwrite=True)
try:
self.factory.loop.start()
except KeyboardInterrupt:
self.log.critical("Interrupted, Exiting...\n")
finally:
self.cleanup_connection_files()
def launch_new_instance():
"""Create and run the IPython controller"""
if sys.platform == 'win32':
# make sure we don't get called from a multiprocessing subprocess
# this can result in infinite Controllers being started on Windows
# which doesn't have a proper fork, so multiprocessing is wonky
# this only comes up when IPython has been installed using vanilla
# setuptools, and *not* distribute.
import multiprocessing
p = multiprocessing.current_process()
# the main process has name 'MainProcess'
# subprocesses will have names like 'Process-1'
if p.name != 'MainProcess':
# we are a subprocess, don't start another Controller!
return
app = IPControllerApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
launch_new_instance()
|
main.py
|
#!/usr/bin/env python3
# @File:main.py
# @Date:2018/5/9
# Author:Cat.1
# 2018/05/20 Partial code refactor
# 2018/07/15 System rework
# 2018/07/29 Added initial Kugou Music support
# 2018/08/03 Added initial Baidu and Kuwo Music support
# 2018/08/25 Added initial Spotify support
import re
import sys
import copy
import redis
import datetime
import threading
import json, time
sys.path.append('..')
from flask_cors import CORS
import project.Config.config
from project.Log import Logger
from project.Library import Error
from project.Helper import bcrypt_hash
from project.Helper import token_admin
from project.Module import ReturnStatus
from project.Module import RetDataModule
from flask import render_template,redirect
from flask import Flask,request,Response,jsonify
from project.Sync.NeteasySync import Neteasymusic_Sync
from project.Scrawl.QQMusic import QQMusic as qq_scrawl
from project.Scrawl.KugouMusic import kugou as kugou_scrawl
from project.Scrawl.KuwoMusic import KuwoMusic as kuwo_scrawl
from project.Scrawl.MiguMusic import MiguMusic as migu_scrawl
# from project.Scrawl.SpotifyMusic import SpotifyMusic as spotify
from project.Scrawl.BaiduMusic import BaiduMusic as baidu_scrawl
from project.Sync.XiamiSync import XiamiMusic as xiami_Song_List
from project.Scrawl.XiamiMusic import XiamiMusic as xiami_scrawl
from project.Scrawl.NeteasyMusic import NeteasyMusic as neteasy_scrawl
from project.Sync.NeteasySync import Hot_Song_List as neteasy_Hot_Song_List
"""
Flask is imported to expose the open API endpoints.
The json library is imported to parse JSON uploaded by the front end.
The external AES module is imported to encrypt the POST data sent to NetEase Cloud Music.
Scrawl provides the crawlers for the individual platforms.
The config module supplies database and other configuration settings.
The Sync modules synchronise playlist information from the individual platforms.
The Module package defines the response format for each platform and the error codes for different situations.
flask_cors allows cross-origin requests.
"""
"""
>>>全局设定开始>>>
"""
re_dict = {}
# Initialise the response dict
logger = Logger.Logger('/var/log/Listen-now/Listen-now.log',level='info')
# Initialise the logger; only messages at level info and above are recorded
if int(project.Config.config.getConfig("open_database", "redis")) == 1:
host = project.Config.config.getConfig("database", "dbhost")
port = project.Config.config.getConfig("database", "dbport")
_redis = redis.Redis(host=host, port=int(port), decode_responses=True, db=6)
# Connect to the token-authentication database
# sp = spotify.Spotify(2) # Required global; the argument is the number of standby drivers kept alive, one driver per user
# Temporarily disabled because of problems with the Spotify platform
app = Flask(__name__)
# Create the Flask application instance
CORS(app, resources=r'/*')
# r'/*' is a wildcard that allows cross-origin requests on every URL of this server
re_value_, re_value = 0, 0
# Values returned by the token-authentication checks
"""
>>>全局设定结束>>>
"""
@app.route('/')
def hello_world():
"""
Used to check that the server is working properly.
"""
UA = request.headers['User-Agent']
logger.logger.debug("请求测试服务器是否正常工作")
return UA + 'Hello World! Listen-now\'s API Working Cat.1'
# return 'Hello World! Listen-now\'s API Working Cat.1'
def _Return_Error_Post(code, status, detail = "", **kw):
"""
Builds the error/response payload that is sent back to the front end.
code: the status code
status: the status message
detail: detailed information
The pieces are assembled into a dict that the caller serialises to JSON.
"""
RetureContent = {"code": code, "status": status, "detail": detail, "other": kw}
logger.logger.info("向前端返回请求结果" + RetureContent)
return RetureContent
def Simple_Check(token):
# If the token is present in the redis store, the simple check passes.
global re_value
if token != "" or token != None:
print("result = ", _redis.get(str(token[:-5] + '\n' + token[-3:])))
if _redis.get(str(token[:-5] + '\n' + token[-3:])) == None:
re_value = 0
return 0
else:
re_value = 1
return 1
else:
re_value = 0
return 0
def Contorl_Request(token):
global re_value_
check = token_admin.Forbidden()
re_value_ = check.sign_ip(token)
if re_value_["code"] == ReturnStatus.TOKEN_SUCCESS:
re_value_ = 1
return re_value_
elif re_value_["code"] == ReturnStatus.IP_FORBID:
re_value_ = 0
return re_value_
def Test_api(token):
try:
# Rebinding the function names here would shadow them and raise UnboundLocalError,
# so the thread objects get their own names.
simple_check_thread = threading.Thread(target=Simple_Check, args=(token,))
contorl_request_thread = threading.Thread(target=Contorl_Request, args=(token,))
# Start asynchronous threads to run both checks
simple_check_thread.start()
contorl_request_thread.start()
simple_check_thread.join()
contorl_request_thread.join()
if re_value != 1:
# the token is invalid
raise Error.Token_Time_Error()
elif re_value == 1 and re_value_ != 1:
# the token is rate-limited
raise Error.Token_Contorl_Error()
except Error.Token_Time_Error:
return ReturnStatus.TOKEN_ERROR
except Error.Token_Contorl_Error:
return ReturnStatus.TOKEN_FORBED
else:
return 1
@app.route('/search', methods = ['POST', 'GET'])
def search_json():
"""
API endpoint that accepts song-title search requests from the various front ends.
Both POST and GET are routed here; only POST performs a search.
For POST requests the platform field selects which crawler script is invoked.
See the SDK documentation for the full list of error codes.
"""
global re_dict
if request.method == 'POST':
re_dict = {}
data = request.get_data() # fetch the raw JSON payload.
try:
dict_data = json.loads(data) # parse the JSON payload.
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PSOT_DATA, status="Failed", detail = "ERROR_PSOT_DATA")
try:
music_title = dict_data["title"]
music_platform = dict_data["platform"]
try:
music_page = dict_data["page"]
except:
music_page = 1
# Extract the requested song title and the chosen music platform
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
else:
if music_page > 10:
re_dict = _Return_Error_Post(code=ReturnStatus.OVER_MAXPAGE, status="Failed", detail = "OVER_MAXPAGE")
else:
if music_title != '' or music_title != None:
if music_platform == "Neteasemusic":
neteasymusic_id = neteasy_scrawl.Netmusic()
re_dict = neteasymusic_id.pre_response_neteasymusic(music_title, music_page)
elif music_platform == "Xiamimusic":
xiamimusic_search = xiami_scrawl.Search_xiami()
re_dict = xiamimusic_search.search_xiami(music_title, music_page)
elif music_platform == "QQmusic":
qqmusic_search = qq_scrawl.QQMusic()
re_dict = qqmusic_search.search_by_keyword(music_title, music_page)
elif music_platform == "Kugoumusic":
kugou_search = kugou_scrawl.Kugou()
re_dict = kugou_search.Search_List(music_title, music_page)
elif music_platform == "Kuwomusic":
kuwo_search = kuwo_scrawl.KuwoMusic()
re_dict = kuwo_search.Search_List(music_title, music_page)
elif music_platform == "Migumusic":
migu_search = migu_scrawl.Migu()
re_dict = migu_search.search(music_title, music_page)
elif music_platform == "Baidumusic":
baidu_search = baidu_scrawl.BaiduMusic()
re_dict = baidu_search.search_by_keyword(keyword=music_title, page_no=music_page, page_num=10)
else:
logger.logger.warning("用户请求了一个不被支持的平台")
re_dict = _Return_Error_Post(code=ReturnStatus.NO_SUPPORT, status="Failed", detail = "NO_SUPPORT")
else:
logger.logger.warning("用户的请求有参数错误" + dict_data)
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
finally:
if re_dict == "":
re_dict = _Return_Error_Post(code=ReturnStatus.NOT_SAFE, status="Failed", detail = "NOT_SAFE")
elif re_dict == ReturnStatus.NO_EXISTS:
re_dict = _Return_Error_Post(code=ReturnStatus.NO_EXISTS, status="Failed", detail = "NO_EXISTS")
logger.logger.warning("用户的请求不存在。" + dict_data)
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
else:
logger.logger.warning("请求search接口使用了错误的方法")
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_METHOD, status="Failed", detail = "ERROR_METHOD")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/TopSongList', methods = ['POST', "GET"])
def Return_Random_User_Song_List():
"""
Returns 20 popular playlists to the front end.
Both GET and POST requests are accepted.
"""
global re_dict
if request.method == "POST":
pass
# Code is temporarily being reworked
else:
KugouTopSongList = kugou_scrawl.Kugou()
re_dict = KugouTopSongList.TopSongList()
logger.logger.info("向前端返回的热门歌单列" + re_dict)
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
return response
@app.route('/login', methods = ["POST"])
def login():
"""登录/注册函数
用于用户登录/注册的api接口,
用户登录是用户上传他的账户名,明文密码,若登录成功服务器返回cookies,账户名,状态码
登录失败返回账户名,状态码,状态码可能标示该用户未注册,请求注册的信息,或者是密码错误的信息
注册功能则是请求账户名,明文密码,flag参数为0
Decorators:
app.route
"""
global re_dict
data = request.get_data()
try:
dict_data = json.loads(data)
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PSOT_DATA, status="Failed", detail="ERROR_PSOT_DATA")
if re.findall(r"wechat", request.headers.get("User-Agent")): # 如果判断用户请求是来自微信小程序
try:
user_id = dict_data["open_id"]
passwd = "Wechat_Mini_Program"
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail="ERROR_PARAMS")
else: # the request does not come from the mini-program
try:
user_id = dict_data["user_id"]
passwd = dict_data["passwd"]
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail="ERROR_PARAMS")
try:
flag = dict_data["flag"]
except:flag = 1 # a non-zero or missing flag means login rather than registration; pass flag=0 to register
if flag:
status = bcrypt_hash.Sign_In_Check(user_id, passwd)
if status["code"] == ReturnStatus.USER_SUCCESS_SIGN_IN or status["code"] == ReturnStatus.USER_WECHAT_SIGN:
# login succeeded
re_dict = copy.deepcopy(status)
elif status["code"] == ReturnStatus.USER_FAILED_SIGN_IN:
re_dict = copy.deepcopy(status)
elif status["code"] == ReturnStatus.USER_NOT_SIGN_UP:
re_dict = copy.deepcopy(status)
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/get_token', methods=['GET', 'POST'])  # the handler below has a POST branch, so allow both methods
def get_token():
global re_dict
outdate=datetime.datetime.today()
if request.method == "POST":
data = request.get_data()
try:
dict_data = json.loads(data)
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PSOT_DATA, status="Failed", detail="ERROR_PSOT_DATA")
user_id = dict_data["user_id"]
ua = request.headers.get('User-Agent')
ip = request.remote_addr
creat_token = bcrypt_hash.AES_Crypt_Cookies()
Token = creat_token.Creat_Token(1, user_id, ip, ua)
re_dict = {"token_message":str(Token[0]), "signature":str(Token[1])}
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.set_cookie('token', Token[0], expires=outdate)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
logger.logger.info("注册新token" + str(Token[0]))
return response
else:
ua = request.headers.get('User-Agent')
ip = request.remote_addr
creat_token = bcrypt_hash.AES_Crypt_Cookies()
Token = creat_token.Creat_Token(1, "Listen now user", ip, ua)
re_dict = {"token_message":str(Token[0]), "signature":str(Token[1])}
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.set_cookie('token', Token[0], expires=outdate)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
logger.logger.info("注册新token" + str(Token[0]))
return response
@app.route('/exist_token', methods=['POST'])
def exist_token():
global re_dict
outdate=datetime.datetime.today() + datetime.timedelta(days=2)
data = request.get_data()
try:
dict_data = json.loads(data)
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PSOT_DATA, status="Failed", detail="ERROR_PSOT_DATA")
if dict_data["sign_valid"] == 1: # 证明签名有效
try:
user_id = dict_data["user_id"]
except KeyError:
user_id = "Listen now user"
if _redis.get(dict_data["token"]) != None and _redis.get(dict_data["token"]) == user_id:
_redis.set(dict_data["token"], user_id)
if _redis.expire(dict_data["token"], 3600*48):
re_dict = _Return_Error_Post(code=ReturnStatus.TOKEN_IS_EXIST, status="SUCCESS", detail="TOKEN_IS_EXIST")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.set_cookie('token', dict_data["token"], expires=outdate)
else:
re_dict = _Return_Error_Post(code=ReturnStatus.TOKEN_ERROR, status="Failed", detail="TOKEN_ERROR")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
else:
re_dict = _Return_Error_Post(code=ReturnStatus.TOKEN_CREAT_FAILED, status="Failed", detail="TOKEN_CREAT_FAILED")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/user_song_list', methods = ['POST', 'GET'])
def Return_User_Song_List():
"""
Handles a user's playlist-sync request.
Required parameters: the user's uid on the music platform and the id of the platform to sync from.
The response contains the synced playlist information, the user uid, a status code, etc.
"""
global re_dict, user_id
user_id = None
data = request.get_data()
try:
dict_data = json.loads(data)
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PSOT_DATA, status="Failed", detail="ERROR_PSOT_DATA")
if re.findall(r"wechat", request.headers.get("User-Agent")):
# the request comes from the WeChat mini-program
try:
user_id = dict_data["open_id"]
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail="ERROR_PARAMS")
else:
try:
user_id = dict_data["user_id"]
except:
re_dict = _Return_Error_Post(code=ReturnStatus.USER_NOT_SIGN_UP, status="Failed", detail="USER_NOT_SIGN_UP")
else:
pass
if user_id != None:
try:
uid = dict_data["uid"]
platform = dict_data["platform"]
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail="ERROR_PARAMS")
else:
if platform == "Neteasemusic":
check_func = Neteasymusic_Sync.Neteasymusic_Sync()
re_dict = check_func.Get_User_List(uid, user_id)
elif platform == "QQmusic":
check_func = qq_scrawl.QQMusic()
re_dict = check_func.Get_User_List(uid, user_id)
if re_dict:
re_dict.update({"code":202, "status":"Success"})
else:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_SEVER, status="Failed", detail="ERROR_SEVER")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/song_list_requests', methods = ['POST', 'GET'])
def Return_User_Song_List_Detail():
"""
Returns the detailed information of a single playlist to the front end,
including the playlist name, the playlist id and, for every song,
the song id, the song title and the performer.
"""
global re_dict
data = request.get_data()
try:
dict_data = json.loads(data)
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PSOT_DATA, status="Failed", detail = "ERROR_PSOT_DATA")
try:
song_list_platform = dict_data["platform"]
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
song_list_id = dict_data["id"]
page = dict_data['page']
if song_list_platform == "Neteasemusic":
return_user_song_list = neteasy_Hot_Song_List.Hot_Song_List()
re_dict = return_user_song_list.Download_SongList(song_list_id)
elif song_list_platform == "Xiamimusic":
return_song_list = xiami_Song_List.XiamiApi()
re_dict = return_song_list.getPlaylist(song_list_id)
elif song_list_platform == "Kugoumusic":
return_user_song_list = kugou_scrawl.Kugou()
re_dict = return_user_song_list.ReturnSongList(song_list_id, page)
else:
re_dict = _Return_Error_Post(code=ReturnStatus.NO_SUPPORT, status="Failed", detail = "NO_SUPPORT")
if re_dict:
logger.logger.info("请求歌单详细数据" + re_dict)
re_dict.update(_Return_Error_Post(code=ReturnStatus.SUCCESS, status="Success", detail="SUCCESS"))
else:
logger.logger.info("请求歌单错误,song_list_id: " + song_list_id, + "platform: " + song_list_platform)
re_dict.update(_Return_Error_Post(code=ReturnStatus.ERROR_SEVER, status="Failed", detail="ERROR_SEVER"))
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/check_user', methods = ['GET','POST'])
def check_user():
re_dict = {}
"""
Request this endpoint with the user_id parameter,
i.e. the username the user would like to register,
and check whether that username is already taken.
If the first value of the returned tuple is 0, the account does not exist and can be registered;
1 means the account already exists, 2 means the account was newly registered (the new user_id is returned in every case).
flag = 1 means only check whether the account exists; flag = 0 means the account does not exist yet and the user wants to register it.
value -> 200 the account is not registered
      -> 201 the account is already taken by someone else
      -> 202 the account was registered successfully
"""
if request.method == "POST":
data = request.get_data()
try:
dict_data = json.loads(data)
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PSOT_DATA, status="Failed", detail = "ERROR_PSOT_DATA")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if re.findall(r"wechat", request.headers.get("User-Agent", "")):
# The request was identified as coming from the WeChat mini-program.
try:
user_id = dict_data["open_id"]
passwd = "Wechat_Mini_Program"
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail="ERROR_PARAMS")
else:
user_id = dict_data["user_id"]
passwd = dict_data["passwd"]
try:
flag = dict_data["flag"]
except:flag = 1
# flag defaults to sign-in (login) rather than registration.
if flag:
status = bcrypt_hash.Sign_In_Check(user_id, passwd)
if status["code"] == ReturnStatus.USER_SUCCESS_SIGN_IN or status["code"] == ReturnStatus.USER_WECHAT_SIGN:
# If sign-in succeeded, a playlist sync would be requested here.
pass
elif request.method == "GET":
pass
else:
re_dict = _Return_Error_Post(code=ReturnStatus.DATABASE_OFF, status="Failed", detail = "DATABASE_OFF")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/id', methods = ['POST', 'GET'])
def play_id():
"""
用于前端请求歌曲id时服务器针对性的反馈方法
基本内容如上.
"""
global re_dict
if request.method == 'POST':
data = request.get_data()
dict_data = json.loads(data)
try:
music_platform = dict_data['platform']
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
else:
if music_platform != '' or music_platform != None:
if music_platform == "Neteasemusic":
neteasymusic_id = neteasy_scrawl.Netmusic()
music_id = dict_data["id"]
re_dict = neteasymusic_id.music_id_requests(music_id)
if re_dict:
re_dict.update({"code":ReturnStatus.SUCCESS, "status":"Success"})
else:
re_dict = _Return_Error_Post(code=ReturnStatus.NO_MUSIC_DETAIL, status="Failed", detail = "NO_MUSIC_DETAIL")
elif music_platform == "Xiamimusic":
try:
music_id = dict_data["id"]
except KeyError:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
else:
re_dict = xiami_scrawl.Search_xiami.id_req(music_id)
if re_dict:
re_dict.update({"code":ReturnStatus.SUCCESS, "status":"Success"})
else:
re_dict = _Return_Error_Post(code=ReturnStatus.OVER_MAXPAGE, status="Failed", detail = "OVER_MAXPAGE")
elif music_platform == "QQmusic":
qqmusic_id = qq_scrawl.QQMusic()
re_dict = qqmusic_id.search_by_id(dict_data["id"])
if re_dict:
re_dict.update({"code":ReturnStatus.SUCCESS, "status":"Success"})
else:
re_dict = _Return_Error_Post(code=ReturnStatus.OVER_MAXPAGE, status="Failed", detail = "OVER_MAXPAGE")
elif music_platform == "Kugoumusic":
kugou = kugou_scrawl.Kugou()
re_dict = kugou.hash_search(dict_data["id"])
if re_dict:
re_dict.update({"code":ReturnStatus.SUCCESS, "status":"Success"})
else:
re_dict = _Return_Error_Post(code=ReturnStatus.OVER_MAXPAGE, status="Failed", detail = "OVER_MAXPAGE")
elif music_platform == "Kuwomusic":
kuwo = kuwo_scrawl.KuwoMusic()
re_dict = kuwo.Search_details(dict_data["id"])
if re_dict:
re_dict.update({"code":ReturnStatus.SUCCESS, "status":"Success"})
else:
re_dict = _Return_Error_Post(code=ReturnStatus.OVER_MAXPAGE, status="Failed", detail = "OVER_MAXPAGE")
elif music_platform == "Migumusic":
migu = migu_scrawl.Migu()
re_dict = migu.search_details(dict_data["id"])
if re_dict:
re_dict.update({"code":ReturnStatus.SUCCESS, "status":"Success"})
else:
re_dict = _Return_Error_Post(code=ReturnStatus.OVER_MAXPAGE, status="Failed", detail = "OVER_MAXPAGE")
elif music_platform == "Baidumusic":
baidu_search = baidu_scrawl.BaiduMusic()
re_dict = baidu_search.search_by_id(song_id=dict_data["id"])
else:
logger.logger.warning("Unsupported platform requested: " + music_platform)
re_dict = _Return_Error_Post(code=ReturnStatus.NO_SUPPORT, status="Failed", detail = "NO_SUPPORT")
finally:
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
else:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_METHOD, status="Failed", detail = "ERROR_METHOD")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.before_request
def redirect():
if not request.path=='/' and request.path!='/get_token' and request.path!='/exist_token':
try:
token = request.headers['token']
token_value = Test_api(token)
if token_value != 1:
if token_value == ReturnStatus.TOKEN_ERROR:
raise Error.Token_Time_Error()
elif token_value == ReturnStatus.TOKEN_FORBED:
raise Error.Token_Contorl_Error()
except Error.Token_Time_Error:
re_dict = _Return_Error_Post(code=ReturnStatus.TOKEN_ERROR, status="Failed", detail = "remind token")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
except Error.Token_Contorl_Error:
re_dict = _Return_Error_Post(code=ReturnStatus.TOKEN_FORBED, status="Failed", detail = "TOKEN_FORBED")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
except:
re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
response = Response(json.dumps(re_dict), mimetype = 'application/json')
response.headers.add('Server','python flask')
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
else:
pass
# @app.route('/SpotifyLogin', methods=['GET', 'POST']) # login; to be merged into the existing login endpoint, with error-handling logic added
# def SpotifyLogin():
# if request.method == 'POST':
# data = request.get_data()
# try:
# dict_data = json.loads(data)
# username = dict_data['username']
# password = dict_data['password']
# except KeyError:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
# except:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_UNKNOWN, status="Failed", detail = "ERROR_UNKNOWN")
# else:
# re_dict = sp.login(username, password)
# finally:
# response = Response(json.dumps(re_dict), mimetype='application/json')
# response.headers.add('Server','python flask')
# response.headers['Access-Control-Allow-Origin'] = '*'
# response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
# response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
# return response
# else:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_METHOD, status="Failed", detail = "ERROR_METHOD")
# response = Response(json.dumps(re_dict), mimetype = 'application/json')
# response.headers.add('Server','python flask')
# response.headers['Access-Control-Allow-Origin'] = '*'
# response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
# response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
# return response
# @app.route('/SpotifyGoogle', methods=['GET', 'POST']) # standalone Google auth with error handling; the exact payload format is agreed with the front end
# def google():
# if request.method == 'POST':
# data = request.get_data()
# try:
# dict_data = json.loads(data)
# username = dict_data['username']
# method = dict_data['method']
# nums = dict_data['nums']
# except KeyError:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
# except:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_UNKNOWN, status="Failed", detail = "ERROR_UNKNOWN")
# else:
# if method == 'mul_submit':
# re_dict = sp.mul_submit(username=username, nums=nums)
# elif method == 'single_click':
# re_dict = sp.single_click(username=username, num=nums[0])
# elif method == 'submit':
# re_dict = sp.submit(username=username)
# else:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS->MethodParams")
# response = Response(json.dumps(re_dict), mimetype = 'application/json')
# response.headers.add('Server','python flask')
# response.headers['Access-Control-Allow-Origin'] = '*'
# response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
# response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
# return response
# else:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_METHOD, status="Failed", detail = "ERROR_METHOD")
# response = Response(json.dumps(re_dict), mimetype = 'application/json')
# response.headers.add('Server','python flask')
# response.headers['Access-Control-Allow-Origin'] = '*'
# response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
# response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
# return response
# @app.route("/callback/") #这个路由不能改
# def callback():
# """
# Back-end logging is still incomplete, so exception logging is left unchanged here for now.
# """
# try:
# error = request.args['error']
# if error != None:
# return error
# except:
# print('noerror') # no exception occurred here
# try:
# code = request.args['code']
# state = request.args['state']
# sp.user_load_token(code, state)
# except:
# return 'error' # treat it as an error
# return 'ok'
# @app.route("/SpotifyLogout") #注销函数 加入错误处理必要逻辑
# def SpotifyLogout():
# try:
# username = request.args['username']
# except KeyError:
# re_dict = _Return_Error_Post(code=ReturnStatus.ERROR_PARAMS, status="Failed", detail = "ERROR_PARAMS")
# else:
# sp.user_login[username] = False
# re_dict = _Return_Error_Post(code=ReturnStatus.SUCCESS, status="SUCCESS", detail = "SUCCESS Logout")
# finally:
# response = Response(json.dumps(re_dict), mimetype = 'application/json')
# response.headers.add('Server','python flask')
# response.headers['Access-Control-Allow-Origin'] = '*'
# response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
# response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
# return response
if __name__ == '__main__':
"""
Read the host and port from the configuration file via configparser,
then start Flask in debug mode.
"""
host = project.Config.config.getConfig("apptest", "apphost")
port = project.Config.config.getConfig("apptest", "appport")
app.run(host=host, port=int(port), debug = True)
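# The configuration file read above is assumed to look roughly like this
# (hypothetical values; only the section and key names come from the code):
#
#   [apptest]
#   apphost = 127.0.0.1
#   appport = 5000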
|
sdca_ops_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from threading import Thread
import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import _ShardedMutableDenseHashTable
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SparseFeatureColumn
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
_SHARD_NUMBERS = [None, 1, 3, 10]
_NUM_LOSS_PARTITIONS = [2, 4]
def make_example_proto(feature_dict, target, value=1.0):
e = tf.train.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
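# Rough sketch of the proto produced by
# make_example_proto({'age': [0], 'gender': [0]}, 0); shown only for
# orientation, the exact text-format rendering may differ:
#   features {
#     feature { key: "target"         value { float_list { value: 0.0 } } }
#     feature { key: "age_indices"    value { int64_list { value: 0 } } }
#     feature { key: "age_values"     value { float_list { value: 1.0 } } }
#     feature { key: "gender_indices" value { int64_list { value: 0 } } }
#     feature { key: "gender_values"  value { float_list { value: 1.0 } } }
#   }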
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target': tf.FixedLenFeature(shape=[1],
dtype=tf.float32,
default_value=0),
'age_indices': tf.VarLenFeature(dtype=tf.int64),
'age_values': tf.VarLenFeature(dtype=tf.float32),
'gender_indices': tf.VarLenFeature(dtype=tf.int64),
'gender_values': tf.VarLenFeature(dtype=tf.float32)
}
return tf.parse_example(
[e.SerializeToString() for e in example_protos], features)
parsed = parse_examples(example_protos)
sparse_features = [
SparseFeatureColumn(
tf.reshape(
tf.split(1, 2, parsed['age_indices'].indices)[0], [-1]),
tf.reshape(parsed['age_indices'].values, [-1]),
tf.reshape(parsed['age_values'].values, [-1])), SparseFeatureColumn(
tf.reshape(
tf.split(1, 2, parsed['gender_indices'].indices)[0], [-1]),
tf.reshape(parsed['gender_indices'].values, [-1]),
tf.reshape(parsed['gender_values'].values, [-1]))
]
return dict(sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=tf.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_variable_dict(max_age, max_gender):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
age_weights = tf.Variable(tf.zeros([max_age + 1], dtype=tf.float32))
gender_weights = tf.Variable(tf.zeros([max_gender + 1], dtype=tf.float32))
return dict(sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
labels):
"""Creates examples and variables dictionaries for dense features.
Variables shapes are inferred from the list of dense feature values passed as
argument.
Args:
dense_features_values: The values of the dense features
weights: The example weights.
labels: The example labels.
Returns:
One dictionary for the examples and one for the variables.
"""
dense_tensors = []
dense_weights = []
for dense_feature in dense_features_values:
dense_tensor = tf.convert_to_tensor(dense_feature, dtype=tf.float32)
check_shape_op = tf.Assert(
tf.less_equal(tf.rank(dense_tensor), 2),
['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
# Reshape to [batch_size, dense_column_dimension].
with tf.control_dependencies([check_shape_op]):
dense_tensor = tf.reshape(dense_tensor,
[dense_tensor.get_shape().as_list()[0], -1])
dense_tensors.append(dense_tensor)
# Add variables of shape [feature_column_dimension].
dense_weights.append(
tf.Variable(
tf.zeros(
[dense_tensor.get_shape().as_list()[1]], dtype=tf.float32)))
examples_dict = dict(
sparse_features=[],
dense_features=dense_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in range(0, len(labels))])
variables_dict = dict(
sparse_features_weights=[], dense_features_weights=dense_weights)
return examples_dict, variables_dict
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
return tf.cast(
tf.greater_equal(predictions, tf.ones_like(predictions) * cutoff),
dtype=tf.int32)
def get_binary_predictions_for_hinge(predictions):
return tf.cast(
tf.greater_equal(predictions, tf.zeros_like(predictions)),
dtype=tf.int32)
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaModelTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
class SdcaWithLogisticLossTest(SdcaModelTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testDistributedSimple(self):
# Setup test data
example_protos = [
make_example_proto({'age': [0],
'gender': [0]}, 0),
make_example_proto({'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
for num_loss_partitions in _NUM_LOSS_PARTITIONS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss',
num_table_shards=num_shards,
num_loss_partitions=num_loss_partitions)
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
def Minimize():
with self._single_threaded_test_session():
for _ in range(_MAX_ITERATIONS):
train_op.run()
threads = []
for _ in range(num_loss_partitions):
threads.append(Thread(target=Minimize))
threads[-1].start()
for t in threads:
t.join()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures
# that the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There is neither L1 nor L2 loss, so regularized and unregularized
# losses should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 0),
# Will be used.
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0.1),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [2],
'gender': [0]}, 0),
make_example_proto(
{'age': [3],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.226487 + 0.102902,
unregularized_loss.eval(),
atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
rtol=2e-2,
atol=1e-2)
def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [3.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
rtol=2e-2,
atol=1e-2)
def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [0]}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({'age': [0],
'gender': [0]}, 0),
make_example_proto({'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'indices.*'):
train_op.run()
def testOutOfRangeDenseFeatures(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[1.0, 0.0])
# Replace with a variable of size 1 instead of 2.
variables['dense_features_weights'] = [
tf.Variable(tf.zeros(
[1], dtype=tf.float32))
]
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
'More dense features than we have parameters for.*'):
train_op.run()
# TODO(katsiaspis): add a test for the case when examples at the end of an
# epoch are repeated, since example id may be duplicated.
class SdcaWithLinearLossTest(SdcaModelTest):
"""SDCA optimizer test class for linear (squared) loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 2/3 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
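# One way to see the 2/3 factor (reading the regularizer as L2/2 * ||w||^2):
# each example activates two equal weights w, so per example
#   d/dw [ (label - 2*w)^2 / 2 + w^2 ] = -2*label + 4*w + 2*w = 0  =>  w = label/3,
# giving a prediction of 2*w = 2*label/3, i.e. -20/3 and 28/3 for labels -10, 14.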
self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
predictions.eval(),
rtol=0.005)
# Approximate gap should be very close to 0.0. (In fact, because the gap
# is only approximate, it is likely that upon convergence the duality gap
# can have a tiny negative value).
self.assertAllClose(0.0,
lr.approximate_duality_gap().eval(),
atol=1e-2)
def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
# 2 more identical examples
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 1/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 + L2 * 16 * weight^2
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)
def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be -4.0, 20/3 due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
# Loss should be the sum of the regularized loss value from above per
# example after plugging in the optimal weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0, -2.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0, 2.0),
]
example_weights = [5.0, 3.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There are 4 (sparse) variable weights to be learned. 2 for age and 2 for
# gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
# y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
# corresponding *example* weights. With the given feature values, the loss
# function is given by:
# s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
# + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
# can be verified that:
# w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
# w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
# regularization and example weights, the predictions are within:
# 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels.
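# Plugging in the numbers used here (\lambda = 1, s_1 = 5, s_2 = 3):
#   8*5 / (1 + 8*5) = 40/41  and  8*3 / (1 + 8*3) = 24/25,
# so the expected predictions are -10 * 40/41 and 14 * 24/25, as asserted below.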
self.assertAllClose([-10 * 40.0 / 41.0, 14.0 * 24 / 25.0],
predictions.eval(),
atol=0.01)
def testDenseFeaturesWithDefaultWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
weights=[1.0, 1.0],
labels=[10.0, -5.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
# differentiating wrt to w_1, w_2 yields the following optimal values:
# w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
# In this case the (unnormalized regularized) loss will be:
# 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
# loss should be further normalized by the sum of example weights.
self.assertAllClose([5.0, -2.5],
predictions.eval(),
rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
def testDenseFeaturesWithArbitraryWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[10.0, -5.0])
options = dict(symmetric_l2_regularization=5.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
# \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
# turns out that the optimal (variable) weights are given by:
# w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
# w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
# In this case the (unnormalized regularized) loss will be:
# s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
# actual loss should be further normalized by the sum of example weights.
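# With the numbers above (\lambda = 5, s_1 = 20, s_2 = 10):
#   w_1* = 10 * 20 / (5 + 20) = 8  and  w_2* = -5 * 10 / (5 + 10) = -10/3,
# which matches the asserted predictions; the loss 2175/9 divided by the
# example-weight sum 30 gives the asserted 2175/270.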
self.assertAllClose([8.0, -10.0/3],
predictions.eval(),
rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
class SdcaWithHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model separates perfectly the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing
# wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
# weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
# loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
class SdcaWithSmoothHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for smooth hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({'age': [0],
'gender': [0]}, 0),
make_example_proto({'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='smooth_hinge_loss')
model = SdcaModel(examples, variables, options)
tf.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model separates perfectly the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3. This gives
# an unregularized hinge loss of 0.33 and a 0.11 L2 loss
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
class SparseFeatureColumnTest(SdcaModelTest):
"""Tests for SparseFeatureColumn.
"""
def testBasic(self):
expected_example_indices = [1, 1, 1, 2]
expected_feature_indices = [0, 1, 2, 0]
sfc = SparseFeatureColumn(expected_example_indices,
expected_feature_indices, None)
self.assertTrue(isinstance(sfc.example_indices, tf.Tensor))
self.assertTrue(isinstance(sfc.feature_indices, tf.Tensor))
self.assertEqual(sfc.feature_values, None)
with self._single_threaded_test_session():
self.assertAllEqual(expected_example_indices, sfc.example_indices.eval())
self.assertAllEqual(expected_feature_indices, sfc.feature_indices.eval())
expected_feature_values = [1.0, 2.0, 3.0, 4.0]
sfc = SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0],
expected_feature_values)
with self._single_threaded_test_session():
self.assertAllEqual(expected_feature_values, sfc.feature_values.eval())
class SdcaFprintTest(SdcaModelTest):
"""Tests for the SdcaFprint op.
This is one way of enforcing the platform-agnostic nature of SdcaFprint.
Basically we are checking against exact values and this test could be running
across different platforms. Note that it is fine for expected values to change
in the future, if the implementation of SdcaFprint changes (ie this is *not* a
frozen test).
"""
def testFprint(self):
with self._single_threaded_test_session():
in_data = tf.constant(['abc', 'very looooooong string', 'def'])
out_data = gen_sdca_ops._sdca_fprint(in_data)
self.assertAllEqual([[4143508125394299908, -6879828354153669051],
[5849691694103072671, -4874542629849009556],
[603227410218889250, 8762207001949257490]],
out_data.eval())
class ShardedMutableDenseHashTableTest(SdcaModelTest):
"""Tests for the _ShardedMutableHashTable class."""
def testShardedMutableHashTable(self):
for num_shards in [1, 3, 10]:
with self._single_threaded_test_session():
default_val = -1
empty_key = 0
keys = tf.constant([11, 12, 13], tf.int64)
values = tf.constant([0, 1, 2], tf.int64)
table = _ShardedMutableDenseHashTable(
tf.int64, tf.int64, default_val, empty_key, num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([11, 12, 14], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
self.assertAllEqual([0, 1, -1], output.eval())
def testShardedMutableHashTableVectors(self):
for num_shards in [1, 3, 10]:
with self._single_threaded_test_session():
default_val = [-0.1, 0.2]
empty_key = [0, 1]
keys = tf.constant([[11, 12], [13, 14], [15, 16]], tf.int64)
values = tf.constant([[0.5, 0.6], [1.5, 1.6], [2.5, 2.6]], tf.float32)
table = _ShardedMutableDenseHashTable(
tf.int64, tf.float32, default_val, empty_key, num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = tf.constant([[11, 12], [13, 14], [11, 14]], tf.int64)
output = table.lookup(input_string)
self.assertAllEqual([3, 2], output.get_shape())
self.assertAllClose([[0.5, 0.6], [1.5, 1.6], [-0.1, 0.2]],
output.eval())
def testExportSharded(self):
with self._single_threaded_test_session():
empty_key = -2
default_val = -1
num_shards = 2
keys = tf.constant([10, 11, 12], tf.int64)
values = tf.constant([2, 3, 4], tf.int64)
table = _ShardedMutableDenseHashTable(
tf.int64, tf.int64, default_val, empty_key, num_shards=num_shards)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
keys_list, values_list = table.export_sharded()
self.assertAllEqual(num_shards, len(keys_list))
self.assertAllEqual(num_shards, len(values_list))
# Exported keys include empty key buckets set to the empty_key
self.assertAllEqual(set([-2, 10, 12]), set(keys_list[0].eval().flatten()))
self.assertAllEqual(set([-2, 11]), set(keys_list[1].eval().flatten()))
# Exported values include empty value buckets set to 0
self.assertAllEqual(set([0, 2, 4]), set(values_list[0].eval().flatten()))
self.assertAllEqual(set([0, 3]), set(values_list[1].eval().flatten()))
if __name__ == '__main__':
googletest.main()
|
utils.py
|
"""."""
from threading import Event as _Event
import logging as _log
import sys as _sys
from epics.ca import CAThread as _Thread
from mathphys.functions import save_pickle as _save_pickle, \
load_pickle as _load_pickle
import apsuite.commisslib as _commisslib
class DataBaseClass:
"""."""
def __init__(self, params=None):
"""."""
self.data = dict()
self.params = params
def save_data(self, fname: str, overwrite=False):
"""Save `data` and `params` to pickle file.
Args:
fname (str): name of the pickle file. Extension is not needed.
overwrite (bool, optional): Whether to overwrite existing file.
Defaults to False.
"""
data = dict(params=self.params.to_dict(), data=self.data)
_save_pickle(data, fname, overwrite=overwrite)
def load_and_apply(self, fname: str):
"""Load and apply `data` and `params` from pickle file.
Args:
fname (str): name of the pickle file. Extension is not needed.
"""
data = self.load_data(fname)
self.data = data['data']
params = data['params']
if not isinstance(params, dict):
params = params.to_dict()
self.params.from_dict(params_dict=params)
@staticmethod
def load_data(fname: str):
"""Load and return `data` and `params` from pickle file.
Args:
fname (str): name of the pickle file. Extension is not needed.
Returns:
data (dict): Dictionary with keys: `data` and `params`.
"""
try:
data = _load_pickle(fname)
except ModuleNotFoundError:
_sys.modules['apsuite.commissioning_scripts'] = _commisslib
data = _load_pickle(fname)
return data
class ParamsBaseClass:
"""."""
def to_dict(self):
"""."""
return self.__dict__
def from_dict(self, params_dict):
"""."""
self.__dict__.update(params_dict)
class MeasBaseClass(DataBaseClass):
"""."""
def __init__(self, params=None, isonline=True):
"""."""
super().__init__(params=params)
self.isonline = bool(isonline)
self.devices = dict()
self.analysis = dict()
self.pvs = dict()
@property
def connected(self):
"""."""
conn = all([dev.connected for dev in self.devices.values()])
conn &= all([pv.connected for pv in self.pvs.values()])
return conn
def wait_for_connection(self, timeout=None):
"""."""
obs = list(self.devices.values()) + list(self.pvs.values())
for dev in obs:
if not dev.wait_for_connection(timeout=timeout):
return False
return True
class ThreadedMeasBaseClass(MeasBaseClass):
"""."""
def __init__(self, params=None, target=None, isonline=True):
"""."""
super().__init__(params=params, isonline=isonline)
self._target = target
self._stopevt = _Event()
self._finished = _Event()
self._finished.set()
self._thread = _Thread(target=self._run, daemon=True)
def start(self):
"""."""
if self.ismeasuring:
_log.error('There is another measurement happening.')
return
self._stopevt.clear()
self._finished.clear()
self._thread = _Thread(target=self._run, daemon=True)
self._thread.start()
def stop(self):
"""."""
self._stopevt.set()
@property
def ismeasuring(self):
"""."""
return self._thread.is_alive()
def wait_measurement(self, timeout=None):
"""Wait for measurement to finish."""
return self._finished.wait(timeout=timeout)
def _run(self):
if self._target is not None:
self._target()
self._finished.set()
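# A minimal usage sketch (commented out, with hypothetical names): a concrete
# measurement would subclass ThreadedMeasBaseClass, pass its own acquisition
# routine as `target`, and rely on start()/stop()/wait_measurement() from here.
#
# class _MyParams(ParamsBaseClass):
#     def __init__(self):
#         self.nr_points = 100  # made-up parameter
#
# class _MyMeas(ThreadedMeasBaseClass):
#     def __init__(self, isonline=True):
#         super().__init__(params=_MyParams(), target=self._do_measure,
#                          isonline=isonline)
#
#     def _do_measure(self):
#         while not self._stopevt.is_set():
#             ...  # acquire data into self.data
#             break
#
# meas = _MyMeas(isonline=False)
# meas.start()
# meas.wait_measurement()
# meas.save_data('my_measurement', overwrite=True)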
|
newthreadscheduler.py
|
import time
import logging
import threading
from rx.core import Scheduler, Disposable
from .schedulerbase import SchedulerBase
from .eventloopscheduler import EventLoopScheduler
log = logging.getLogger('Rx')
class NewThreadScheduler(SchedulerBase):
"""Creates an object that schedules each unit of work on a separate thread.
"""
def __init__(self, thread_factory=None):
super(NewThreadScheduler, self).__init__()
def default_factory(target, args=None):
t = threading.Thread(target=target, args=args or [])
t.setDaemon(True)
return t
self.thread_factory = thread_factory or default_factory
def schedule(self, action, state=None):
"""Schedules an action to be executed."""
scheduler = EventLoopScheduler(thread_factory=self.thread_factory, exit_if_empty=True)
return scheduler.schedule(action, state)
def schedule_relative(self, duetime, action, state=None):
"""Schedules an action to be executed after duetime."""
scheduler = EventLoopScheduler(thread_factory=self.thread_factory, exit_if_empty=True)
return scheduler.schedule_relative(duetime, action, state)
def schedule_absolute(self, duetime, action, state=None):
"""Schedules an action to be executed at duetime."""
return self.schedule_relative(duetime - self.now, action, state=state)
def schedule_periodic(self, period, action, state=None):
"""Schedule a periodic piece of work."""
secs = self.to_relative(period) / 1000.0
disposed = []
s = [state]
def run():
while True:
time.sleep(secs)
if disposed:
return
new_state = action(s[0])
if new_state is not None:
s[0] = new_state
thread = self.thread_factory(run)
thread.start()
def dispose():
disposed.append(True)
return Disposable.create(dispose)
new_thread_scheduler = NewThreadScheduler()
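# A minimal usage sketch (commented out; the printed payload is made up):
# schedule_periodic spawns a dedicated thread that sleeps `period` between
# calls and keeps invoking `action` until the returned disposable is disposed.
#
# import time
# def tick(state):
#     print('tick', state)
#     return (state or 0) + 1          # the returned value becomes the next state
#
# subscription = new_thread_scheduler.schedule_periodic(1000, tick, state=0)
# time.sleep(3.5)                      # let a few ticks fire
# subscription.dispose()               # stops the periodic thread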
|
test_client.py
|
import os
import pytest
import time
import sys
import logging
import queue
import threading
import _thread
from unittest.mock import patch
import ray.util.client.server.server as ray_client_server
from ray.tests.client_test_utils import create_remote_signal_actor
from ray.tests.client_test_utils import run_wrapped_actor_creation
from ray.util.client.common import ClientObjectRef
from ray.util.client.ray_client_helpers import connect_to_client_or_not
from ray.util.client.ray_client_helpers import ray_start_client_server
from ray._private.client_mode_hook import client_mode_should_convert
from ray._private.client_mode_hook import disable_client_hook
from ray._private.client_mode_hook import enable_client_mode
from ray._private.test_utils import run_string_as_driver
@pytest.mark.parametrize("connect_to_client", [False, True])
def test_client_context_manager(ray_start_regular_shared, connect_to_client):
import ray
with connect_to_client_or_not(connect_to_client):
if connect_to_client:
# Client mode is on.
assert client_mode_should_convert(auto_init=True)
# We're connected to Ray client.
assert ray.util.client.ray.is_connected()
else:
assert not client_mode_should_convert(auto_init=True)
assert not ray.util.client.ray.is_connected()
def test_client_thread_safe(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
return "ok"
class Blocker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
def run(self):
ray.get(block.remote())
b = Blocker()
b.start()
time.sleep(1)
# Can concurrently execute the get.
assert ray.get(fast.remote(), timeout=5) == "ok"
# @pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
# @pytest.mark.skip()
def test_client_mode_hook_thread_safe(ray_start_regular_shared):
with ray_start_client_server():
with enable_client_mode():
assert client_mode_should_convert(auto_init=True)
lock = threading.Lock()
lock.acquire()
q = queue.Queue()
def disable():
with disable_client_hook():
q.put(client_mode_should_convert(auto_init=True))
lock.acquire()
q.put(client_mode_should_convert(auto_init=True))
t = threading.Thread(target=disable)
t.start()
assert client_mode_should_convert(auto_init=True)
lock.release()
t.join()
assert q.get() is False, "Threaded disable_client_hook failed to disable"
assert q.get() is True, "Threaded disable_client_hook failed to re-enable"
def test_interrupt_ray_get(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with ray_start_client_server() as ray:
@ray.remote
def block():
print("blocking run")
time.sleep(99)
@ray.remote
def fast():
print("fast run")
time.sleep(1)
return "ok"
class Interrupt(threading.Thread):
def run(self):
time.sleep(2)
_thread.interrupt_main()
it = Interrupt()
it.start()
with pytest.raises(KeyboardInterrupt):
ray.get(block.remote())
# Assert we can still get new items after the interrupt.
assert ray.get(fast.remote()) == "ok"
def test_get_list(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def f():
return "OK"
assert ray.get([]) == []
assert ray.get([f.remote()]) == ["OK"]
get_count = 0
get_stub = ray.worker.server.GetObject
# ray.get() uses unary-unary RPC. Mock the server handler to count
# the number of requests received.
def get(req, metadata=None):
nonlocal get_count
get_count += 1
return get_stub(req, metadata=metadata)
ray.worker.server.GetObject = get
refs = [f.remote() for _ in range(100)]
assert ray.get(refs) == ["OK" for _ in range(100)]
# Only 1 RPC should be sent.
assert get_count == 1
def test_real_ray_fallback(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def get_nodes_real():
import ray as real_ray
return real_ray.nodes()
nodes = ray.get(get_nodes_real.remote())
assert len(nodes) == 1, nodes
@ray.remote
def get_nodes():
# Can access the full Ray API in remote methods.
return ray.nodes()
nodes = ray.get(get_nodes.remote())
assert len(nodes) == 1, nodes
def test_nested_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
@ray.remote
def f():
return "OK"
return ray.get(f.remote())
assert ray.get(g.remote()) == "OK"
def test_put_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
print(objectref)
retval = ray.get(objectref)
assert retval == "hello world"
# Make sure ray.put(1) == 1 is False and does not raise an exception.
objectref = ray.put(1)
assert not objectref == 1
# Make sure it returns True when necessary as well.
assert objectref == ClientObjectRef(objectref.id)
# Assert output is correct type.
list_put = ray.put([1, 2, 3])
assert isinstance(list_put, ClientObjectRef)
assert ray.get(list_put) == [1, 2, 3]
def test_put_failure_get(ray_start_regular_shared):
with ray_start_client_server() as ray:
class DeSerializationFailure:
def __getstate__(self):
return ""
def __setstate__(self, i):
raise ZeroDivisionError
dsf = DeSerializationFailure()
with pytest.raises(ZeroDivisionError):
ray.put(dsf)
# Ensure Ray Client is still connected
assert ray.get(ray.put(100)) == 100
def test_wait(ray_start_regular_shared):
with ray_start_client_server() as ray:
objectref = ray.put("hello world")
ready, remaining = ray.wait([objectref])
assert remaining == []
retval = ray.get(ready[0])
assert retval == "hello world"
objectref2 = ray.put(5)
ready, remaining = ray.wait([objectref, objectref2])
assert (ready, remaining) == ([objectref], [objectref2]) or (
ready,
remaining,
) == ([objectref2], [objectref])
ready_retval = ray.get(ready[0])
remaining_retval = ray.get(remaining[0])
assert (ready_retval, remaining_retval) == ("hello world", 5) or (
ready_retval,
remaining_retval,
) == (5, "hello world")
with pytest.raises(Exception):
# Reference not in the object store.
ray.wait([ClientObjectRef(b"blabla")])
with pytest.raises(TypeError):
ray.wait("blabla")
with pytest.raises(TypeError):
ray.wait(ClientObjectRef("blabla"))
with pytest.raises(TypeError):
ray.wait(["blabla"])
def test_remote_functions(ray_start_regular_shared):
with ray_start_client_server() as ray:
SignalActor = create_remote_signal_actor(ray)
signaler = SignalActor.remote()
@ray.remote
def plus2(x):
return x + 2
@ray.remote
def fact(x):
print(x, type(fact))
if x <= 0:
return 1
# This hits the "nested tasks" issue
# https://github.com/ray-project/ray/issues/3644
# So we're on the right track!
return ray.get(fact.remote(x - 1)) * x
ref2 = plus2.remote(234)
# `236`
assert ray.get(ref2) == 236
ref3 = fact.remote(20)
# `2432902008176640000`
assert ray.get(ref3) == 2_432_902_008_176_640_000
# Reuse the cached ClientRemoteFunc object
ref4 = fact.remote(5)
assert ray.get(ref4) == 120
# Test ray.wait()
ref5 = fact.remote(10)
# should return ref2, ref3, ref4
res = ray.wait([ref5, ref2, ref3, ref4], num_returns=3)
assert [ref2, ref3, ref4] == res[0]
assert [ref5] == res[1]
assert ray.get(res[0]) == [236, 2_432_902_008_176_640_000, 120]
# should return ref2, ref3, ref4, ref5
res = ray.wait([ref2, ref3, ref4, ref5], num_returns=4)
assert [ref2, ref3, ref4, ref5] == res[0]
assert [] == res[1]
all_vals = ray.get(res[0])
assert all_vals == [236, 2_432_902_008_176_640_000, 120, 3628800]
# Timeout 0 on ray.wait leads to immediate return
# (not indefinite wait for first return as with timeout None):
unready_ref = signaler.wait.remote()
res = ray.wait([unready_ref], timeout=0)
# Not ready.
assert res[0] == [] and len(res[1]) == 1
ray.get(signaler.send.remote())
ready_ref = signaler.wait.remote()
# Ready.
res = ray.wait([ready_ref], timeout=10)
assert len(res[0]) == 1 and res[1] == []
def test_function_calling_function(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
def g():
return "OK"
@ray.remote
def f():
print(f, g)
return ray.get(g.remote())
print(f, type(f))
assert ray.get(f.remote()) == "OK"
def test_basic_actor(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote
class HelloActor:
def __init__(self):
self.count = 0
def say_hello(self, whom):
self.count += 1
return "Hello " + whom, self.count
@ray.method(num_returns=2)
def say_hi(self, whom):
self.count += 1
return "Hi " + whom, self.count
actor = HelloActor.remote()
s, count = ray.get(actor.say_hello.remote("you"))
assert s == "Hello you"
assert count == 1
ref = actor.say_hello.remote("world")
s, count = ray.get(ref)
assert s == "Hello world"
assert count == 2
r1, r2 = actor.say_hi.remote("ray")
assert ray.get(r1) == "Hi ray"
assert ray.get(r2) == 3
def test_pass_handles(ray_start_regular_shared):
"""Test that passing client handles to actors and functions to remote actors
in functions (on the server or raylet side) works transparently to the
caller.
"""
with ray_start_client_server() as ray:
@ray.remote
class ExecActor:
def exec(self, f, x):
return ray.get(f.remote(x))
def exec_exec(self, actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def fact(x):
out = 1
while x > 0:
out = out * x
x -= 1
return out
@ray.remote
def func_exec(f, x):
return ray.get(f.remote(x))
@ray.remote
def func_actor_exec(actor, f, x):
return ray.get(actor.exec.remote(f, x))
@ray.remote
def sneaky_func_exec(obj, x):
return ray.get(obj["f"].remote(x))
@ray.remote
def sneaky_actor_exec(obj, x):
return ray.get(obj["actor"].exec.remote(obj["f"], x))
def local_fact(x):
if x <= 0:
return 1
return x * local_fact(x - 1)
assert ray.get(fact.remote(7)) == local_fact(7)
assert ray.get(func_exec.remote(fact, 8)) == local_fact(8)
test_obj = {}
test_obj["f"] = fact
assert ray.get(sneaky_func_exec.remote(test_obj, 5)) == local_fact(5)
actor_handle = ExecActor.remote()
assert ray.get(actor_handle.exec.remote(fact, 7)) == local_fact(7)
assert ray.get(func_actor_exec.remote(actor_handle, fact, 10)) == local_fact(10)
second_actor = ExecActor.remote()
assert ray.get(
actor_handle.exec_exec.remote(second_actor, fact, 9)
) == local_fact(9)
test_actor_obj = {}
test_actor_obj["actor"] = second_actor
test_actor_obj["f"] = fact
assert ray.get(sneaky_actor_exec.remote(test_actor_obj, 4)) == local_fact(4)
def test_basic_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.log = test_log
ray.worker.log_client.set_logstream_level(logging.DEBUG)
        # Allow some time to propagate
time.sleep(1)
x = ray.put("Foo")
assert ray.get(x) == "Foo"
time.sleep(1)
logs_with_id = [msg for msg in log_msgs if msg.find(x.id.hex()) >= 0]
assert len(logs_with_id) >= 2, logs_with_id
assert any((msg.find("get") >= 0 for msg in logs_with_id)), logs_with_id
assert any((msg.find("put") >= 0 for msg in logs_with_id)), logs_with_id
def test_stdout_log_stream(ray_start_regular_shared):
with ray_start_client_server() as ray:
log_msgs = []
def test_log(level, msg):
log_msgs.append(msg)
ray.worker.log_client.stdstream = test_log
@ray.remote
def print_on_stderr_and_stdout(s):
print(s)
print(s, file=sys.stderr)
time.sleep(1)
print_on_stderr_and_stdout.remote("Hello world")
time.sleep(1)
num_hello = 0
for msg in log_msgs:
if "Hello world" in msg:
num_hello += 1
assert num_hello == 2, f"Invalid logs: {log_msgs}"
def test_serializing_exceptions(ray_start_regular_shared):
with ray_start_client_server() as ray:
with pytest.raises(ValueError, match="Failed to look up actor with name 'abc'"):
ray.get_actor("abc")
def test_invalid_task(ray_start_regular_shared):
with ray_start_client_server() as ray:
@ray.remote(runtime_env="invalid value")
def f():
return 1
# No exception on making the remote call.
ref = f.remote()
# Exception during scheduling will be raised on ray.get()
with pytest.raises(Exception):
ray.get(ref)
def test_create_remote_before_start(ray_start_regular_shared):
"""Creates remote objects (as though in a library) before
starting the client.
"""
from ray.util.client import ray
@ray.remote
class Returner:
def doit(self):
return "foo"
@ray.remote
def f(x):
return x + 20
# Prints in verbose tests
print("Created remote functions")
with ray_start_client_server() as ray:
assert ray.get(f.remote(3)) == 23
a = Returner.remote()
assert ray.get(a.doit.remote()) == "foo"
def test_basic_named_actor(ray_start_regular_shared):
"""Test that ray.get_actor() can create and return a detached actor."""
with ray_start_client_server() as ray:
@ray.remote
class Accumulator:
def __init__(self):
self.x = 0
def inc(self):
self.x += 1
def get(self):
return self.x
@ray.method(num_returns=2)
def half(self):
return self.x / 2, self.x / 2
# Create the actor
actor = Accumulator.options(name="test_acc").remote()
actor.inc.remote()
actor.inc.remote()
# Make sure the get_actor call works
new_actor = ray.get_actor("test_acc")
new_actor.inc.remote()
assert ray.get(new_actor.get.remote()) == 3
del actor
actor = Accumulator.options(name="test_acc2", lifetime="detached").remote()
actor.inc.remote()
del actor
        detached_actor = ray.get_actor("test_acc2")
        for i in range(5):
            detached_actor.inc.remote()
        assert ray.get(detached_actor.get.remote()) == 6
        h1, h2 = ray.get(detached_actor.half.remote())
assert h1 == 3
assert h2 == 3
def test_error_serialization(ray_start_regular_shared):
"""Test that errors will be serialized properly."""
fake_path = os.path.join(os.path.dirname(__file__), "not_a_real_file")
with pytest.raises(FileNotFoundError):
with ray_start_client_server() as ray:
@ray.remote
def g():
with open(fake_path, "r") as f:
f.read()
# Raises a FileNotFoundError
ray.get(g.remote())
def test_internal_kv(ray_start_regular_shared):
with ray_start_client_server() as ray:
assert ray._internal_kv_initialized()
assert not ray._internal_kv_put("apple", "b")
assert ray._internal_kv_put("apple", "asdf")
assert ray._internal_kv_put("apple", "b")
assert ray._internal_kv_get("apple") == b"b"
assert ray._internal_kv_put("apple", "asdf", overwrite=True)
assert ray._internal_kv_get("apple") == b"asdf"
assert ray._internal_kv_list("a") == [b"apple"]
ray._internal_kv_del("apple")
assert ray._internal_kv_get("apple") == b""
def test_startup_retry(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
with pytest.raises(ConnectionError):
ray_client.connect("localhost:50051", connection_retries=1)
def run_client():
ray_client.connect("localhost:50051")
ray_client.disconnect()
thread = threading.Thread(target=run_client, daemon=True)
thread.start()
time.sleep(3)
server = ray_client_server.serve("localhost:50051")
thread.join()
server.stop(0)
ray_client._inside_client_test = False
def test_dataclient_server_drop(ray_start_regular_shared):
from ray.util.client import ray as ray_client
ray_client._inside_client_test = True
@ray_client.remote
def f(x):
time.sleep(4)
return x
def stop_server(server):
time.sleep(2)
server.stop(0)
server = ray_client_server.serve("localhost:50051")
ray_client.connect("localhost:50051")
thread = threading.Thread(target=stop_server, args=(server,))
thread.start()
x = f.remote(2)
with pytest.raises(ConnectionError):
_ = ray_client.get(x)
thread.join()
ray_client.disconnect()
ray_client._inside_client_test = False
# Wait for f(x) to finish before ray.shutdown() in the fixture
time.sleep(3)
@patch.dict(os.environ, {"RAY_ENABLE_AUTO_CONNECT": "0"})
def test_client_gpu_ids(call_ray_stop_only):
import ray
ray.init(num_cpus=2)
with enable_client_mode():
# No client connection.
with pytest.raises(Exception) as e:
ray.get_gpu_ids()
assert (
str(e.value) == "Ray Client is not connected."
" Please connect by calling `ray.init`."
)
with ray_start_client_server():
# Now have a client connection.
assert ray.get_gpu_ids() == []
def test_client_serialize_addon(call_ray_stop_only):
import ray
import pydantic
ray.init(num_cpus=0)
class User(pydantic.BaseModel):
name: str
with ray_start_client_server() as ray:
assert ray.get(ray.put(User(name="ray"))).name == "ray"
object_ref_cleanup_script = """
import ray
ray.init("ray://localhost:50051")
@ray.remote
def f():
return 42
@ray.remote
class SomeClass:
pass
obj_ref = f.remote()
actor_ref = SomeClass.remote()
"""
def test_object_ref_cleanup():
# Checks no error output when running the script in
# object_ref_cleanup_script
# See https://github.com/ray-project/ray/issues/17968 for details
with ray_start_client_server():
result = run_string_as_driver(object_ref_cleanup_script)
assert "Error in sys.excepthook:" not in result
assert "AttributeError: 'NoneType' object has no " not in result
assert "Exception ignored in" not in result
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25552 --port 0"],
indirect=True,
)
def test_wrapped_actor_creation(call_ray_start):
"""
When the client schedules an actor, the server will load a separate
copy of the actor class if it's defined in a separate file. This
means that modifications to the client's copy of the actor class
aren't propagated to the server. Currently, tracing logic modifies
the signatures of actor methods to pass around metadata when ray.remote
is applied to an actor class. However, if a user does something like:
class SomeActor:
def __init__(self):
pass
def decorate_actor():
RemoteActor = ray.remote(SomeActor)
...
Then the SomeActor class will have its signatures modified on the client
side, but not on the server side, since ray.remote was applied inside of
the function instead of directly on the actor. Note if it were directly
applied to the actor then the signature would be modified when the server
imports the class.
"""
import ray
ray.init("ray://localhost:25552")
run_wrapped_actor_creation()
@pytest.mark.parametrize(
"call_ray_start",
["ray start --head --ray-client-server-port 25553 --num-cpus 0"],
indirect=True,
)
@pytest.mark.parametrize("use_client", [True, False])
def test_init_requires_no_resources(call_ray_start, use_client):
import ray
    if use_client:
        ray.init("ray://localhost:25553")
    else:
        address = call_ray_start
        ray.init(address)
@ray.remote(num_cpus=0)
def f():
pass
ray.get(f.remote())
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
run_capture_image.py
|
import io
import socket
import struct
import time
import datetime
import picamera
import logging
import socketserver
import threading
from threading import Thread
from threading import Condition
from http import server
date_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
PAGE="""\
<html>
<head>
<title>picamera MJPEG streaming demo</title>
</head>
<body>
<h1>PiCamera MJPEG Streaming Demo</h1>
<img src="stream.mjpg" width="640" height="480" />
</body>
</html>
"""
class StreamingOutput(object):
def __init__(self):
self.frame = None
self.buffer = io.BytesIO()
self.condition = Condition()
    def write(self, buf):
        # Each new JPEG frame begins with the SOI marker (0xFF 0xD8); when one
        # arrives, publish the previously buffered frame and reset the buffer.
        if buf.startswith(b'\xff\xd8'):
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)
class StreamingHandler(server.BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/index.html')
self.end_headers()
elif self.path == '/index.html':
content = PAGE.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
elif self.path == '/stream.mjpg':
self.send_response(200)
self.send_header('Age', 0)
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
self.end_headers()
try:
while True:
with output.condition:
output.condition.wait()
frame = output.frame
self.wfile.write(b'--FRAME\r\n')
self.send_header('Content-Type', 'image/jpeg')
self.send_header('Content-Length', len(frame))
self.end_headers()
self.wfile.write(frame)
self.wfile.write(b'\r\n')
except Exception as e:
logging.warning(
'Removed streaming client %s: %s',
self.client_address, str(e))
else:
self.send_error(404)
self.end_headers()
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
allow_reuse_address = True
daemon_threads = True
with picamera.PiCamera() as camera:
#camera.resolution = (1920, 1080)
camera.resolution = (640, 480)
camera.framerate = 2
camera.rotation = 180
#camera.exposure_mode = 'snow'
camera.shutter_speed = 10000
address = ('', 8000)
server = StreamingServer(address, StreamingHandler)
server_thread = Thread(target=server.serve_forever)
output = StreamingOutput()
camera.start_recording(output, format='mjpeg')
    try:
        server_thread.start()
        while True:
            time.sleep(2)
            # Take a fresh timestamp for every capture so each still image gets
            # its own file instead of overwriting the one named at start-up.
            date_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
            camera.capture_sequence([
                date_time + '.jpg'
            ], use_video_port=True, splitter_port=2)
    finally:
        camera.stop_recording()
|
mdns_example_test.py
|
import os
import re
import socket
import struct
import subprocess
import time
from threading import Event, Thread
import dpkt
import dpkt.dns
import ttfw_idf
from tiny_test_fw import DUT
stop_mdns_server = Event()
esp_answered = Event()
def get_dns_query_for_esp(esp_host):
dns = dpkt.dns.DNS(b'\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01')
dns.qd[0].name = esp_host + u'.local'
print('Created query for esp host: {} '.format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns(tester_host):
dns = dpkt.dns.DNS(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
dns.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
dns.rcode = dpkt.dns.DNS_RCODE_NOERR
arr = dpkt.dns.DNS.RR()
arr.cls = dpkt.dns.DNS_IN
arr.type = dpkt.dns.DNS_A
arr.name = tester_host
arr.ip = socket.inet_aton('127.0.0.1')
    dns.an.append(arr)
print('Created answer to mdns query: {} '.format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns_lwip(tester_host, id):
dns = dpkt.dns.DNS(b'\x5e\x39\x84\x00\x00\x01\x00\x01\x00\x00\x00\x00\x0a\x64\x61\x76\x69\x64'
b'\x2d\x63\x6f\x6d\x70\x05\x6c\x6f\x63\x61\x6c\x00\x00\x01\x00\x01\xc0\x0c'
b'\x00\x01\x00\x01\x00\x00\x00\x0a\x00\x04\xc0\xa8\x0a\x6c')
dns.qd[0].name = tester_host
dns.an[0].name = tester_host
dns.an[0].ip = socket.inet_aton('127.0.0.1')
dns.an[0].rdata = socket.inet_aton('127.0.0.1')
dns.id = id
print('Created answer to mdns (lwip) query: {} '.format(dns.__repr__()))
return dns.pack()
def mdns_server(esp_host):
global esp_answered
UDP_IP = '0.0.0.0'
UDP_PORT = 5353
MCAST_GRP = '224.0.0.251'
TESTER_NAME = u'tinytester.local'
TESTER_NAME_LWIP = u'tinytester-lwip.local'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind((UDP_IP,UDP_PORT))
mreq = struct.pack('4sl', socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.settimeout(30)
while not stop_mdns_server.is_set():
try:
if not esp_answered.is_set():
sock.sendto(get_dns_query_for_esp(esp_host), (MCAST_GRP,UDP_PORT))
time.sleep(0.2)
data, addr = sock.recvfrom(1024)
dns = dpkt.dns.DNS(data)
if len(dns.qd) > 0 and dns.qd[0].type == dpkt.dns.DNS_A:
if dns.qd[0].name == TESTER_NAME:
print('Received query: {} '.format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns(TESTER_NAME), (MCAST_GRP,UDP_PORT))
elif dns.qd[0].name == TESTER_NAME_LWIP:
print('Received query: {} '.format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns_lwip(TESTER_NAME_LWIP, dns.id), addr)
if len(dns.an) > 0 and dns.an[0].type == dpkt.dns.DNS_A:
if dns.an[0].name == esp_host + u'.local':
print('Received answer to esp32-mdns query: {}'.format(dns.__repr__()))
esp_answered.set()
except socket.timeout:
break
except dpkt.UnpackError:
continue
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_mdns(env, extra_data):
    """
    steps: |
      1. join AP + init mdns example
      2. get the dut host name (and IP address)
      3. check the mdns name is accessible
      4. check DUT output if mdns advertised host is resolved
    """
    global stop_mdns_server
dut1 = env.get_dut('mdns-test', 'examples/protocols/mdns', dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, 'mdns-test.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('mdns-test_bin_size', '{}KB'.format(bin_size // 1024))
# 1. start mdns application
dut1.start_app()
# 2. get the dut host name (and IP address)
specific_host = dut1.expect(re.compile(r'mdns hostname set to: \[([^\]]+)\]'), timeout=30)
specific_host = str(specific_host[0])
thread1 = Thread(target=mdns_server, args=(specific_host,))
thread1.start()
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)[0]
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
stop_mdns_server.set()
thread1.join()
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
try:
# 3. check the mdns name is accessible
if not esp_answered.wait(timeout=30):
raise ValueError('Test has failed: did not receive mdns answer within timeout')
        # 4. check DUT output if mdns advertised host is resolved
dut1.expect(re.compile(r'mdns-test: Query A: tinytester.local resolved to: 127.0.0.1'), timeout=30)
dut1.expect(re.compile(r'mdns-test: gethostbyname: tinytester-lwip.local resolved to: 127.0.0.1'), timeout=30)
dut1.expect(re.compile(r'mdns-test: getaddrinfo: tinytester-lwip.local resolved to: 127.0.0.1'), timeout=30)
# 5. check the DUT answers to `dig` command
dig_output = subprocess.check_output(['dig', '+short', '-p', '5353', '@224.0.0.251',
'{}.local'.format(specific_host)])
print('Resolving {} using "dig" succeeded with:\n{}'.format(specific_host, dig_output))
        if ip_address.encode('utf-8') not in dig_output:
            raise ValueError('Test has failed: Incorrectly resolved DUT hostname using dig. '
                             "Output should've contained DUT's IP address: {}".format(ip_address))
finally:
stop_mdns_server.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mdns()
|
event.py
|
# encoding: UTF-8
import logging
from threading import Thread
from queue import Queue, Empty
from .threads import ReStartableThread
class EventManager:
'''
    Each event is identified by a key; multiple listeners can be added to or
    removed from a single key. When an event is published, the key and any
    accompanying arguments are passed to every listener.
    A logger may be supplied for logging; by default the logger for __name__
    is used.
'''
def __init__(self, logger=None, eventBlockTime=0.1):
self._eventQueue = Queue()
self._listenThread = ReStartableThread(target=self._run)
        # Maps each event key to the set of listener callbacks registered for it
self._map = {}
self.eventBlockTime = eventBlockTime
self.logger = logger if logger else logging.getLogger(__name__)
self.setDaemon(True)
def _run(self, stop_event):
while not stop_event.is_set():
try:
event = self._eventQueue.get(block=True, timeout=self.eventBlockTime)
if stop_event.is_set():
return
self.logger.info('process event. key=%s' %
(str(event[0])))
self._process(event)
except Empty:
pass
def _process(self, event):
if event[0] in self._map:
for listener in self._map[event[0]]:
def sub(listener=listener):
try:
listener(event[0], *event[1], **event[2])
except Exception as e:
                        self.logger.warning('listener exception. listener=%s, exception=%s, event_key=%s' %
(str(listener), str(e), str(event[0])))
raise
thread = Thread(target=sub)
                thread.daemon = self._daemon
thread.start()
def setDaemon(self, daemon):
        '''Set whether listener worker threads run as daemon threads.'''
self._daemon = True if daemon else False
self.logger.info('daemon set to ' + str(self._daemon))
def start(self):
        '''Start listening for events.'''
self._listenThread.start()
self.logger.info('listen start')
def stop(self):
        '''Stop listening for events.'''
self._listenThread.stop()
self._listenThread.join(1)
self.logger.info('listen stop')
def clear(self):
        '''Clear all pending events from the queue.'''
self._eventQueue.queue.clear()
def addListener(self, key, listener):
self.logger.info('add listener. key=%s, listener=%s' %
(str(key), str(listener)))
self._map.setdefault(key, set()).add(listener)
def removeListener(self, key, listener):
try:
self._map[key].remove(listener)
self.logger.info('removed listener. key=%s, listener=%s' % (
str(key), str(listener)))
except KeyError:
pass
def sendEvent(self, key, *args, **kwargs):
        '''Publish an event identified by key; any extra args and kwargs are passed to the listeners.'''
self.logger.info('send event. key=%s' %
(str(key)))
self._eventQueue.put((key, args, kwargs))
__all__ = ['EventManager']
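# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The listener name and event key below are made up for the example, and the
# sleep only gives the worker thread time to dispatch the event. Because this
# module uses a relative import, run it as a package module
# (e.g. `python -m <package>.event`) rather than as a plain script.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import time

    def on_price(key, price):
        print('listener got event key=%s price=%s' % (key, price))

    manager = EventManager()
    manager.addListener('price_update', on_price)
    manager.start()
    manager.sendEvent('price_update', 42.0)  # key plus positional args for the listener
    time.sleep(0.5)
    manager.stop()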
|
mnist_to_mr.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mnist convert tool for MindRecord
"""
from importlib import import_module
import os
import time
import gzip
import numpy as np
from mindspore import log as logger
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
try:
cv2 = import_module("cv2")
except ModuleNotFoundError:
cv2 = None
__all__ = ['MnistToMR']
class MnistToMR:
"""
A class to transform from Mnist to MindRecord.
Args:
source (str): Directory that contains t10k-images-idx3-ubyte.gz,
train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz
and train-labels-idx1-ubyte.gz.
destination (str): MindRecord file path to transform into, ensure that no file with the same name
exists in the directory.
partition_number (int, optional): The partition size. Default: 1.
Raises:
ValueError: If `source`, `destination`, `partition_number` is invalid.
"""
def __init__(self, source, destination, partition_number=1):
self.image_size = 28
self.num_channels = 1
check_filename(source)
self.source = source
self.train_data_filename_ = os.path.join(self.source, 'train-images-idx3-ubyte.gz')
self.train_labels_filename_ = os.path.join(self.source, 'train-labels-idx1-ubyte.gz')
self.test_data_filename_ = os.path.join(self.source, 't10k-images-idx3-ubyte.gz')
self.test_labels_filename_ = os.path.join(self.source, 't10k-labels-idx1-ubyte.gz')
check_filename(self.train_data_filename_)
check_filename(self.train_labels_filename_)
check_filename(self.test_data_filename_)
check_filename(self.test_labels_filename_)
check_filename(destination)
if partition_number is not None:
if not isinstance(partition_number, int):
raise ValueError("The parameter partition_number must be int")
self.partition_number = partition_number
else:
raise ValueError("The parameter partition_number must be int")
self.writer_train = FileWriter("{}_train.mindrecord".format(destination), self.partition_number)
self.writer_test = FileWriter("{}_test.mindrecord".format(destination), self.partition_number)
self.mnist_schema_json = {"label": {"type": "int64"}, "data": {"type": "bytes"}}
def _extract_images(self, filename):
"""Extract the images into a 4D tensor [image index, y, x, channels]."""
real_file_path = os.path.realpath(filename)
with gzip.open(real_file_path) as bytestream:
bytestream.read(16)
buf = bytestream.read()
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(-1, self.image_size, self.image_size, self.num_channels)
return data
def _extract_labels(self, filename):
"""Extract the labels into a vector of int64 label IDs."""
real_file_path = os.path.realpath(filename)
with gzip.open(real_file_path) as bytestream:
bytestream.read(8)
buf = bytestream.read()
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def _mnist_train_iterator(self):
"""
get data from mnist train data and label file.
Yields:
data (dict of list): mnist data list which contains dict.
"""
train_data = self._extract_images(self.train_data_filename_)
train_labels = self._extract_labels(self.train_labels_filename_)
for data, label in zip(train_data, train_labels):
_, img = cv2.imencode(".jpeg", data)
yield {"label": int(label), "data": img.tobytes()}
def _mnist_test_iterator(self):
"""
get data from mnist test data and label file.
Yields:
data (dict of list): mnist data list which contains dict.
"""
test_data = self._extract_images(self.test_data_filename_)
test_labels = self._extract_labels(self.test_labels_filename_)
for data, label in zip(test_data, test_labels):
_, img = cv2.imencode(".jpeg", data)
yield {"label": int(label), "data": img.tobytes()}
def _transform_train(self):
"""
Execute transformation from Mnist train part to MindRecord.
Returns:
MSRStatus, whether successfully written into MindRecord.
"""
t0_total = time.time()
logger.info("transformed MindRecord schema is: {}".format(self.mnist_schema_json))
# set the header size
self.writer_train.set_header_size(1 << 24)
# set the page size
self.writer_train.set_page_size(1 << 26)
# create the schema
self.writer_train.add_schema(self.mnist_schema_json, "mnist_schema")
# add the index
self.writer_train.add_index(["label"])
train_iter = self._mnist_train_iterator()
batch_size = 256
transform_count = 0
while True:
data_list = []
try:
for _ in range(batch_size):
                    data_list.append(next(train_iter))
transform_count += 1
self.writer_train.write_raw_data(data_list)
logger.info("transformed {} record...".format(transform_count))
except StopIteration:
if data_list:
self.writer_train.write_raw_data(data_list)
logger.info("transformed {} record...".format(transform_count))
break
ret = self.writer_train.commit()
t1_total = time.time()
logger.info("--------------------------------------------")
logger.info("Total time [train]: {}".format(t1_total - t0_total))
logger.info("--------------------------------------------")
return ret
def _transform_test(self):
"""
Execute transformation from Mnist test part to MindRecord.
Returns:
MSRStatus, whether Mnist is successfully transformed to MindRecord.
"""
t0_total = time.time()
logger.info("transformed MindRecord schema is: {}".format(self.mnist_schema_json))
# set the header size
self.writer_test.set_header_size(1 << 24)
# set the page size
self.writer_test.set_page_size(1 << 26)
# create the schema
self.writer_test.add_schema(self.mnist_schema_json, "mnist_schema")
# add the index
self.writer_test.add_index(["label"])
train_iter = self._mnist_test_iterator()
batch_size = 256
transform_count = 0
while True:
data_list = []
try:
for _ in range(batch_size):
                    data_list.append(next(train_iter))
transform_count += 1
self.writer_test.write_raw_data(data_list)
logger.info("transformed {} record...".format(transform_count))
except StopIteration:
if data_list:
self.writer_test.write_raw_data(data_list)
logger.info("transformed {} record...".format(transform_count))
break
ret = self.writer_test.commit()
t1_total = time.time()
logger.info("--------------------------------------------")
logger.info("Total time [test]: {}".format(t1_total - t0_total))
logger.info("--------------------------------------------")
return ret
def run(self):
"""
Execute transformation from Mnist to MindRecord.
Returns:
MSRStatus, SUCCESS or FAILED.
"""
if not cv2:
            raise ModuleNotFoundError("opencv-python module not found, please install it with: pip install opencv-python")
if self._transform_train() == FAILED:
return FAILED
if self._transform_test() == FAILED:
return FAILED
return SUCCESS
def transform(self):
"""
Encapsulate the run function to exit normally.
Returns:
MSRStatus, SUCCESS or FAILED.
"""
t = ExceptionThread(target=self.run)
t.daemon = True
t.start()
t.join()
if t.exitcode != 0:
raise t.exception
return t.res
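# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The paths below are placeholders: point the first at the directory holding
# the four gzipped MNIST files and the second at the desired output prefix
# (the tool writes <prefix>_train.mindrecord and <prefix>_test.mindrecord).
# ---------------------------------------------------------------------------
# mnist_transformer = MnistToMR("/path/to/mnist_gz", "/path/to/output/mnist", partition_number=1)
# mnist_transformer.transform()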
|
chat_client_class.py
|
import time
import socket
import select
import sys
from chat_utils import *
import client_state_machine as csm
import threading
class Client:
def __init__(self):
self.peer = ''
self.console_input = []
self.state = S_OFFLINE
self.system_msg = ''
self.local_msg = ''
self.peer_msg = ''
def quit(self):
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
def get_name(self):
return self.name
def init_chat(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
# if len(argv) > 1, we assume they're giving an IP address to connect to
# else, use the localhost as defined in chat_utils.py
if len(sys.argv) > 1:
alt_IP = sys.argv[-1]
alt_SERVER = (alt_IP, CHAT_PORT)
self.socket.connect(alt_SERVER)
else:
self.socket.connect(SERVER)
self.sm = csm.ClientSM(self.socket)
reading_thread = threading.Thread(target=self.read_input)
reading_thread.daemon = True
reading_thread.start()
def shutdown_chat(self):
return
def send(self, msg):
mysend(self.socket, msg)
def recv(self):
return myrecv(self.socket)
def get_msgs(self):
read, write, error = select.select([self.socket], [], [], 0)
my_msg = ''
peer_msg = []
peer_code = M_UNDEF
if len(self.console_input) > 0:
my_msg = self.console_input.pop(0)
if self.socket in read:
peer_msg = self.recv()
peer_code = peer_msg[0]
peer_msg = peer_msg[1:]
return my_msg, peer_code, peer_msg
def output(self):
if len(self.system_msg) > 0:
print(self.system_msg)
self.system_msg = ''
def login(self):
my_msg, peer_code, peer_msg = self.get_msgs()
if len(my_msg) > 0:
self.name = my_msg
msg = M_LOGIN + self.name
self.send(msg)
response = self.recv()
if response == M_LOGIN+'ok':
self.state = S_LOGGEDIN
self.sm.set_state(S_LOGGEDIN)
self.sm.set_myname(self.name)
self.print_instructions()
                return True
elif response == M_LOGIN + 'duplicate':
self.system_msg += 'Duplicate username, try again'
return False
else: # fix: dup is only one of the reasons
                return False
def read_input(self):
while True: # uncomment the below for a stress test
# if self.state == S_CHATTING:
# text = 'adfadsfafd' + self.name
# time.sleep(2)
# else:
text = sys.stdin.readline()[:-1]
self.console_input.append(text) # no need for lock, append is thread safe
def print_instructions(self):
self.system_msg += menu
def run_chat(self):
self.init_chat()
self.system_msg += 'Welcome to ICS chat\n'
self.system_msg += 'Please enter your name: '
self.output()
        while not self.login():
self.output()
self.system_msg += 'Welcome, ' + self.get_name() + '!'
self.output()
while self.sm.get_state() != S_OFFLINE:
self.proc()
self.output()
time.sleep(CHAT_WAIT)
self.quit()
#==============================================================================
# main processing loop
#==============================================================================
def proc(self):
my_msg, peer_code, peer_msg = self.get_msgs()
self.system_msg += self.sm.proc(my_msg, peer_code, peer_msg)
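# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a chat server from chat_utils is already listening on SERVER,
# or on the IP address given as the last command-line argument.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    client = Client()
    client.run_chat()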
|
wake.py
|
"""Wake word support."""
import json
import os
import re
import shutil
import struct
import subprocess
import threading
import time
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Type
from rhasspy.actor import RhasspyActor
from rhasspy.events import (
AudioData,
ListenForWakeWord,
MqttMessage,
MqttSubscribe,
PauseListeningForWakeWord,
ResumeListeningForWakeWord,
StartStreaming,
StopListeningForWakeWord,
StopStreaming,
WakeWordDetected,
WakeWordNotDetected,
)
from rhasspy.utils import read_dict
# -----------------------------------------------------------------------------
def get_wake_class(system: str) -> Type[RhasspyActor]:
"""Get type for profile wake system."""
assert system in [
"dummy",
"pocketsphinx",
"hermes",
"snowboy",
"precise",
"porcupine",
"command",
], f"Invalid wake system: {system}"
if system == "pocketsphinx":
# Use pocketsphinx locally
return PocketsphinxWakeListener
if system == "hermes":
# Use remote system via MQTT
return HermesWakeListener
if system == "snowboy":
# Use snowboy locally
return SnowboyWakeListener
if system == "precise":
# Use Mycroft Precise locally
return PreciseWakeListener
if system == "porcupine":
# Use Picovoice's porcupine locally
return PorcupineWakeListener
if system == "command":
# Use command-line listener
return CommandWakeListener
# Use dummy listener as a fallback
return DummyWakeListener
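# A minimal sketch of how the factory above might be wired up (illustrative
# only; the profile lookup and actor creation below are assumptions, not part
# of this module):
#
#     system = profile.get("wake.system", "dummy")
#     wake_class = get_wake_class(system)
#     wake_actor = actor_system.createActor(wake_class)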
# -----------------------------------------------------------------------------
class DummyWakeListener(RhasspyActor):
"""Does nothing"""
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
pass
# -----------------------------------------------------------------------------
# Pocketsphinx based wake word listener
# https://github.com/cmusphinx/pocketsphinx
# -----------------------------------------------------------------------------
class PocketsphinxWakeListener(RhasspyActor):
"""Listens for a wake word with pocketsphinx."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.decoder = None
self.decoder_started: bool = False
self.preload = False
self.not_detected = False
self.chunk_size = 960
self.recorder: Optional[RhasspyActor] = None
self.threshold = 0.0
self.keyphrase = ""
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.preload = self.config.get("preload", False)
self.not_detected = self.config.get("not_detected", False)
self.chunk_size = self.profile.get("wake.pocketsphinx.chunk_size", 960)
if self.preload:
with self._lock:
try:
self.load_decoder()
except Exception:
self._logger.exception("loading wake decoder")
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
self.load_decoder()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
if not self.decoder_started:
assert self.decoder is not None
self.decoder.start_utt()
self.decoder_started = True
audio_data = message.data
chunk = audio_data[: self.chunk_size]
detected = False
while chunk:
result = self.process_data(chunk)
if result is not None:
detected = True
self._logger.debug("Hotword detected (%s)", self.keyphrase)
detected_msg = WakeWordDetected(
self.keyphrase, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, detected_msg)
break
audio_data = audio_data[self.chunk_size :]
chunk = audio_data[: self.chunk_size]
# End utterance
if detected and self.decoder_started:
assert self.decoder is not None
self.decoder.end_utt()
self.decoder_started = False
if not detected and self.not_detected:
# Report non-detection
not_detected_msg = WakeWordNotDetected(
self.keyphrase, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, not_detected_msg)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
# End utterance
if self.decoder_started:
assert self.decoder is not None
self.decoder.end_utt()
self.decoder_started = False
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
self.transition("loaded")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
# -------------------------------------------------------------------------
def process_data(self, data: bytes) -> Optional[str]:
"""Process single chunk of audio."""
assert self.decoder is not None
self.decoder.process_raw(data, False, False)
hyp = self.decoder.hyp()
if hyp:
if self.decoder_started:
self.decoder.end_utt()
self.decoder_started = False
return hyp.hypstr
return None
# -------------------------------------------------------------------------
def load_decoder(self) -> None:
"""Loads speech decoder if not cached."""
if self.decoder is None:
import pocketsphinx
# Load decoder settings (use speech-to-text configuration as a fallback)
hmm_path = self.profile.read_path(
self.profile.get("wake.pocketsphinx.acoustic_model", None)
or self.profile.get("speech_to_text.pocketsphinx.acoustic_model")
)
dict_path = self.profile.read_path(
self.profile.get("wake.pocketsphinx.dictionary", None)
or self.profile.get("speech_to_text.pocketsphinx.dictionary")
)
self.threshold = float(
self.profile.get("wake.pocketsphinx.threshold", 1e-40)
)
self.keyphrase = self.profile.get("wake.pocketsphinx.keyphrase", "")
assert self.keyphrase, "No wake keyphrase"
# Fix casing
dict_casing = self.profile.get("speech_to_text.dictionary_casing", "")
if dict_casing == "lower":
self.keyphrase = self.keyphrase.lower()
elif dict_casing == "upper":
self.keyphrase = self.keyphrase.upper()
# Verify that keyphrase words are in dictionary
keyphrase_words = re.split(r"\s+", self.keyphrase)
with open(dict_path, "r") as dict_file:
word_dict = read_dict(dict_file)
for word in keyphrase_words:
if word not in word_dict:
self._logger.warning("%s not in dictionary", word)
self._logger.debug(
"Loading wake decoder with hmm=%s, dict=%s", hmm_path, dict_path
)
decoder_config = pocketsphinx.Decoder.default_config()
decoder_config.set_string("-hmm", hmm_path)
decoder_config.set_string("-dict", dict_path)
decoder_config.set_string("-keyphrase", self.keyphrase)
decoder_config.set_string("-logfn", "/dev/null")
decoder_config.set_float("-kws_threshold", self.threshold)
mllr_path = self.profile.read_path(
self.profile.get("wake.pocketsphinx.mllr_matrix")
)
if os.path.exists(mllr_path):
self._logger.debug(
"Using tuned MLLR matrix for acoustic model: %s", mllr_path
)
decoder_config.set_string("-mllr", mllr_path)
self.decoder = pocketsphinx.Decoder(decoder_config)
self.decoder_started = False
# -----------------------------------------------------------------------------
# Snowboy wake listener
# https://snowboy.kitt.ai
# -----------------------------------------------------------------------------
class SnowboyWakeListener(RhasspyActor):
"""Listen for wake word with snowboy."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.detectors: List[Any] = []
self.preload = False
self.not_detected = False
self.chunk_size = 960
self.recorder: Optional[RhasspyActor] = None
self.apply_frontend = False
self.models: Dict[str, Any] = {}
self.model_names: List[str] = []
self.single_detection: bool = True
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.preload = self.config.get("preload", False)
self.not_detected = self.config.get("not_detected", False)
self.chunk_size = self.profile.get("wake.snowboy.chunk_size", 960)
self.single_detection = self.profile.get("wake.snowboy.single_detection", True)
if self.preload:
try:
self.load_detectors()
except Exception as e:
self._logger.warning("preload: %s", e)
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
try:
self.load_detectors()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
except Exception:
self._logger.exception("in_loaded")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
audio_data = message.data
chunk = audio_data[: self.chunk_size]
detected = []
while chunk:
for detector_index, result_index in enumerate(self.process_data(chunk)):
if result_index > 0:
detected.append(detector_index)
if detected:
# Don't process the rest of the audio data if hotword has
# already been detected.
break
audio_data = audio_data[self.chunk_size :]
chunk = audio_data[: self.chunk_size]
# Handle results
if detected:
# Detected
detected_names = [self.model_names[i] for i in detected]
self._logger.debug("Hotword(s) detected: %s", detected_names)
# Send events
for model_name in detected_names:
detected_event = WakeWordDetected(
model_name, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, detected_event)
if self.single_detection:
# Only allow for a single hotword to be detected
break
elif self.not_detected:
# Not detected
for model_name in self.model_names:
not_detected_event = WakeWordNotDetected(
model_name, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, not_detected_event)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
self.transition("loaded")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
# -------------------------------------------------------------------------
def process_data(self, data: bytes) -> Iterable[int]:
"""Process single chunk of audio data."""
try:
for detector in self.detectors:
# Return is:
# -2 silence
# -1 error
# 0 voice
# n index n-1
yield detector.RunDetection(data)
        except Exception:
            self._logger.exception("process_data")
            # Report silence (-2) for every detector on error; returning a list
            # from a generator never reaches the caller.
            yield from [-2] * len(self.detectors)
# -------------------------------------------------------------------------
def load_detectors(self) -> None:
"""Load snowboy detector."""
if not self.detectors:
from snowboy import snowboydetect, snowboydecoder
# Load model names and settings
self.models = self._parse_models()
self.model_names = sorted(self.models)
# Create snowboy detectors
for model_name in self.model_names:
model_settings = self.models[model_name]
model_path = Path(self.profile.read_path(model_name))
assert model_path.is_file(), f"Missing {model_path}"
self._logger.debug("Loading snowboy model from %s", model_path)
detector = snowboydetect.SnowboyDetect(
snowboydecoder.RESOURCE_FILE.encode(), str(model_path).encode()
)
detector.SetSensitivity(str(model_settings["sensitivity"]).encode())
detector.SetAudioGain(float(model_settings["audio_gain"]))
detector.ApplyFrontend(bool(model_settings["apply_frontend"]))
self.detectors.append(detector)
self._logger.debug(
"Loaded snowboy model %s (%s)", model_name, model_settings
)
# -------------------------------------------------------------------------
def _parse_models(self) -> Dict[str, Dict[str, Any]]:
# Default sensitivity
sensitivity: str = str(self.profile.get("wake.snowboy.sensitivity", "0.5"))
# Default audio gain
audio_gain: float = float(self.profile.get("wake.snowboy.audio_gain", "1.0"))
# Default frontend
apply_frontend: bool = self.profile.get("wake.snowboy.apply_frontend", False)
model_names: List[str] = self.profile.get(
"wake.snowboy.model", "snowboy/snowboy.umdl"
).split(",")
model_settings: Dict[str, Dict[str, Any]] = self.profile.get(
"wake.snowboy.model_settings", {}
)
models_dict = {}
for model_name in model_names:
# Add default settings
settings = model_settings.get(model_name, {})
if "sensitivity" not in settings:
settings["sensitivity"] = sensitivity
if "audio_gain" not in settings:
settings["audio_gain"] = audio_gain
if "apply_frontend" not in settings:
settings["apply_frontend"] = apply_frontend
models_dict[model_name] = settings
return models_dict
# -------------------------------------------------------------------------
def get_problems(self) -> Dict[str, Any]:
"""Get problems at startup."""
problems: Dict[str, Any] = {}
try:
# pylint: disable=W0611
from snowboy import snowboydetect, snowboydecoder # noqa: F401
except Exception:
problems[
"snowboy not installed"
] = "The snowboy Python library is not installed. Try pip3 install snowboy"
# Verify that all snowboy models exist
models = self._parse_models()
model_paths = [
Path(self.profile.read_path(model_name)) for model_name in models
]
for model_path in model_paths:
if not model_path.is_file():
problems[
"Missing model"
] = f"Snowboy model could not be loaded from {model_path}"
return problems
# -----------------------------------------------------------------------------
# Mycroft Precise wake listener
# https://github.com/MycroftAI/mycroft-precise
# -----------------------------------------------------------------------------
class PreciseWakeListener(RhasspyActor):
"""Listens for a wake word using Mycroft Precise."""
def __init__(self) -> None:
# pylint: disable=E0401
from precise_runner import ReadWriteStream
RhasspyActor.__init__(self)
self.audio_buffer: bytes = bytes()
self.audio_info: Dict[Any, Any] = {}
self.chunk_delay = 0
self.chunk_size = 2048
self.detected: bool = False
self.engine = None
self.engine_path = ""
self.model_name = ""
self.model_path = ""
self.prediction_sem = threading.Semaphore()
self.preload = False
self.receivers: List[RhasspyActor] = []
self.recorder: Optional[RhasspyActor] = None
self.runner = None
self.send_not_detected = False
self.stream: Optional[ReadWriteStream] = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.preload = self.config.get("preload", False)
self.send_not_detected = self.config.get("not_detected", False)
self.chunk_size = self.profile.get("wake.precise.chunk_size", 2048)
self.chunk_delay = self.profile.get("wake.precise.chunk_delay", 0)
if self.preload:
try:
self.load_runner()
except Exception:
pass
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
try:
self.load_runner()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
except Exception:
self._logger.exception("in_loaded")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
try:
if isinstance(message, AudioData):
self.audio_info = message.info
self.detected = False
self.audio_buffer += message.data
num_chunks = len(self.audio_buffer) // self.chunk_size
if num_chunks > 0:
assert self.stream is not None
self.prediction_sem = threading.Semaphore()
for _ in range(num_chunks):
chunk = self.audio_buffer[: self.chunk_size]
if chunk:
self.stream.write(chunk)
self.audio_buffer = self.audio_buffer[self.chunk_size :]
if self.send_not_detected:
# Wait for all chunks to finish processing
for _ in range(num_chunks):
self.prediction_sem.acquire(timeout=0.1)
# Wait a little bit for the precise engine to finish processing
time.sleep(self.chunk_delay)
if not self.detected:
# Not detected
not_detected_event = WakeWordNotDetected(
self.model_name, audio_data_info=message.info
)
for receiver in self.receivers:
self.send(receiver, not_detected_event)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
self.transition("loaded")
elif isinstance(message, str):
# Detected
self._logger.debug("Hotword detected (%s)", self.model_name)
detected_event = WakeWordDetected(
self.model_name, audio_data_info=self.audio_info
)
for receiver in self.receivers:
self.send(receiver, detected_event)
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
except Exception:
self._logger.exception("in_listening")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
def to_stopped(self, from_state: str) -> None:
"""Transition to stopped state."""
self.stream = None
if self.runner is not None:
self.runner.stop()
# -------------------------------------------------------------------------
def load_runner(self) -> None:
"""Load precise runner."""
if self.engine is None:
# pylint: disable=E0401
from precise_runner import PreciseEngine
self.model_name = self.profile.get("wake.precise.model", "hey-mycroft-2.pb")
self.model_path = self.profile.read_path(self.model_name)
self.engine_path = os.path.expandvars(
self.profile.get("wake.precise.engine_path", "precise-engine")
)
self._logger.debug("Loading Precise engine at %s", self.engine_path)
self.engine = PreciseEngine(
self.engine_path, self.model_path, chunk_size=self.chunk_size
)
if self.runner is None:
# pylint: disable=E0401
from precise_runner import PreciseRunner, ReadWriteStream
self.stream = ReadWriteStream()
sensitivity = float(self.profile.get("wake.precise.sensitivity", 0.5))
trigger_level = int(self.profile.get("wake.precise.trigger_level", 3))
def on_prediction(prob: float) -> None:
self.prediction_sem.release()
def on_activation() -> None:
self.detected = True
self.send(self.myAddress, "activated")
self.runner = PreciseRunner(
self.engine,
stream=self.stream,
sensitivity=sensitivity,
trigger_level=trigger_level,
on_activation=on_activation,
on_prediction=on_prediction,
)
assert self.runner is not None
self.runner.start()
self._logger.debug(
"Loaded Mycroft Precise (model=%s, sensitivity=%s, trigger_level=%s)",
self.model_path,
sensitivity,
trigger_level,
)
# -------------------------------------------------------------------------
def get_problems(self) -> Dict[str, Any]:
"""Get problems at startup."""
problems: Dict[str, Any] = {}
try:
# pylint: disable=E0401,W0611
from precise_runner import PreciseRunner, ReadWriteStream # noqa: F401
except Exception:
problems[
"precise_runner not installed"
] = "The precise_runner Python library is not installed. Try pip3 install precise_runner"
engine_path = os.path.expandvars(
self.profile.get("wake.precise.engine_path", "precise-engine")
)
if not os.path.exists(engine_path) and not shutil.which(engine_path):
problems[
"Missing precise-engine"
] = 'The Mycroft Precise engine is not installed. Follow the <a href="https://github.com/MycroftAI/mycroft-precise#binary-install">binary install instructions</a>.'
model_name = self.profile.get("wake.precise.model", "hey-mycroft-2.pb")
model_path = self.profile.read_path(model_name)
if not os.path.exists(model_path):
problems[
"Missing model"
] = f"Your Mycroft Precise model could not be loaded from {model_path}"
return problems
# -----------------------------------------------------------------------------
# MQTT-based wake listener (Hermes protocol)
# https://docs.snips.ai/reference/hermes
# -----------------------------------------------------------------------------
class HermesWakeListener(RhasspyActor):
"""Listens for a wake word using MQTT."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.site_ids = "default"
self.wakeword_id = "default"
self.wake_topic = ""
self.mqtt: Optional[RhasspyActor] = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.mqtt = self.config["mqtt"]
# Subscribe to wake topic
self.site_ids = self.profile.get("mqtt.site_id", "default").split(",")
self.wakeword_id = self.profile.get("wake.hermes.wakeword_id", "default")
self.wake_topic = f"hermes/hotword/{self.wakeword_id}/detected"
self.send(self.mqtt, MqttSubscribe(self.wake_topic))
self.transition("loaded")
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForWakeWord):
self.receivers.append(message.receiver or sender)
self.transition("listening")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, MqttMessage):
if message.topic == self.wake_topic:
# Check site ID
payload = json.loads(message.payload.decode())
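                # Example Hermes "detected" payload (values illustrative; only
                # siteId is checked here):
                #   {"siteId": "default", "modelId": "hey_snips", "currentSensitivity": 0.5}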
payload_site_id = payload.get("siteId", "")
if payload_site_id not in self.site_ids:
self._logger.debug(
"Got detected message, but wrong site id (%s)", payload_site_id
)
return
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", self.wakeword_id)
result = WakeWordDetected(self.wakeword_id)
for receiver in self.receivers:
self.send(receiver, result)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
self.transition("loaded")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
# -----------------------------------------------------------------------------
# Porcupine Wake Listener
# https://github.com/Picovoice/Porcupine
# -----------------------------------------------------------------------------
class PorcupineWakeListener(RhasspyActor):
"""Wake word listener that uses picovoice's porcupine library"""
def __init__(self):
RhasspyActor.__init__(self)
self.audio_buffer: bytes = bytes()
self.chunk_format = ""
self.chunk_size = 1024
self.handle = None
self.keyword_paths: List[Path] = []
self.library_path = ""
self.model_path = ""
self.preload: bool = False
self.receivers: List[RhasspyActor] = []
self.recorder: Optional[RhasspyActor] = None
self.sensitivities = []
self.wake_proc = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.library_path = self.profile.read_path(
self.profile.get(
"wake.porcupine.library_path", "porcupine/libpv_porcupine.so"
)
)
self.model_path = self.profile.read_path(
self.profile.get(
"wake.porcupine.model_path", "porcupine/porcupine_params.pv"
)
)
self.keyword_paths = [
Path(self.profile.read_path(p))
for p in self.profile.get(
"wake.porcupine.keyword_path", "porcupine/porcupine.ppn"
).split(",")
]
self.sensitivities = [
float(s)
for s in str(self.profile.get("wake.porcupine.sensitivity", "0.5")).split(
","
)
]
self.preload = self.config.get("preload", False)
if self.preload:
try:
self.load_handle()
except Exception:
self._logger.exception("loading wake handle")
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForWakeWord):
try:
self.load_handle()
self.receivers.append(message.receiver or sender)
self.transition("listening")
if message.record:
self.send(self.recorder, StartStreaming(self.myAddress))
except Exception:
self._logger.exception("loading wake handle")
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
self.audio_buffer += message.data
num_chunks = len(self.audio_buffer) // self.chunk_size
if num_chunks > 0:
assert self.handle is not None
for _ in range(num_chunks):
chunk = self.audio_buffer[: self.chunk_size]
unpacked_chunk = struct.unpack_from(self.chunk_format, chunk)
self.audio_buffer = self.audio_buffer[self.chunk_size :]
# Process chunk
keyword_index = self.handle.process(unpacked_chunk)
if keyword_index:
if len(self.keyword_paths) == 1:
keyword_index = 0
wakeword_name = str(keyword_index)
if keyword_index < len(self.keyword_paths):
wakeword_name = self.keyword_paths[keyword_index].stem
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", keyword_index)
result = WakeWordDetected(wakeword_name)
for receiver in self.receivers:
self.send(receiver, result)
elif isinstance(message, WakeWordDetected):
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", message.name)
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, WakeWordNotDetected):
# Pass downstream to receivers
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if message.record:
self.send(self.recorder, StopStreaming(self.myAddress))
if self.handle is not None:
self.handle.delete()
self.handle = None
self.transition("started")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
def load_handle(self):
"""Load porcupine library."""
if self.handle is None:
for kw_path in self.keyword_paths:
assert kw_path.is_file(), f"Missing {kw_path}"
from porcupine import Porcupine
self.handle = Porcupine(
self.library_path,
self.model_path,
keyword_file_paths=[str(p) for p in self.keyword_paths],
sensitivities=self.sensitivities,
)
# 16-bit
self.chunk_size = self.handle.frame_length * 2
self.chunk_format = "h" * self.handle.frame_length
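# Each chunk therefore carries exactly frame_length signed 16-bit samples (2 bytes each);
# struct.unpack_from turns it into the tuple of ints that handle.process() consumes above.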
self._logger.debug(
"Loaded porcupine (keyword=%s). Expecting sample rate=%s, frame length=%s",
self.keyword_paths,
self.handle.sample_rate,
self.handle.frame_length,
)
# -----------------------------------------------------------------------------
# Command Wake Listener
# -----------------------------------------------------------------------------
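# A hedged sketch of the profile settings this listener reads (the nested JSON layout for the
# dotted profile keys is assumed; the program path and arguments are purely illustrative):
#   "wake": {
#     "command": {
#       "program": "/usr/local/bin/my-wake-detector",
#       "arguments": ["--threshold", "0.5"]
#     }
#   }
# The external program is expected to run until a wake word is heard and then print the
# wakeword id on stdout, which post_result() below forwards as a WakeWordDetected message.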
class CommandWakeListener(RhasspyActor):
"""Command-line based wake word listener"""
def __init__(self):
RhasspyActor.__init__(self)
self.receivers: List[RhasspyActor] = []
self.wake_proc = None
self.command: List[str] = []
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
program = os.path.expandvars(self.profile.get("wake.command.program"))
arguments = [
os.path.expandvars(str(a))
for a in self.profile.get("wake.command.arguments", [])
]
self.command = [program] + arguments
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForWakeWord):
self.receivers.append(message.receiver or sender)
self.wake_proc = subprocess.Popen(self.command, stdout=subprocess.PIPE)
def post_result() -> None:
# STDOUT -> text
try:
out, _ = self.wake_proc.communicate()
wakeword_id = out.decode().strip()
except Exception:
wakeword_id = ""
self._logger.exception("post_result")
# Actor will forward
if wakeword_id:
self.send(self.myAddress, WakeWordDetected(wakeword_id))
else:
self.send(self.myAddress, WakeWordNotDetected(wakeword_id))
self.transition("listening")
# Wait for program in a separate thread
threading.Thread(target=post_result, daemon=True).start()
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, WakeWordDetected):
# Pass downstream to receivers
self._logger.debug("Hotword detected (%s)", message.name)
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, WakeWordNotDetected):
# Pass downstream to receivers
for receiver in self.receivers:
self.send(receiver, message)
elif isinstance(message, StopListeningForWakeWord):
if message.clear_all:
self.receivers.clear()
else:
try:
self.receivers.remove(message.receiver or sender)
except ValueError:
pass
if not self.receivers:
if self.wake_proc is not None:
self.wake_proc.terminate()
self.transition("started")
elif isinstance(message, PauseListeningForWakeWord):
self.transition("paused")
def in_paused(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in paused state."""
if isinstance(message, ResumeListeningForWakeWord):
self.transition("listening")
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo="%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, eosio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="led"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission eosio@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
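# Pacing: each cycle fires `nthreads` transfers in parallel, so to sustain
# speeds[cmdInd] transfers/sec overall a single cycle must last nthreads / speed seconds.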
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
cam.py
|
"""Wrapper around two RPI and the multi channel switch.
It relies on the use of the Multi_Adapter_Board_2Channel_uc444 for switching camera via the I2C and GPIO control.
See https://github.com/ArduCAM/RaspberryPi/tree/master/Multi_Camera_Adapter/Multi_Adapter_Board_2Channel_uc444
"""
import time
import cv2 as cv
from threading import Thread, Event, Lock
from ..error import CameraNotFoundError
class BackgroundVideoCapture(object):
"""Wrapper on OpenCV VideoCapture object.
Args:
camera_index (int): index of the used camera (see OpenCV doc for details)
resolution (int, int): desired resolution for the grabbed frame (the resolution must be compatible with the driver)
Instantiating this object (with lazy_setup=False) will automatically start polling images in the background;
with the default lazy_setup=True, polling starts on the first call to read().
This wrapper is responsible for automatically polling images from the camera.
This ensures that we can always access the most recent image.
"""
def __init__(self, camera_index, resolution=(600, 800), lazy_setup=True):
"""Open video capture on the specified camera."""
self.camera_index = camera_index
self.resolution = resolution
if not lazy_setup:
self._setup()
def _setup(self):
self.cap = cv.VideoCapture(self.camera_index)
if not self.cap.isOpened():
raise CameraNotFoundError(
message=f'Camera {self.camera_index} not found!',
camera_id=self.camera_index,
)
self.cap.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc('M', 'J', 'P', 'G'))
self.cap.set(cv.CAP_PROP_FRAME_HEIGHT, self.resolution[0])
self.cap.set(cv.CAP_PROP_FRAME_WIDTH, self.resolution[1])
self._lock = Lock()
self.running = Event()
self._img = None
self._t = Thread(target=self._read_loop)
self._t.daemon = True
self._t.start()
for _ in range(50):
time.sleep(0.1)
if self._img is not None:
break
def close(self):
"""Stop polling image and release the Video Capture."""
self.running.clear()
if self._t.is_alive():
self._t.join()
self.cap.release()
def _read_loop(self):
self.running.set()
while self.running.is_set():
b, img = self.cap.read()
if b:
with self._lock:
self._img = img.copy()
def read(self):
"""Retrieve the last grabbed image."""
if not hasattr(self, 'cap'):
self._setup()
with self._lock:
return self._img is not None, self._img
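# Minimal usage sketch (not executed here; assumes a camera is reachable at index 0 and that
# this module is imported from its package, since it uses a relative import above):
#
#   cam = BackgroundVideoCapture(camera_index=0, lazy_setup=False)
#   ok, frame = cam.read()   # always the most recently polled frame
#   if ok:
#       print('frame shape:', frame.shape)
#   cam.close()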
|
camera.py
|
import cv2,io,base64
from PIL import Image
import threading
from time import sleep
from imutils.video import WebcamVideoStream
import numpy as np
#import PIL.Image
import time
#from pygame import mixer
#from tkinker import *
#from PIL import ImageTk, Image
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import dlib
from facePoints import image_score
from facePoints import predict
from engineio.payload import Payload
Payload.max_decode_packets = 100
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("t.dat")
class VideoCamera(object):
def __init__(self):
self.smiling = False
self.history = [0,0,0,0,0,0,0]
self.total = 0
self.smframes = 0
self.vid = cv2.VideoCapture(0)
self.show_vector = True
#added above
self.to_process = []
self.output_image_rgb = []
self.output_image_bgr = []
thread = threading.Thread(target=self.keep_processing, args=())
thread.daemon = True
thread.start()
def process_one(self):
if not self.to_process:
return
input_str = self.to_process.pop(0)
imgdata = base64.b64decode(input_str)
input_img = np.array(Image.open(io.BytesIO(imgdata)))
"""
After getting the image you can do any preprocessing here
"""
#_______________________________________Performing some pre processing_______________________________________________
bgr_image = self.process_frame(input_img)
rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB) # Changing color from bgr to rgb
#______________________________________________________________________________________________________________________
ret,rgb_jpeg = cv2.imencode('.jpg',rgb_image)
_,bgr_jpeg = cv2.imencode('.jpg',bgr_image)
self.output_image_rgb.append(rgb_jpeg.tobytes())
self.output_image_bgr.append(bgr_jpeg.tobytes())
def keep_processing(self):
while True:
self.process_one()
sleep(0.01)
def enqueue_input(self, input):
self.to_process.append(input)
def get_frame(self):
while not self.output_image_rgb:
sleep(0.05)
return self.output_image_rgb.pop(0) , self.output_image_bgr.pop(0)
def gen_panel(self,h,w):
frame = np.full((h, w, 3), 255, np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
if self.total != 0:
cv2.putText(frame, str(int(100*(self.smframes/self.total)))+"%", (0,int(w/2)), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
if self.smiling:
cv2.circle(frame, (int(h/2), int(w/2)), 25, (0, 255, 0), -1)
else:
cv2.circle(frame, (int(h/2), int(w/2)), 25, (255, 0, 0), -1)
return frame
def process_frame(self, frame):
frame = imutils.resize(frame, width=400)
shape = image_score(frame)
smiling = False
if np.ndim(shape) != 0:
# self.is_smiling.set("Status: Not Smiling")
if predict(frame):
# self.is_smiling.set("Status: Smiling")
smiling = True
self.smframes = self.smframes + 1
self.smiling = smiling
#sh = self.history
#sh.pop(len(sh)-1)
#sh = [int(smiling)] + sh
self.total = self.total + 1
#self.history = sh
if self.show_vector:
for idx, (x, y) in enumerate(shape):
if idx in range(48,68):
#color points in mouth red
if smiling:
cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)
else:
cv2.circle(frame, (x, y), 1, (255, 0, 0), -1)
# else:
# self.is_smiling.set("Status: Face not Detected")
#frame design goes here
h, w, c = frame.shape
background = np.full((200, w, 3), 255, np.uint8)
panel = self.gen_panel(h+200, 640-w)
frame = cv2.vconcat([frame, background])
frame = cv2.hconcat([frame, panel])
#end frame design
#ret, jpeg = cv2.imencode('.jpg', cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
#return jpeg
#return jpeg.tobytes()
#return frame
return frame
def show_vector_points(self):
# Shows the vector points on the mouth of the video feed
self.show_vector = not self.show_vector
|
server.py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JobSubmission daemon.
This is run as a separate process from the main web server.
The intention is that the actual act of job submission could
be separated from the web app onto a different machine or sandbox.
Furthermore, the implementation could change.
"""
# TODO(philip):
# - Write stdout/stderr back to HDFS.
# - Be more resilient to failures
# - Cache localized files locally to avoid re-downloading
# - Support multiple filesystems. Jar might be local to server (via, say, NFS)
import sys
import os
import tempfile
import traceback
import shutil
import subprocess
import logging
import processing
import datetime
import stat
from thrift.transport.TSocket import TServerSocket
from thrift.transport.TTransport import TBufferedTransportFactory
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
from thrift.server.TServer import TThreadedServer
from django.contrib.auth.models import User
from django.core import urlresolvers
from jobsubd import JobSubmissionService
from jobsubd.ttypes import SubmissionHandle, JobData, State, SubmissionError, PreFabLocalizedFiles
from jobsub.server_models import ServerSubmissionState
from jobbrowser.views import single_job
import desktop.lib.django_util
import hadoop.cluster
import hadoop.conf
import jobsub.conf
LOG = logging.getLogger(__name__)
HOST = jobsub.conf.JOBSUBD_HOST.get()
PORT = jobsub.conf.JOBSUBD_PORT.get()
FS = hadoop.cluster.get_hdfs()
def coerce_exceptions(f):
"""
Wrapper/decorator that maps all exceptions
into SubmissionErrors, which get passed
along via Thrift. Prevents clients from seeing
TTransportException.
"""
def wrapper(*args, **kwds):
try:
return f(*args, **kwds)
except SubmissionError, e:
# These are already forwardable; no need for further wrapping.
raise
except:
logging.exception("Coercing to SubmissionError")
type, instance, _ = sys.exc_info()
raise SubmissionError(
message=str(instance),
detail=traceback.format_exc())
return wrapper
def run_plan(id, plan, tmp_dir):
PlanRunner(id, plan, tmp_dir).run()
class PlanRunner(object):
# Map of pre-fab files
PREFAB_LOCALIZED_FILES = {
PreFabLocalizedFiles.STREAMING: hadoop.conf.HADOOP_STREAMING_JAR.get(),
}
def __init__(self, id, plan, tmp_dir):
self.id = id
self.plan = plan
self.tmp_dir = tmp_dir
def _send_notification(self, hadoop_job_ids, is_success):
try:
username = self.plan.user
user = User.objects.get(username=username)
if not user.email:
return
if is_success:
result = "succeeded"
else:
result = "failed"
subject = "Hadoop job %s: %s" % (result, self.plan.name)
body = "Hadoop job '%s' has %s.\n\n" % (self.plan.name, result)
links = [ "Job ID: %s\n%s/#launch=JobBrowser:%s\n" %
(job_id,
desktop.lib.django_util.get_desktop_uri_prefix(),
urlresolvers.reverse(single_job, kwargs={'jobid': job_id}))
for job_id in hadoop_job_ids ]
body += '\n'.join(links)
user.email_user(subject, body)
logging.info("Sent notification email about job %d." % (self.id,))
except Exception, ex:
# Catch all. SMTP can throw a large variety of errors.
logging.error("Failed to send job completion notification via e-mail to %s: %s" % (username, ex))
def setup_logging(self):
# Write logs out into the same stderr file that the subprocesses use.
root_logger = logging.getLogger()
handler = logging.StreamHandler(self.stderr)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-6s %(module)s %(message)s')
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def run(self):
try:
try:
self.stdout = self.internal_file("stdout", "a")
self.stderr = self.internal_file("stderr", "a")
# Touch the jobs file.
self.internal_file("jobs", "a").close()
self.setup_logging()
self.work_dir = os.path.join(self.tmp_dir, "work")
os.mkdir(self.work_dir)
os.chdir(self.work_dir)
success = True
for step in self.plan.steps:
if step.localize_files_step is not None:
self.run_localize_files_step(step.localize_files_step)
elif step.bin_hadoop_step is not None:
self.run_bin_hadoop_step(step.bin_hadoop_step)
else:
raise Exception("Unexpected step to run: " % repr(step))
# TODO(philip): Copy stdout/stderr to HDFS, on request?
except Exception:
logging.exception("jobsubd PlanRunner saw exception.")
success = False
raise
finally:
# We've finished, update the database
state = ServerSubmissionState.objects.get(id=self.id)
if success:
state.submission_state = State.SUCCESS
else:
state.submission_state = State.FAILURE
state.end_time = datetime.datetime.now()
state.save()
logging.info("Marked jobsubd job %d as done." % self.id)
hadoop_job_ids = self.internal_file("jobs", "r").read().splitlines()
self._send_notification(hadoop_job_ids, success)
# TODO(philip): Clean up tmpdir after a while?
def run_localize_files_step(self, step):
for x in step.localize_files:
self.localize_file(x)
def localize_file(self, loc_file):
assert os.path.sep not in loc_file.target_name, "Target %s must be filename in working directory." % repr(loc_file.target_name)
target = os.path.join(self.work_dir, loc_file.target_name)
if loc_file.pre_fab_localized_file is not None:
source = self.PREFAB_LOCALIZED_FILES[loc_file.pre_fab_localized_file]
LOG.info("Linking %s->%s" % (source, target))
os.symlink(source, target)
elif loc_file.path_on_hdfs is not None:
# TODO(philip): Could do caching based on checksums here.
FS.setuser(self.plan.user)
LOG.info("Copying %s->%s" % (loc_file.path_on_hdfs, target))
src = FS.open(loc_file.path_on_hdfs)
try:
dst = file(target, "w")
try:
shutil.copyfileobj(src, dst)
finally:
dst.close()
finally:
src.close()
def internal_file(self, name, options="r"):
return file(self.internal_file_name(name), options)
def internal_file_name(self, name):
return os.path.join(self.tmp_dir, name)
def run_bin_hadoop_step(self, step):
"""
user.name is used by FileSystem.getHomeDirectory().
The environment variables for _USER and _GROUPS are used
by the aspectj aspect to overwrite Hadoop's notion of
users and groups.
"""
java_properties = {}
java_properties["hue.suffix"] = "-via-hue"
java_properties["user.name"] = self.plan.user
java_prop_str = " ".join("-D%s=%s" % (k,v) for k, v in java_properties.iteritems())
env = {
'HADOOP_HOME': hadoop.conf.HADOOP_HOME.get(),
'HADOOP_OPTS': "-javaagent:%s %s" % (jobsub.conf.ASPECTJWEAVER.get(), java_prop_str),
'HADOOP_CLASSPATH': jobsub.conf.ASPECTPATH.get(),
'HUE_JOBTRACE_LOG': self.internal_file_name("jobs"),
'HUE_JOBSUB_USER': self.plan.user,
'HUE_JOBSUB_GROUPS': ",".join(self.plan.groups),
}
java_home = os.getenv('JAVA_HOME')
if java_home:
env["JAVA_HOME"] = java_home
for k, v in env.iteritems():
assert v is not None, "Environment key %s missing value." % k
args = [ hadoop.conf.HADOOP_BIN.get() ]
if hadoop.conf.HADOOP_CONF_DIR.get():
args.append("--config")
args.append(hadoop.conf.HADOOP_CONF_DIR.get())
args += step.arguments
LOG.info("Starting %s. (Env: %s)", repr(args), repr(env))
LOG.info("Running: %s" % " ".join(args))
self.pipe = subprocess.Popen(
args,
stdin=None,
cwd=self.work_dir,
stdout=self.stdout,
stderr=self.stderr,
shell=False,
close_fds=True,
env=env)
retcode = self.pipe.wait()
if 0 != retcode:
raise Exception("bin/hadoop returned non-zero %d" % retcode)
LOG.info("bin/hadoop returned %d" % retcode)
class JobSubmissionServiceImpl(object):
@coerce_exceptions
def get_job_data(self, handle):
# TODO: Could use waitpid(pid, WNOHANG) to update the
# state, not to mention update the state if it's no longer
# running.
# Look up the submission
state = ServerSubmissionState.objects.get(id=handle.id)
def job_file(name):
"""Helper to make a directory name."""
return file(os.path.join(state.tmp_dir, name))
# Handle stdout, stderr
def tail(name):
"""Helper to find the last 10K of a file."""
TAIL_SIZE = 10*1024 # 10KB
try:
f = job_file(name)
try:
file_size = os.stat(f.name)[stat.ST_SIZE]
seek = max(0, file_size - TAIL_SIZE)
f.seek(seek)
return f.read(TAIL_SIZE)
finally:
f.close()
except IOError:
return "No longer available."
j = JobData()
j.stdout_tail = tail("stdout")
j.stderr_tail = tail("stderr")
j.state = state.submission_state
try:
j.hadoop_job_ids = job_file("jobs").read().splitlines()
except IOError:
j.hadoop_job_ids = []
return j
@coerce_exceptions
def submit(self, plan):
"""
Starts a subprocess to manage the submission, and returns quickly.
"""
tmp_dir = tempfile.mkdtemp(dir="/tmp", prefix="jobsub-")
state = ServerSubmissionState(submission_state=State.SUBMITTED, tmp_dir=tmp_dir)
state.save() # Early save to get an "id"
process = processing.Process(target=run_plan, args=(state.id, plan, tmp_dir), name=plan.name)
process.setDaemon(True)
process.start()
state.pid = process.getPid()
state.submission_state = State.RUNNING
state.save()
return SubmissionHandle(id=state.id)
def main():
"""
main() loop, called from jobsubd management command.
"""
LOG.info("Starting daemon on port %s" % PORT)
sock = TServerSocket(PORT)
sock.host = HOST
TThreadedServer(JobSubmissionService.Processor(JobSubmissionServiceImpl()),
sock,
TBufferedTransportFactory(),
TBinaryProtocolFactory()).serve()
|
t29.py
|
'''
Created on June 19, 2019
@author: bkd
'''
import time, threading
from kdGUI import *
def run():
for i in range(10000):
time.sleep(1)
print(i)
p.setValue(i * 1000)
app = Window("abk")
p = Progressbar(app)
p.setMaximum(10000)
p.setOrientation("horizontal")
p["mode"] = "determinate"
# p.start(1)
t = threading.Thread(target=run)
t.setDaemon(True)
t.start()
app.run()
|
do-partition-stop.py
|
#! /usr/bin/env python
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt. Contact: ctb@msu.edu
#
import khmer
import sys
import threading
import Queue
import gc
import os.path
K = 32
HASHTABLE_SIZE = int(1e9)
N_HT = 4
COUNTING_SIZE = int(1e8)
SUBSET_SIZE = int(1e4)
N_THREADS = 8
ht = None
###
save_ht = False
load_ht = False
save_merged_pmap = True
remove_orig_pmap = True
assert not (save_ht and load_ht) # incompatible
if not save_merged_pmap and remove_orig_pmap:
print '** warning, all the pmap files are going away! no permanent record!'
print ''
###
def worker(q, basename):
while 1:
try:
(ht, n, start, stop) = q.get(False)
except Queue.Empty:
print 'exiting'
return
outfile = basename + '.subset.%d.pmap' % (n,)
if os.path.exists(outfile):
print 'SKIPPING', basename, ' -- already exists'
continue
print 'starting:', basename, n
subset = ht.do_subset_partition(start, stop)
print 'saving:', basename, n
ht.save_subset_partitionmap(subset, outfile)
del subset
gc.collect()
def main(filename):
global ht
basename = os.path.basename(filename)
print 'input file to partition: %s' % filename
print '-- settings:'
print 'K', K
print 'HASHTABLE SIZE %g' % HASHTABLE_SIZE
print 'N HASHTABLES %d' % N_HT
print 'SUBSET SIZE', SUBSET_SIZE
print 'N THREADS', N_THREADS
print '--'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
# populate the hash table and tag set
if not load_ht:
print 'reading sequences and loading tagset from %s...' % (filename,)
ht.consume_fasta_and_tag(filename)
# save to a file (optional)
if save_ht:
print 'saving...'
ht.save(basename + '.ht')
print 'saving tagset...'
ht.save_tagset(basename + '.tagset')
# calculate the hashtable occupancy
print '---'
print 'hashtable occupancy:', ht.n_occupied() / float(HASHTABLE_SIZE)
print '---'
else:
print 'loading ht %s.ht' % basename
ht.load(basename + '.ht')
print 'loading tagset %s.tagset...' % basename
ht.load_tagset(basename + '.tagset')
###
counting = khmer.new_counting_hash(K, COUNTING_SIZE, N_HT)
ht.traverse_from_tags(counting, 40, 2000, 5)
print 'saving stoptags binary'
ht.save_stop_tags(basename + '.stoptags')
sys.exit(0)
#
x = counting.abundance_distribution(filename)
fp = open(basename + '.tabund', 'w')
for i, n in enumerate(x):
if n:
print >>fp, i, n
fp.close()
print 'converting to stoptags'
ht.hitraverse_to_stoptags(filename, counting, 5)
print 'saving stoptags binary'
ht.save_stop_tags(basename + '.stoptags')
print 'saving stoptags text'
ht.print_stop_tags(basename + '.stoptags.txt')
print 'eliminating counting hash'
del counting
gc.collect()
sys.exit(0)
###
# divide the tags up into subsets
print 'divvying up tags'
divvy = ht.divide_tags_into_subsets(SUBSET_SIZE)
n_subsets = len(divvy)
divvy.append(0)
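# divvy[i]..divvy[i+1] bound the tags of subset i; the appended 0 acts as the stop
# marker for the final subset (presumably meaning "run to the end" in do_subset_partition).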
# build a queue of tasks:
worker_q = Queue.Queue()
for i in range(0, n_subsets):
start = divvy[i]
end = divvy[i + 1]
worker_q.put((ht, i, start, end))
print 'enqueued %d subset tasks' % n_subsets
open('%s.info' % basename, 'w').write('%d subsets total\n' % (n_subsets))
threads = []
for n in range(N_THREADS):
t = threading.Thread(target=worker, args=(worker_q, basename))
threads.append(t)
t.start()
print 'started threads'
# wait for threads
for t in threads:
t.join()
print 'done making subsets! see %s.subset.*.pmap' % (basename,)
###
print 'erasing old ht, creating new'
del ht
gc.collect()
# create a new, empty ht object for merging; K matters, but not
# hashtable size.
ht = khmer.new_hashbits(K, 1, 1)
# load & merge all pmap files
for i in range(0, n_subsets):
pmap_file = basename + '.subset.%d.pmap' % (i,)
print 'loading', pmap_file
ht.merge_subset_from_disk(pmap_file)
# save merged partitionmap
if save_merged_pmap:
print 'saving merged pmap to %s.pmap.merged' % basename
ht.save_partitionmap(basename + '.pmap.merged')
if remove_orig_pmap:
print 'removing subset pmap files'
for i in range(0, n_subsets):
pmap_file = basename + '.subset.%d.pmap' % (i,)
os.unlink(pmap_file)
# output partitions!
n_partitions = ht.output_partitions(filename, basename + '.part')
(n_partitions, n_singletons) = ht.count_partitions()
print 'output partitions:', n_partitions
print 'pmap partitions:', n_partitions
print 'singletons:', n_singletons
if __name__ == '__main__':
main(sys.argv[1])
|
__init__.py
|
#!/usr/bin/python3
# @todo logging
# @todo extra options for url like , verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import time
import os
import timeago
import flask_login
from flask_login import login_required
import threading
from threading import Event
import queue
from flask import Flask, render_template, request, send_from_directory, abort, redirect, url_for, flash
from feedgen.feed import FeedGenerator
from flask import make_response
import datetime
import pytz
from copy import deepcopy
__version__ = '0.39.7'
datastore = None
# Local
running_update_threads = []
ticker_thread = None
extra_stylesheets = []
update_q = queue.Queue()
notification_q = queue.Queue()
# Needs to be set this way because we also build and publish via pip
base_path = os.path.dirname(os.path.realpath(__file__))
app = Flask(__name__,
static_url_path="{}/static".format(base_path),
template_folder="{}/templates".format(base_path))
# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False
app.config['LOGIN_DISABLED'] = False
#app.config["EXPLAIN_TEMPLATE_LOADING"] = True
# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
def init_app_secret(datastore_path):
secret = ""
path = "{}/secret.txt".format(datastore_path)
try:
with open(path, "r") as f:
secret = f.read()
except FileNotFoundError:
import secrets
with open(path, "w") as f:
secret = secrets.token_hex(32)
f.write(secret)
return secret
# Remember: Python passes objects by reference
# populate_form in wtforms didn't work for me. (try using a setattr() obj type on datastore.watch?)
def populate_form_from_watch(form, watch):
for i in form.__dict__.keys():
if i[0] != '_':
p = getattr(form, i)
if hasattr(p, 'data') and i in watch:
setattr(p, "data", watch[i])
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
# Worker thread tells us which UUID it is currently processing.
for t in running_update_threads:
if t.current_uuid == watch_obj['uuid']:
return "Checking now.."
if watch_obj['last_checked'] == 0:
return 'Not yet'
return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
return timeago.format(timestamp, time.time())
# return timeago.format(timestamp, time.time())
# return datetime.datetime.utcfromtimestamp(timestamp).strftime(format)
class User(flask_login.UserMixin):
id=None
def set_password(self, password):
return True
def get_user(self, email="defaultuser@changedetection.io"):
return self
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
def check_password(self, password):
import hashlib
import base64
# Getting the values back out
raw_salt_pass = base64.b64decode(datastore.data['settings']['application']['password'])
salt_from_storage = raw_salt_pass[:32] # 32 is the length of the salt
# Use the exact same setup you used to generate the key, but this time put in the password to check
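# Stored layout (as written when the password was set): base64( 32-byte salt + PBKDF2-HMAC-SHA256 key ),
# so re-deriving the key from the submitted password with the stored salt and prepending
# that same salt must reproduce the stored blob exactly.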
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt_from_storage,
100000
)
new_key = salt_from_storage + new_key
return new_key == raw_salt_pass
pass
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
#app.config.update(config or {})
login_manager = flask_login.LoginManager(app)
login_manager.login_view = 'login'
app.secret_key = init_app_secret(config['datastore_path'])
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
@login_manager.user_loader
def user_loader(email):
user = User()
user.get_user(email)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
# @todo validate it's a URL of this host and use that
return redirect(url_for('login', next=url_for('index')))
@app.route('/logout')
def logout():
flask_login.logout_user()
return redirect(url_for('index'))
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route('/login', methods=['GET', 'POST'])
def login():
if not datastore.data['settings']['application']['password']:
flash("Login not required, no password enabled.", "notice")
return redirect(url_for('index'))
if request.method == 'GET':
output = render_template("login.html")
return output
user = User()
user.id = "defaultuser@changedetection.io"
password = request.form.get('password')
if (user.check_password(password)):
flask_login.login_user(user, remember=True)
next = request.args.get('next')
# if not is_safe_url(next):
# return flask.abort(400)
return redirect(next or url_for('index'))
else:
flash('Incorrect password', 'error')
return redirect(url_for('login'))
@app.before_request
def do_something_whenever_a_request_comes_in():
# Disable password login if there is not one set
app.config['LOGIN_DISABLED'] = datastore.data['settings']['application']['password'] == False
# For the RSS path, allow access via a token
if request.path == '/rss' and request.args.get('token'):
app_rss_token = datastore.data['settings']['application']['rss_access_token']
rss_url_token = request.args.get('token')
if app_rss_token == rss_url_token:
app.config['LOGIN_DISABLED'] = True
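# i.e. GET /rss?token=<rss_access_token> lets feed readers fetch the feed without logging in.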
@app.route("/rss", methods=['GET'])
@login_required
def rss():
limit_tag = request.args.get('tag')
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
# @todo needs a .itemsWithTag() or something
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
if not watch['viewed']:
# Re #239 - GUID needs to be individual for each event
# @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
guid = "{}/{}".format(watch['uuid'], watch['last_changed'])
fe = fg.add_entry()
# Include a link to the diff page, they will have to login here to see if password protection is enabled.
# Description is the page you watch, link takes you to the diff JS UI page
base_url = datastore.data['settings']['application']['base_url']
if base_url == '':
base_url = "<base-url-env-var-not-set>"
diff_link = {'href': "{}{}".format(base_url, url_for('diff_history_page', uuid=watch['uuid']))}
# @todo use title if it exists
fe.link(link=diff_link)
fe.title(title=watch['url'])
# @todo in the future <description><![CDATA[<html><body>Any code html is valid.</body></html>]]></description>
fe.description(description=watch['url'])
fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch['newest_history_key']))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
@app.route("/", methods=['GET'])
@login_required
def index():
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
# Redirect for the old rss path which used the /?rss=true
if request.args.get('rss'):
return redirect(url_for('rss', tag=limit_tag))
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
output = render_template("watch-overview.html",
form=form,
watches=sorted_watches,
tags=existing_tags,
active_tag=limit_tag,
app_rss_token=datastore.data['settings']['application']['rss_access_token'],
has_unviewed=datastore.data['has_unviewed'])
return output
@app.route("/scrub", methods=['GET', 'POST'])
@login_required
def scrub_page():
import re
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
limit_date = request.form.get('limit_date')
limit_timestamp = 0
# Re #149 - allow empty/0 timestamp limit
if len(limit_date):
try:
limit_date = limit_date.replace('T', ' ')
# I noticed chrome will show '/' but actually submit '-'
limit_date = limit_date.replace('-', '/')
# In the case that :ss seconds are supplied
limit_date = re.sub(r'(\d\d:\d\d)(:\d\d)', '\\1', limit_date)
str_to_dt = datetime.datetime.strptime(limit_date, '%Y/%m/%d %H:%M')
limit_timestamp = int(str_to_dt.timestamp())
if limit_timestamp > time.time():
flash("Timestamp is in the future, cannot continue.", 'error')
return redirect(url_for('scrub_page'))
except ValueError:
flash('Incorrect date format, cannot continue.', 'error')
return redirect(url_for('scrub_page'))
if confirmtext == 'scrub':
changes_removed = 0
for uuid, watch in datastore.data['watching'].items():
if limit_timestamp:
changes_removed += datastore.scrub_watch(uuid, limit_timestamp=limit_timestamp)
else:
changes_removed += datastore.scrub_watch(uuid)
flash("Cleared snapshot history ({} snapshots removed)".format(changes_removed))
else:
flash('Incorrect confirmation text.', 'error')
return redirect(url_for('index'))
output = render_template("scrub.html")
return output
# If they edited an existing watch, we need to know to reset the current/previous md5 to include
# the excluded text.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from changedetectionio import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
# 0 means that there's only one, so there should be no 'unviewed' history available
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid]['history'].keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid]['history'][newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = handler.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
if datastore.data['settings']['application'].get('ignore_whitespace', False):
checksum = hashlib.md5(stripped_content.translate(None, b'\r\n\t ')).hexdigest()
else:
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_required
def edit_page(uuid):
from changedetectionio import forms
form = forms.watchForm(request.form)
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if request.method == 'GET':
if not uuid in datastore.data['watching']:
flash("No watch with the UUID %s found." % (uuid), "error")
return redirect(url_for('index'))
populate_form_from_watch(form, datastore.data['watching'][uuid])
if datastore.data['watching'][uuid]['fetch_backend'] is None:
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
if request.method == 'POST' and form.validate():
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
if form.minutes_between_check.data == datastore.data['settings']['requests']['minutes_between_check']:
form.minutes_between_check.data = None
if form.fetch_backend.data == datastore.data['settings']['application']['fetch_backend']:
form.fetch_backend.data = None
update_obj = {'url': form.url.data.strip(),
'minutes_between_check': form.minutes_between_check.data,
'tag': form.tag.data.strip(),
'title': form.title.data.strip(),
'headers': form.headers.data,
'body': form.body.data,
'method': form.method.data,
'fetch_backend': form.fetch_backend.data,
'trigger_text': form.trigger_text.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
'extract_title_as_title': form.extract_title_as_title.data
}
# Notification URLs
datastore.data['watching'][uuid]['notification_urls'] = form.notification_urls.data
# Ignore text
form_ignore_text = form.ignore_text.data
datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form_ignore_text:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid]['css_filter'] = form.css_filter.data.strip()
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid].update(update_obj)
flash("Updated watch.")
# Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
# But in the case something is added we should save straight away
datastore.sync_to_json()
# Queue the watch for immediate recheck
update_q.put(uuid)
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': form.url.data.strip(),
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
# Diff page [edit] link should go back to diff page
if request.args.get("next") and request.args.get("next") == 'diff':
return redirect(url_for('diff_history_page', uuid=uuid))
else:
return redirect(url_for('index'))
else:
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
# Re #110 offer the default minutes
using_default_minutes = False
if form.minutes_between_check.data == None:
form.minutes_between_check.data = datastore.data['settings']['requests']['minutes_between_check']
using_default_minutes = True
output = render_template("edit.html",
uuid=uuid,
watch=datastore.data['watching'][uuid],
form=form,
using_default_minutes=using_default_minutes,
current_base_url = datastore.data['settings']['application']['base_url']
)
return output
@app.route("/settings", methods=['GET', "POST"])
@login_required
def settings_page():
from changedetectionio import forms
from changedetectionio import content_fetcher
form = forms.globalSettingsForm(request.form)
if request.method == 'GET':
form.minutes_between_check.data = int(datastore.data['settings']['requests']['minutes_between_check'])
form.notification_urls.data = datastore.data['settings']['application']['notification_urls']
form.global_ignore_text.data = datastore.data['settings']['application']['global_ignore_text']
form.ignore_whitespace.data = datastore.data['settings']['application']['ignore_whitespace']
form.extract_title_as_title.data = datastore.data['settings']['application']['extract_title_as_title']
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
form.notification_title.data = datastore.data['settings']['application']['notification_title']
form.notification_body.data = datastore.data['settings']['application']['notification_body']
form.notification_format.data = datastore.data['settings']['application']['notification_format']
form.base_url.data = datastore.data['settings']['application']['base_url']
# Password unset is a GET
if request.values.get('removepassword') == 'yes':
from pathlib import Path
datastore.data['settings']['application']['password'] = False
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings_page'))
if request.method == 'POST' and form.validate():
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['requests']['minutes_between_check'] = form.minutes_between_check.data
datastore.data['settings']['application']['extract_title_as_title'] = form.extract_title_as_title.data
datastore.data['settings']['application']['fetch_backend'] = form.fetch_backend.data
datastore.data['settings']['application']['notification_title'] = form.notification_title.data
datastore.data['settings']['application']['notification_body'] = form.notification_body.data
datastore.data['settings']['application']['notification_format'] = form.notification_format.data
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['application']['base_url'] = form.base_url.data
datastore.data['settings']['application']['global_ignore_text'] = form.global_ignore_text.data
datastore.data['settings']['application']['ignore_whitespace'] = form.ignore_whitespace.data
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': "Test from changedetection.io!",
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
if form.password.encrypted_password:
datastore.data['settings']['application']['password'] = form.password.encrypted_password
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('index'))
datastore.needs_write = True
flash("Settings updated.")
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
output = render_template("settings.html", form=form, current_base_url = datastore.data['settings']['application']['base_url'])
return output
@app.route("/import", methods=['GET', "POST"])
@login_required
def import_page():
import validators
remaining_urls = []
good = 0
if request.method == 'POST':
urls = request.values.get('urls').split("\n")
for url in urls:
url = url.strip()
# Flask wtforms validators won't work with basic auth, use the validators package
if len(url) and validators.url(url):
new_uuid = datastore.add_watch(url=url.strip(), tag="")
# Straight into the queue.
update_q.put(new_uuid)
good += 1
else:
if len(url):
remaining_urls.append(url)
flash("{} Imported, {} Skipped.".format(good, len(remaining_urls)))
if len(remaining_urls) == 0:
# Looking good, redirect to index.
return redirect(url_for('index'))
# Could be some remaining, or we could be on GET
output = render_template("import.html",
remaining="\n".join(remaining_urls)
)
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/api/mark-all-viewed", methods=['GET'])
@login_required
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, watch['newest_history_key'])
flash("Cleared all statuses.")
return redirect(url_for('index'))
@app.route("/diff/<string:uuid>", methods=['GET'])
@login_required
def diff_history_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
dates = list(watch['history'].keys())
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
if len(dates) < 2:
flash("Not enough saved change detection snapshots to produce a report.", "error")
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, dates[0])
newest_file = watch['history'][dates[0]]
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
previous_version = request.args.get('previous_version')
try:
previous_file = watch['history'][previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = watch['history'][dates[1]]
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
output = render_template("diff.html", watch_a=watch,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
uuid=uuid,
newest_version_timestamp=dates[0],
current_previous_version=str(previous_version),
current_diff_url=watch['url'],
extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
left_sticky= True )
return output
@app.route("/preview/<string:uuid>", methods=['GET'])
@login_required
def preview_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
newest = list(watch['history'].keys())[-1]
with open(watch['history'][newest], 'r') as f:
content = f.readlines()
output = render_template("preview.html",
content=content,
extra_stylesheets=extra_stylesheets,
current_diff_url=watch['url'],
uuid=uuid)
return output
@app.route("/api/<string:uuid>/snapshot/current", methods=['GET'])
@login_required
def api_snapshot(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
try:
watch = datastore.data['watching'][uuid]
except KeyError:
return abort(400, "No history found for the specified link, bad link?")
newest = list(watch['history'].keys())[-1]
with open(watch['history'][newest], 'r') as f:
content = f.read()
resp = make_response(content)
resp.headers['Content-Type'] = 'text/plain'
return resp
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("static/images", path="favicon.ico")
# We're good but backups are even better!
@app.route("/backup", methods=['GET'])
@login_required
def get_backup():
import zipfile
from pathlib import Path
# Remove any existing backup file, for now we just keep one file
for previous_backup_filename in Path(datastore_o.datastore_path).rglob('changedetection-backup-*.zip'):
os.unlink(previous_backup_filename)
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDS from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(datastore_o.datastore_path, backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Be sure we're written fresh
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(datastore_o.datastore_path, "url-watches.json"), arcname="url-watches.json")
# Add the flask app secret
zipObj.write(os.path.join(datastore_o.datastore_path, "secret.txt"), arcname="secret.txt")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(datastore_o.datastore_path).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(datastore_o.datastore_path, ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Create a list file with just the URLs, so it's easier to port somewhere else in the future
list_file = os.path.join(datastore_o.datastore_path, "url-list.txt")
with open(list_file, "w") as f:
for uuid in datastore.data['watching']:
url = datastore.data['watching'][uuid]['url']
f.write("{}\r\n".format(url))
# Add it to the Zip
zipObj.write(list_file,
arcname="url-list.txt",
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Send_from_directory needs to be the full absolute path
return send_from_directory(os.path.abspath(datastore_o.datastore_path), backupname, as_attachment=True)
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
# These files should be in our subdirectory
try:
return send_from_directory("static/{}".format(group), path=filename)
except FileNotFoundError:
abort(404)
@app.route("/api/add", methods=['POST'])
@login_required
def api_watch_add():
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
if form.validate():
url = request.form.get('url').strip()
if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error")
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
# Straight into the queue.
update_q.put(new_uuid)
flash("Watch added.")
return redirect(url_for('index'))
else:
flash("Error")
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
@login_required
def api_delete():
uuid = request.args.get('uuid')
datastore.delete(uuid)
flash('Deleted.')
return redirect(url_for('index'))
@app.route("/api/clone", methods=['GET'])
@login_required
def api_clone():
uuid = request.args.get('uuid')
# Mainly for testing: 'first' returns the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
new_uuid = datastore.clone(uuid)
update_q.put(new_uuid)
flash('Cloned.')
return redirect(url_for('index'))
@app.route("/api/checknow", methods=['GET'])
@login_required
def api_watch_checknow():
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag is not None:
    # Items that have this current tag
    for watch_uuid, watch in datastore.data['watching'].items():
        if tag in watch['tag']:
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
flash("{} watches are rechecking.".format(i))
return redirect(url_for('index', tag=tag))
# @todo handle ctrl break
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks)
ticker_thread.start()
threading.Thread(target=notification_runner).start()
# Check for new release version, but not when running in test/build
if not os.getenv("GITHUB_REF", False):
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
    r = requests.post("https://changedetection.io/check-ver.php",
                      data={'version': __version__,
                            'app_guid': datastore.data['app_guid'],
                            'watch_count': len(datastore.data['watching'])
                            },
                      verify=False)
    if "new_version" in r.text:
        app.config['NEW_VERSION_AVAILABLE'] = True
except Exception:
    # A failed or malformed version check is not fatal, just skip it this time
    pass
# Check daily
app.config.exit.wait(86400)
def notification_runner():
while not app.config.exit.is_set():
try:
# At the moment only one thread runs (single runner)
n_object = notification_q.get(block=False)
except queue.Empty:
time.sleep(1)
else:
# Process notifications
try:
from changedetectionio import notification
notification.process_notification(n_object, datastore)
except Exception as e:
print("Watch URL: {} Error {}".format(n_object['watch_url'], e))
# Thread runner to check every minute, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks():
from changedetectionio import update_worker
# Spin up Workers.
for _ in range(datastore.data['settings']['requests']['workers']):
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
# Get a list of watches by UUID that are currently fetching data
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
# Re #232 - Deepcopy the data incase it changes while we're iterating through it all
copied_datastore = deepcopy(datastore)
# Check for watches outside of the time threshold to put in the thread queue.
for uuid, watch in copied_datastore.data['watching'].items():
# If they supplied an individual entry minutes to threshold.
if 'minutes_between_check' in watch and watch['minutes_between_check'] is not None:
# Cast to int just incase
max_time = int(watch['minutes_between_check']) * 60
else:
# Default system wide.
max_time = int(copied_datastore.data['settings']['requests']['minutes_between_check']) * 60
threshold = time.time() - max_time
# Yeah, put it in the queue, it's more than time.
if not watch['paused'] and watch['last_checked'] <= threshold:
if uuid not in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Wait a few seconds before checking the list again
time.sleep(3)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
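# --- Illustrative sketch (not part of the original module) ---
# The ticker loop above decides whether a watch is due by comparing its last check time
# against either the per-watch 'minutes_between_check' or the system-wide default.
# The helper below restates that arithmetic in isolation; the name and the call pattern
# are assumptions made for demonstration only, nothing in the app calls it.
def _is_watch_due(watch, default_minutes, now=None):
    """Return True when a watch is past its recheck threshold (illustrative only)."""
    import time
    if now is None:
        now = time.time()
    if watch.get('minutes_between_check') is not None:
        max_time = int(watch['minutes_between_check']) * 60
    else:
        max_time = int(default_minutes) * 60
    threshold = now - max_time
    # A watch is due when it is not paused and was last checked before the threshold.
    return (not watch.get('paused')) and watch.get('last_checked', 0) <= threshold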
|
test_sys.py
|
import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import locale
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@test.support.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated",
"dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = test.support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('5P2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('13P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'4P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
if __name__ == "__main__":
unittest.main()
|
__init__.py
|
# target-himydata
# Copyright 2018 Himydata, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This product includes software developed at
# himydata, Inc.(https://himydata.com/).
import argparse
import logging
import logging.config
import os
import copy
import io
import sys
import time
import json
import threading
import http.client
import urllib
import pkg_resources
from datetime import datetime
from dateutil import tz
from strict_rfc3339 import rfc3339_to_timestamp
from jsonschema import Draft4Validator, validators, FormatChecker
from target_himydata.client import Client
import singer
logger = singer.get_logger()
DEFAULT_HIMYDATA_URL = 'https://platform.himydata.com/'
def write_last_state(states):
logger.info('Persisted batch of {} records to Himydata Platform'.format(len(states)))
last_state = None
for state in reversed(states):
if state is not None:
last_state = state
break
if last_state:
line = json.dumps(last_state)
logger.debug('Emitting state {}'.format(line))
sys.stdout.write("{}\n".format(line))
sys.stdout.flush()
class DryRunClient(object):
"""A client that doesn't actually persist to the Gate.
Useful for testing.
"""
def __init__(self, buffer_size=100):
self.pending_callback_args = []
self.buffer_size = buffer_size
def flush(self):
logger.info("---- DRY RUN: NOTHING IS BEING PERSISTED TO Himydata Platform ----")
write_last_state(self.pending_callback_args)
self.pending_callback_args = []
def push(self, himydata_record, table_name, callback_arg=None):
self.pending_callback_args.append(callback_arg)
if len(self.pending_callback_args) % self.buffer_size == 0:
self.flush()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.flush()
def _required_key(msg, k):
if k not in msg:
raise Exception("Message is missing required key '{}': {}".format(k, msg))
return msg[k]
def persist_lines(himydata_client, lines):
state = None
schemas = {}
key_properties = {}
for line in lines:
try:
message = singer.parse_message(line)
if isinstance(message, singer.RecordMessage):
himydata_record = message.record
himydata_record["keys"] = key_properties[message.stream]
table_name = message.stream
himydata_client.push(himydata_record, table_name, state)
state = None
elif isinstance(message, singer.StateMessage):
state = message.value
elif isinstance(message, singer.SchemaMessage):
himydata_record = message.schema
himydata_record["keys"] = message.key_properties
table_name = message.stream
schemas[message.stream] = message.schema
key_properties[message.stream] = message.key_properties
himydata_client.check_dataset(himydata_record, table_name)
else:
raise Exception("Unrecognized message {} parsed from line {}".format(message, line))
except Exception as e:
logger.debug('Failed to process line, skipping it: {}'.format(e))
return state
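# --- Illustrative sketch (not part of the original target) ---
# persist_lines() expects Singer-formatted JSON lines: a SCHEMA message per stream must
# arrive before its RECORD messages (it records the key properties used for each record),
# and STATE messages mark progress that is echoed back via write_last_state(). The stream
# name and values below are made up for demonstration; nothing in the target calls this.
def _example_singer_lines():
    """Return example Singer messages in the order persist_lines() expects (illustrative only)."""
    return [
        json.dumps({"type": "SCHEMA", "stream": "users",
                    "schema": {"properties": {"id": {"type": "integer"}}},
                    "key_properties": ["id"]}),
        json.dumps({"type": "RECORD", "stream": "users", "record": {"id": 1}}),
        json.dumps({"type": "STATE", "value": {"users": 1}}),
    ]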
def himydata_client(args):
"""Returns an instance of Himydata Client or DryRunClient"""
if args.dry_run:
return DryRunClient()
else:
with open(args.config) as input:
config = json.load(input)
if not config.get('disable_collection', True):
    logger.info('The config parameter "disable_collection" is set to false, so anonymous ' +
                'usage data is being sent to stitchdata.com. Remove the parameter or set ' +
                'it to true to stop this.')
threading.Thread(target=collect).start()
missing_fields = []
if 'client_id' in config:
client_id = config['client_id']
else:
missing_fields.append('client_id')
if 'himydata_url' in config:
himydata_url = config['himydata_url']
else:
himydata_url = DEFAULT_HIMYDATA_URL
if 'api_key' in config:
api_key = config['api_key']
else:
missing_fields.append('api_key')
if missing_fields:
raise Exception('Configuration is missing required fields: {}'
.format(missing_fields))
return Client(client_id, api_key, himydata_url=himydata_url, callback_function=write_last_state)
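# --- Illustrative sketch (not part of the original target) ---
# himydata_client() reads a JSON config file; the keys below are the ones it looks for.
# All values are placeholders, and 'himydata_url' / 'disable_collection' are optional.
EXAMPLE_CONFIG = {
    "client_id": "<your-client-id>",
    "api_key": "<your-api-key>",
    "himydata_url": DEFAULT_HIMYDATA_URL,   # optional, defaults to the platform URL
    "disable_collection": True,             # optional
}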
def collect():
try:
version = pkg_resources.get_distribution('target-himydata').version
conn = http.client.HTTPSConnection('collector.stitchdata.com', timeout=10)
conn.connect()
params = {
'e': 'se',
'aid': 'singer',
'se_ca': 'target-himydata',
'se_ac': 'open',
'se_la': version,
}
conn.request('GET', '/i?' + urllib.parse.urlencode(params))
response = conn.getresponse()
conn.close()
except:
logger.debug('Collection request failed')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Config file')
parser.add_argument('-n', '--dry-run', help='Dry run - Do not push data to Himydata Platform', action='store_true')
args = parser.parse_args()
if not args.dry_run and args.config is None:
parser.error("config file required if not in dry run mode")
input = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
with himydata_client(args) as client:
state = persist_lines(client, input)
write_last_state([state])
logger.info("Exiting normally")
if __name__ == '__main__':
main()
|
crawler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import datetime
import geopy
import geopy.distance
import json
import logging
import math
import os
import requests
import ssl
import threading
import urllib.request
import urllib.parse
from geopy.distance import vincenty
from geopy.distance import VincentyDistance
from queue import Queue
# change for logging visibility
# logging.getLogger().setLevel(logging.INFO)
# urls for google api web service
radar_url = "https://maps.googleapis.com/maps/api/place/radarsearch/json?location={},{}&radius={}&types={}&key={}"
detail_url = "https://maps.googleapis.com/maps/api/place/details/json?placeid={}&key={}"
# user agent for populartimes request
user_agent = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/54.0.2840.98 Safari/537.36"}
def get_circle_centers(b1, b2, radius):
"""
Covers the area within the given bounds with circles.
This is done by calculating the lat/lng distances and the number of circles needed to fill the area.
Because neighbouring circles only touch at single points, a second grid offset by (+radius, +radius)
is used to cover the gaps in between.
:param b1: south-west corner of the bounds (lat, lng)
:param b2: north-east corner of the bounds (lat, lng)
:param radius: circle radius in meters; reduce for high-density areas
:return: list of circle centers that cover the area between the lower and upper bounds
"""
sw = geopy.Point(b1)
ne = geopy.Point(b2)
# north/east distances
dist_lat = int(vincenty(geopy.Point(sw[0], sw[1]), geopy.Point(ne[0], sw[1])).meters)
dist_lng = int(vincenty(geopy.Point(sw[0], sw[1]), geopy.Point(sw[0], ne[1])).meters)
def cover(p_start, n_lat, n_lng, r):
_coords = []
for i in range(n_lat):
for j in range(n_lng):
v_north = VincentyDistance(meters=i * r * 2)
v_east = VincentyDistance(meters=j * r * 2)
_coords.append(v_north.destination(v_east.destination(point=p_start, bearing=90), bearing=0))
return _coords
coords = []
# get circles for base cover
coords += cover(sw,
math.ceil((dist_lat - radius) / (2 * radius)) + 1,
math.ceil((dist_lng - radius) / (2 * radius)) + 1, radius)
# update south-west for second cover
vc_radius = VincentyDistance(meters=radius)
sw = vc_radius.destination(vc_radius.destination(point=sw, bearing=0), bearing=90)
# get circles for offset cover
coords += cover(sw,
math.ceil((dist_lat - 2 * radius) / (2 * radius)) + 1,
math.ceil((dist_lng - 2 * radius) / (2 * radius)) + 1, radius)
# only return the coordinates
return [c[:2] for c in coords]
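# --- Illustrative sketch (not part of the original crawler) ---
# Example of how get_circle_centers() could be called for a small bounding box.
# The coordinates and the 180 m radius are made-up values for demonstration; nothing calls this.
def _example_circle_cover():
    lower = (48.137, 11.560)   # hypothetical south-west corner (lat, lng)
    upper = (48.150, 11.590)   # hypothetical north-east corner (lat, lng)
    centers = get_circle_centers(lower, upper, 180)
    logging.info("covering the bounds with %d radar-search circles", len(centers))
    return centers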
def worker_radar():
"""
worker that takes coordinates from the queue and starts a radar search for them
:return:
"""
while True:
item = q_radar.get()
get_radar(item[0], item[1])
q_radar.task_done()
def get_radar(_lat, _lng):
# places - radar search - https://developers.google.com/places/web-service/search?hl=de#RadarSearchRequests
radar_str = radar_url.format(_lat, _lng, params["radius"], "|".join(params["type"]), params["API_key"])
resp = json.loads(requests.get(radar_str, auth=('user', 'pass')).text)
check_response_code(resp)
radar = resp["results"]
if len(radar) > 200:
logging.warning("more than 200 places in search radius, some data may get lost")
bounds = params["bounds"]
# retrieve google ids for detail search
for place in radar:
geo = place["geometry"]["location"]
if bounds["lower"]["lat"] <= geo["lat"] <= bounds["upper"]["lat"] \
and bounds["lower"]["lng"] <= geo["lng"] <= bounds["upper"]["lng"]:
# this isn't thread safe, but we don't really care, since at worst, a set entry is simply overwritten
if place["place_id"] not in g_place_ids:
g_place_ids.add(place["place_id"])
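# --- Illustrative sketch (not part of the original crawler) ---
# The radar workers above expect the module-level globals params, q_radar and g_place_ids
# to be set up by the run function (not shown in this excerpt). The sketch below shows one
# plausible way the pieces fit together; the thread count and all values are assumptions,
# and nothing in the crawler calls this helper.
def _example_radar_run(example_params):
    global params, q_radar, g_place_ids
    params = example_params
    q_radar = Queue()
    g_place_ids = set()
    for _ in range(4):                       # a handful of radar worker threads
        t = threading.Thread(target=worker_radar)
        t.daemon = True
        t.start()
    bounds = params["bounds"]
    # Feed every circle center inside the bounds to the radar queue.
    for lat, lng in get_circle_centers((bounds["lower"]["lat"], bounds["lower"]["lng"]),
                                       (bounds["upper"]["lat"], bounds["upper"]["lng"]),
                                       params["radius"]):
        q_radar.put((lat, lng))
    q_radar.join()                           # wait until every circle has been searched
    return g_place_ids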
def worker_detail():
"""
worker that takes a place id from the queue and starts the detailed data retrieval for it
:return:
"""
while True:
item = q_detail.get()
get_detail(item)
q_detail.task_done()
def get_popularity_for_day(popularity):
days_json = [[0 for _ in range(24)] for _ in range(7)]
for day in popularity:
day_no, pop_times = day[:2]
if pop_times is not None:
for el in pop_times:
hour, pop = el[:2]
days_json[day_no - 1][hour] = pop
# day wrap
if hour == 23:
day_no = day_no % 7 + 1
# {"name" : "monday", "data": [...]} for each weekday as list
return [
{
"name": list(calendar.day_name)[d],
"data": days_json[d]
} for d in range(7)
]
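# --- Illustrative sketch (not part of the original crawler) ---
# get_popularity_for_day() reshapes the raw popularity array parsed from Google
# (a list of [day_number, [[hour, popularity, ...], ...]] entries, with Monday == 1)
# into one {"name": <weekday>, "data": [24 hourly values]} dict per weekday.
# The input below is a tiny fabricated sample; nothing in the crawler calls this.
def _example_popularity_reshape():
    raw = [
        [1, [[8, 35], [9, 60], [10, 80]]],   # Monday: a few morning values only
        [2, None],                           # Tuesday: no data available
    ]
    per_day = get_popularity_for_day(raw)
    assert per_day[0]["name"] == "Monday"
    assert per_day[0]["data"][9] == 60
    return per_day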
def get_detail(place_id):
"""
loads detail data for a given place id
:return:
"""
# places api - detail search - https://developers.google.com/places/web-service/details?hl=de
detail_str = detail_url.format(place_id, params["API_key"])
resp = json.loads(requests.get(detail_str, auth=('user', 'pass')).text)
check_response_code(resp)
detail = resp["result"]
searchterm = "{} {}".format(detail["name"], detail["formatted_address"])
popularity, rating, rating_n = get_populartimes(searchterm)
detail_json = {
"id": detail["place_id"],
"name": detail["name"],
"address": detail["formatted_address"],
"searchterm": searchterm,
"types": detail["types"],
"coordinates": detail["geometry"]["location"],
}
# check optional return parameters
if rating is not None:
detail_json["rating"] = rating
elif "rating" in detail:
detail_json["rating"] = detail["rating"]
if rating_n is None:
detail_json["rating_n"] = 0
else:
detail_json["rating_n"] = rating_n
if "international_phone_number" in detail:
detail_json["international_phone_number"] = detail["international_phone_number"]
# get current popularity
place_identifier = "{} {}".format(detail["name"], detail["formatted_address"])
_, _, _, current_popularity = get_current_popularity(place_identifier)
if current_popularity is not None:
detail_json["current_popularity"] = current_popularity
detail_json["populartimes"] = get_popularity_for_day(popularity) if popularity is not None else []
if params["all_places"] or len(detail_json["populartimes"]) > 0:
results.append(detail_json)
def get_populartimes(place_identifier):
"""
sends request to google/search and parses json response to get data
:param place_identifier: string with place name and address
:return: tuple with popular times, rating and number of ratings/comments
"""
params_url = {
"tbm": "map",
"hl": "de",
"tch": 1,
"q": urllib.parse.quote_plus(place_identifier)
}
search_url = "https://www.google.de/search?" + "&".join(k + "=" + str(v) for k, v in params_url.items())
logging.info("searchterm: " + search_url)
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
resp = urllib.request.urlopen(urllib.request.Request(url=search_url, data=None, headers=user_agent),
context=gcontext)
data = resp.read().decode('utf-8')
# find eof json
jend = data.rfind("}")
if jend >= 0:
data = data[:jend + 1]
jdata = json.loads(data)["d"]
jdata = json.loads(jdata[4:])
popular_times, rating, rating_n = None, None, None
try:
# get info from result array, has to be adapted if backend api changes
info = jdata[0][1][0][14]
rating = info[4][7]
rating_n = info[4][8]
popular_times = info[84][0]
# ignore, there is either no info available or no popular times
    # TypeError: rating/rating_n/populartimes is None
# IndexError: info is not available
except (TypeError, IndexError):
pass
return popular_times, rating, rating_n
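# Hedged call sketch (the place name/address below is a placeholder): the
# identifier is just "<name> <formatted_address>", the same string that
# get_detail() builds; any of the returned values may be None when Google has
# no data for the place.
def _get_populartimes_example():
    popular_times, rating, rating_n = get_populartimes("Some Cafe Main Street 1 Example City")
    return popular_times, rating, rating_n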
def get_current_popularity(place_identifier):
"""
request information for a place and parse current popularity
:param place_identifier: name and address string
:return:
"""
params_url = {
"tbm": "map",
"tch": 1,
"q": urllib.parse.quote_plus(place_identifier),
# TODO construct own proto buffer
"pb": "!4m12!1m3!1d4005.9771522653964!2d-122.42072974863942!3d37.8077459796541!2m3!1f0!2f0!3f0!3m2!1i1125!2i976"
"!4f13.1!7i20!10b1!12m6!2m3!5m1!6e2!20e3!10b1!16b1!19m3!2m2!1i392!2i106!20m61!2m2!1i203!2i100!3m2!2i4!5b1"
"!6m6!1m2!1i86!2i86!1m2!1i408!2i200!7m46!1m3!1e1!2b0!3e3!1m3!1e2!2b1!3e2!1m3!1e2!2b0!3e3!1m3!1e3!2b0!3e3!"
"1m3!1e4!2b0!3e3!1m3!1e8!2b0!3e3!1m3!1e3!2b1!3e2!1m3!1e9!2b1!3e2!1m3!1e10!2b0!3e3!1m3!1e10!2b1!3e2!1m3!1e"
"10!2b0!3e4!2b1!4b1!9b0!22m6!1sa9fVWea_MsX8adX8j8AE%3A1!2zMWk6Mix0OjExODg3LGU6MSxwOmE5ZlZXZWFfTXNYOGFkWDh"
"qOEFFOjE!7e81!12e3!17sa9fVWea_MsX8adX8j8AE%3A564!18e15!24m15!2b1!5m4!2b1!3b1!5b1!6b1!10m1!8e3!17b1!24b1!"
"25b1!26b1!30m1!2b1!36b1!26m3!2m2!1i80!2i92!30m28!1m6!1m2!1i0!2i0!2m2!1i458!2i976!1m6!1m2!1i1075!2i0!2m2!"
"1i1125!2i976!1m6!1m2!1i0!2i0!2m2!1i1125!2i20!1m6!1m2!1i0!2i956!2m2!1i1125!2i976!37m1!1e81!42b1!47m0!49m1"
"!3b1"
}
search_url = "https://www.google.de/search?" + "&".join(k + "=" + str(v) for k, v in params_url.items())
logging.info("searchterm: " + search_url)
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
resp = urllib.request.urlopen(urllib.request.Request(url=search_url, data=None, headers=user_agent),
context=gcontext)
data = resp.read().decode('utf-8')
# find eof json
jend = data.rfind("}")
if jend >= 0:
data = data[:jend + 1]
jdata = json.loads(data)["d"]
jdata = json.loads(jdata[4:])
popular_times, rating, rating_n, current_popularity = None, None, None, None
try:
# get info from result array, has to be adapted if backend api changes
info = jdata[0][1][0][14]
rating = info[4][7]
rating_n = info[4][8]
popular_times = info[84][0]
        # current_popularity is not available if popular_times isn't
current_popularity = info[84][7][1]
# ignore, there is either no info available or no popular times
    # TypeError: rating/rating_n/populartimes is None
# IndexError: info is not available
except (TypeError, IndexError):
pass
return rating, rating_n, popular_times, current_popularity
def get_current_popular_times(api_key, place_id):
"""
    sends a Places detail request to build a search string, then uses a standard proto buffer
    request to retrieve additional information on the current popular times status
:return: json details
"""
# places api - detail search - https://developers.google.com/places/web-service/details?hl=de
detail_str = detail_url.format(place_id, api_key)
resp = json.loads(requests.get(detail_str, auth=('user', 'pass')).text)
check_response_code(resp)
detail = resp["result"]
place_identifier = "{} {}".format(detail["name"], detail["formatted_address"])
rating, rating_n, popularity, current_popularity = get_current_popularity(place_identifier)
detail_json = {
"id": detail["place_id"],
"name": detail["name"],
"address": detail["formatted_address"],
"types": detail["types"],
"coordinates": detail["geometry"]["location"]
}
# check optional return parameters
if rating is not None:
detail_json["rating"] = rating
elif "rating" in detail:
detail_json["rating"] = detail["rating"]
if rating_n is None:
detail_json["rating_n"] = 0
else:
detail_json["rating_n"] = rating_n
if "international_phone_number" in detail:
detail_json["international_phone_number"] = detail["international_phone_number"]
if current_popularity is not None:
detail_json["current_popularity"] = current_popularity
detail_json["populartimes"] = get_popularity_for_day(popularity) if popularity is not None else []
return detail_json
def check_response_code(resp):
"""
    check if the query quota has been surpassed or other errors occurred
:param resp: json response
:return:
"""
if resp["status"] == "OK" or resp["status"] == "ZERO_RESULTS":
return
if resp["status"] == "REQUEST_DENIED":
logging.error("Your request was denied, the API key is invalid.")
if resp["status"] == "OVER_QUERY_LIMIT":
logging.error("You exceeded your Query Limit for Google Places API Web Service, "
"check https://developers.google.com/places/web-service/usage to upgrade your quota.")
if resp["status"] == "INVALID_REQUEST":
logging.error("The query string is malformed, "
"check params.json if your formatting for lat/lng and radius is correct.")
# TODO return intermediary result
logging.error("Exiting application ...")
os._exit(1)
def run(_params):
"""
wrap execution logic in method, for later external call
:return:
"""
start = datetime.datetime.now()
global params, g_place_ids, q_radar, q_detail, results
# shared variables
params = _params
q_radar, q_detail = Queue(), Queue()
g_place_ids, results = set(), list()
logging.info("Adding places to queue...")
# threading for radar search
for i in range(params["n_threads"]):
t = threading.Thread(target=worker_radar)
t.daemon = True
t.start()
# cover search area with circles
bounds = params["bounds"]
for lat, lng in get_circle_centers([bounds["lower"]["lat"], bounds["lower"]["lng"]], # southwest
[bounds["upper"]["lat"], bounds["upper"]["lng"]], # northeast
params["radius"]):
q_radar.put((lat, lng))
q_radar.join()
logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))
logging.info("{} places to process...".format(len(g_place_ids)))
# threading for detail search and popular times
for i in range(params["n_threads"]):
t = threading.Thread(target=worker_detail)
t.daemon = True
t.start()
for g_place_id in g_place_ids:
q_detail.put(g_place_id)
q_detail.join()
logging.info("Finished in: {}".format(str(datetime.datetime.now() - start)))
return results
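# Hedged sketch of the params dict that run() expects, inferred from the keys
# read above (API_key, radius, type, n_threads, all_places, bounds). All
# values below are illustrative placeholders.
_EXAMPLE_PARAMS = {
    "API_key": "YOUR_GOOGLE_PLACES_KEY",
    "radius": 180,
    "type": ["bar", "restaurant"],
    "n_threads": 20,
    "all_places": False,
    "bounds": {
        "lower": {"lat": 48.132, "lng": 11.566},
        "upper": {"lat": 48.142, "lng": 11.580}
    }
}
# Usage sketch: results = run(_EXAMPLE_PARAMS)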
|
Dashboard.py
|
import curses
import datetime
import threading
'''
# Quick usage
0. In your monitor script, create a Dashboard.Dashboard() instance.
   dashboard = Dashboard.Dashboard()
1. In your monitor script, add the code that collects self.LASTEST_CRASH_TIME, self.DCHECK_COUNT and self.CRSAH_COUNT.
   self.LASTEST_CRASH_TIME = time the crash was collected
   self.CRSAH_COUNT = number of crash logs
   self.DCHECK_COUNT = number of dcheck logs
2. In your monitor script, call dashboard.run_dashboard(testcase_path).
'''
class Dashboard:
def __init__(self):
self.CRSAH_COUNT = 0
self.LASTEST_CRASH_TIME = 'None'
self.STARTTIME = datetime.datetime.now()
self.File_Path = None
self.DCHECK_COUNT = 0
        self.TESTCASE_COUNT = -1 # the watcher condition increments this immediately at startup, so it effectively starts at 0
        self.Chrome_COUNT = 0 # number of Chrome restarts
self.Chrome_PID = -1
def dashboard(self):
begin_x = 0; begin_y = 0
height = 30; width = 80
curses.initscr()
curses.curs_set(0)
field = curses.newwin(height, width, begin_y, begin_x)
while(1):
field.refresh()
running_time = (datetime.datetime.now() - self.STARTTIME).seconds
running_time_seconds = running_time % 60
running_time_minutes = running_time // 60 % 60
running_time_hours = running_time // 60 // 60 % 24
running_time = "%d:%d:%d" % (running_time_hours, running_time_minutes, running_time_seconds)
dashboard_template = '''
#############################################################
Dash Board
=============================================================
StartTime : %s
RunTime : %s
Crash : %d
    Latest Crash : %s
Dcheck Failed : %d
TestCase : %d
Chrome Count : %d
Chrome PID : %d
#############################################################''' % (self.STARTTIME, running_time, self.CRSAH_COUNT, self.LASTEST_CRASH_TIME, self.DCHECK_COUNT, self.TESTCASE_COUNT, self.Chrome_COUNT, self.Chrome_PID)
field.addstr(0, 0, dashboard_template)
def watcher(self):
testcase_data = b''
while(True):
with open(self.File_Path, 'rb') as fp:
tmp = fp.read()
if (testcase_data != tmp) and (tmp != b''):
self.TESTCASE_COUNT += 1
testcase_data = tmp
def run_dashboard(self, file_path):
self.File_Path = file_path
        dashboard_thread = threading.Thread(target=self.dashboard)
        dashboard_thread.start()
        watcher_thread = threading.Thread(target=self.watcher)
        watcher_thread.start()
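# Hedged usage sketch, following the translated notes above: the monitor
# script owns a Dashboard instance, updates its counters as events arrive and
# then starts the curses view plus testcase watcher with run_dashboard().
def _dashboard_usage_example(testcase_path):
    dashboard = Dashboard()
    dashboard.CRSAH_COUNT += 1                                    # a crash was collected
    dashboard.LASTEST_CRASH_TIME = str(datetime.datetime.now())   # time of that crash
    dashboard.DCHECK_COUNT += 1                                    # a DCHECK failure was seen
    dashboard.run_dashboard(testcase_path)                         # starts dashboard + watcher threads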
|
segment_deconvnet.py
|
#!/shared/xudongliu/anaconda3/envs/f_torch04/bin/python
import argparse
import json
import logging
import os
import threading
from os.path import exists, join, split, dirname
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
import torch.utils.data
from torch import nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import dla_up
import data_transforms as transforms
import dataset
from miou import RunningConfusionMatrix
from models.deconvnet import conv_deconv
import torchvision
try:
from modules import batchnormsync
HAS_BN_SYNC = True
except ImportError:
HAS_BN_SYNC = False
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CITYSCAPE_PALLETE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, base_transform, list_dir=None,
out_name=False, out_size=False, binary=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.out_size = out_size
self.binary = binary
self.read_lists()
self.base_transform = base_transform
def __getitem__(self, index):
image = Image.open(join(self.data_dir, self.image_list[index]))
data = [image]
if self.label_list is not None:
label_map = Image.open(join(self.data_dir, self.label_list[index]))
if self.binary:
label_map = Image.fromarray(
(np.array(label_map) > 0).astype(np.uint8))
data.append(label_map)
if self.bbox_list is not None:
data.append(Image.open(join(self.data_dir, self.bbox_list[index])))
data = list(self.transforms(*data))
data[0] = torchvision.transforms.functional.resize(data[0], 224)
data = list(self.base_transform(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
if self.out_size:
data.append(torch.from_numpy(np.array(image.size, dtype=int)))
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
bbox_path = join(self.list_dir, self.phase + '_bboxes.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
if exists(bbox_path):
self.bbox_list = [line.strip() for line in open(bbox_path, 'r')]
assert len(self.image_list) == len(self.bbox_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(join(self.data_dir,
self.label_list[index])))
# data = list(self.transforms(*data))
if len(data) > 1:
out_data = list(self.transforms(*data))
else:
out_data = [self.transforms(*data)]
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10):
# miou part >>>
confusion_labels = np.arange(0, 19)
confusion_matrix = RunningConfusionMatrix(confusion_labels)
# miou part <<<
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
confusion_matrix.update_matrix(target, output)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
score.update(eval_score(output, target_var), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score), flush=True)
miou, top_1, top_5 = confusion_matrix.compute_current_mean_intersection_over_union()
print(' * Score {top1.avg:.3f}'.format(top1=score))
print(' * mIoU {top1:.3f}'.format(top1=miou))
confusion_matrix.show_classes()
return miou
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
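# Hedged usage sketch: AverageMeter keeps a running average weighted by the
# number of samples, e.g. for per-batch losses. Numbers are illustrative.
def _average_meter_example():
    meter = AverageMeter()
    meter.update(0.5, n=4)   # batch of 4 samples with mean loss 0.5
    meter.update(1.0, n=4)   # second batch of 4 samples with mean loss 1.0
    return meter.avg         # (0.5 * 4 + 1.0 * 4) / 8 == 0.75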
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data[0]
def train(train_loader, model, criterion, optimizer, epoch,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# pdb.set_trace()
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
args = parse_args()
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, join(args.checkpoint_dir, 'model_best.pth.tar'))
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
checkpoint_dir = args.checkpoint_dir
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
pretrained_base = args.pretrained_base
single_model = conv_deconv(args.classes)
# single_model = dla_up.__dict__.get(args.arch)(
# args.classes, pretrained_base, down_ratio=args.down)
model = torch.nn.DataParallel(single_model).cuda()
if args.edge_weight > 0:
weight = torch.from_numpy(
np.array([1, args.edge_weight], dtype=np.float32))
criterion = nn.NLLLoss2d(ignore_index=255, weight=weight)
else:
criterion = nn.NLLLoss2d(ignore_index=255)
criterion.cuda()
data_dir = args.data_dir
info = dataset.load_dataset_info(data_dir)
normalize = transforms.Normalize(mean=info.mean, std=info.std)
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.append(transforms.RandomCrop(crop_size))
if args.random_color:
t.append(transforms.RandomJitter(0.4, 0.4, 0.4))
t.extend([transforms.RandomHorizontalFlip()])
t_base = [transforms.ToTensor(),
normalize]
train_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'train', transforms.Compose(t), transforms.Compose(t_base),
binary=(args.classes == 2)),
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'val', transforms.Compose([
transforms.RandomCrop(crop_size),
# transforms.RandomHorizontalFlip(),
]), transforms.Compose(t_base), binary=(args.classes == 2)),
batch_size=4, shuffle=False, num_workers=num_workers,
pin_memory=True
)
optimizer = torch.optim.SGD(single_model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
# validate(val_loader, model, criterion, eval_score=accuracy) # TODO delete
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
print('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,
eval_score=accuracy)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
# checkpoint_path = 'checkpoint_latest.pth.tar'
checkpoint_path = os.path.join(checkpoint_dir,'checkpoint_{}.pth.tar'.format(epoch))
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'prec1': prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % args.save_freq == 0:
history_path = os.path.join(checkpoint_dir, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
shutil.copyfile(checkpoint_path, history_path)
def adjust_learning_rate(args, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10
every 30 epochs"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
# if epoch == 0:
# lr = args.lr / 10
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
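# Hedged sketch (toy data): how fast_hist and per_class_iu combine into a
# mean IoU, mirroring the accumulation done in test() below.
def _miou_example():
    pred = np.array([0, 0, 1, 1])
    label = np.array([0, 1, 1, 1])
    hist = fast_hist(pred, label, 2)    # 2x2 confusion matrix
    ious = per_class_iu(hist)           # per-class IoU, here [0.5, 0.667]
    return np.nanmean(ious) * 100       # mean IoU in percent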
def crop_image(image, size):
left = (image.size[0] - size[0]) // 2
upper = (image.size[1] - size[1]) // 2
right = left + size[0]
lower = upper + size[1]
# print(left.item(), upper.item(), right.item(), lower.item())
return image.crop((left.item(), upper.item(), right.item(), lower.item()))
def save_output_images(predictions, filenames, output_dir, sizes=None):
"""
    Saves a batch of (B x H x W) label predictions as individual image files.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_prob_images(prob, filenames, output_dir, sizes=None):
for ind in range(len(filenames)):
im = Image.fromarray(
(prob[ind][1].squeeze().data.cpu().numpy() * 255).astype(np.uint8))
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes, sizes=None):
"""
    Saves a batch of (B x H x W) label predictions as individual color-mapped image files.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
if sizes is not None:
im = crop_image(im, sizes[ind])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name, size) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
prob = torch.exp(final)
if save_vis:
save_output_images(pred, name, output_dir, size)
if prob.size(1) == 2:
save_prob_images(prob, name, output_dir + '_prob', size)
else:
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE, size)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
print('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
print('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
ious = per_class_iu(hist) * 100
print(' '.join('{:.03f}'.format(i) for i in ious))
if has_gt: # val
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALLETE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: # val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
base_out_dir = args.checkpoint_dir
for k, v in args.__dict__.items():
print(k, ':', v)
# single_model = SegNet(args.classes, pretrained=True)
single_model = conv_deconv(args.classes)
# single_model = dla_up.__dict__.get(args.arch)(
# args.classes, down_ratio=args.down)
model = torch.nn.DataParallel(single_model).cuda()
data_dir = args.data_dir
info = dataset.load_dataset_info(data_dir)
normalize = transforms.Normalize(mean=info.mean, std=info.std)
# scales = [0.5, 0.75, 1.25, 1.5, 1.75]
scales = [0.5, 0.75, 1.25, 1.5]
t = []
if args.crop_size > 0:
t.append(transforms.PadToSize(args.crop_size))
# t.extend([transforms.ToTensor(), normalize])
t_base = [transforms.ToTensor(),
normalize]
if args.ms:
data = SegListMS(data_dir, phase, transforms.Compose(t), scales)
else:
data = SegList(data_dir, phase, transforms.Compose(t), transforms.Compose(t_base),
out_name=True, out_size=True,
binary=args.classes == 2)
test_loader = torch.utils.data.DataLoader(
data,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
out_dir = os.path.join(base_out_dir, '{}_{:03d}_{}'.format(args.arch, start_epoch, phase))
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
print('mAP: ', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(
description='DLA Segmentation and Boundary Prediction')
parser.add_argument('cmd', choices=['train', 'test'])
parser.add_argument('-d', '--data-dir', default=None)
parser.add_argument('-c', '--classes', default=0, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--train-samples', default=16000, type=int)
parser.add_argument('--loss', default='l1', type=str)
parser.add_argument('--test-batch-size', type=int, default=1000,
metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='how many batches to wait before logging '
'training status')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained-base', default=None,
help='use pre-trained model')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--down', default=2, type=int, choices=[2, 4, 8, 16],
help='Downsampling ratio of IDA network output, which '
'is then upsampled to the original resolution '
'with bilinear interpolation.')
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--lr-mode', default='step')
parser.add_argument('--bn-sync', action='store_true', default=False)
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--random-color', action='store_true', default=False)
parser.add_argument('--save-freq', default=10, type=int)
parser.add_argument('--ms', action='store_true', default=False)
parser.add_argument('--edge-weight', type=int, default=-1)
parser.add_argument('--test-suffix', default='')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('-o', '--checkpoint-dir')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
assert args.data_dir is not None
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
return args
def main():
args = parse_args()
if not exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
if args.bn_sync:
if HAS_BN_SYNC:
dla_up.set_bn(batchnormsync.BatchNormSync)
else:
print('batch normalization synchronization across GPUs '
'is not imported.')
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
if __name__ == '__main__':
main()
|
genmulti.py
|
# genmulti.py
#
# Generate items from multiple generators (multiplex)
#
import queue, threading
from genqueue import genfrom_queue, sendto_queue
from gencat import gen_cat
def multiplex(sources):
in_q = queue.Queue()
consumers = []
for src in sources:
thr = threading.Thread(target=sendto_queue, args=(src, in_q))
thr.start()
consumers.append(genfrom_queue(in_q))
return gen_cat(consumers)
def gen_multiplex(genlist):
item_q = queue.Queue()
def run_one(source):
for item in source:
item_q.put(item)
def run_all():
thrlist = []
for source in genlist:
t = threading.Thread(target=run_one, args=(source,))
t.start()
thrlist.append(t)
for t in thrlist:
t.join()
item_q.put(StopIteration)
threading.Thread(target=run_all).start()
while True:
item = item_q.get()
if item is StopIteration:
return
yield item
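# Hedged usage sketch: multiplexing two finite generators. Item order is not
# deterministic because each source is drained in its own thread.
def _gen_multiplex_example():
    evens = (i for i in range(0, 10, 2))
    odds = (i for i in range(1, 10, 2))
    return sorted(gen_multiplex([evens, odds]))   # [0, 1, 2, ..., 9]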
# Example use
# This example requires you to perform these setup steps:
#
# 1. Go to run/foo and run logsim.py
# 2. Go to run/bar and run logsim.py
#
# These two steps will start writing two different Apache log files.
# Now, we're going to read from both at the same time.
if __name__ == "__main__":
from follow import follow
log1 = follow(open("run/foo/access-log"))
log2 = follow(open("run/bar/access-log"))
log = multiplex([log1, log2])
for line in log:
print(line, end="")
|
connection.py
|
#============================================================================
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd.
#============================================================================
import sys
import os
import threading
import socket
import fcntl
from errno import EAGAIN, EINTR, EWOULDBLOCK
try:
from OpenSSL import SSL
except ImportError:
pass
from xen.xend.XendLogging import log
"""General classes to support server and client sockets, without
specifying what kind of socket they are. There are subclasses
for TCP and unix-domain sockets (see tcp.py and unix.py).
"""
BUFFER_SIZE = 16384
BACKLOG = 5
class SocketServerConnection:
"""An accepted connection to a server.
"""
def __init__(self, sock, protocol_class):
self.sock = sock
self.protocol = protocol_class()
self.protocol.setTransport(self)
threading.Thread(target=self.main).start()
def main(self):
try:
while True:
try:
data = self.sock.recv(BUFFER_SIZE)
if data == '':
break
if self.protocol.dataReceived(data):
break
except socket.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
break
finally:
try:
self.sock.close()
except:
pass
def close(self):
self.sock.close()
def write(self, data):
self.sock.send(data)
class SocketListener:
"""A server socket, running listen in a thread.
Accepts connections and runs a thread for each one.
"""
def __init__(self, protocol_class):
self.protocol_class = protocol_class
self.sock = self.createSocket()
threading.Thread(target=self.main).start()
def close(self):
try:
self.sock.close()
except:
pass
def createSocket(self):
raise NotImplementedError()
def acceptConnection(self, sock, protocol, addr):
raise NotImplementedError()
def main(self):
try:
fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
self.sock.listen(BACKLOG)
while True:
try:
(sock, addr) = self.sock.accept()
self.acceptConnection(sock, addr)
except socket.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
break
finally:
self.close()
class SSLSocketServerConnection(SocketServerConnection):
"""An SSL aware accepted connection to a server.
    Since the pyOpenSSL SSL.Connection fileno() method only returns the file
    descriptor number of the underlying socket, reading from or writing to that
    file descriptor directly bypasses encryption.
    recv2fd() and fd2send() are simple wrappers for code that needs direct
    read/write access to a file descriptor rather than a socket-like object.
To use recv2fd(), you can create a pipe and start a thread to transfer all
received data to one end of the pipe, then read from the other end:
p2cread, p2cwrite = os.pipe()
threading.Thread(target=connection.SSLSocketServerConnection.recv2fd,
args=(sock, p2cwrite)).start()
os.read(p2cread, 1024)
To use fd2send():
p2cread, p2cwrite = os.pipe()
threading.Thread(target=connection.SSLSocketServerConnection.fd2send,
args=(sock, p2cread)).start()
os.write(p2cwrite, "data")
"""
def __init__(self, sock, protocol_class):
SocketServerConnection.__init__(self, sock, protocol_class)
def main(self):
try:
while True:
try:
data = self.sock.recv(BUFFER_SIZE)
if data == "":
break
if self.protocol.dataReceived(data):
break
except socket.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
break
except (SSL.WantReadError, SSL.WantWriteError, \
SSL.WantX509LookupError):
# The operation did not complete; the same I/O method
# should be called again.
continue
except SSL.ZeroReturnError:
# The SSL Connection has been closed.
break
except SSL.SysCallError, (retval, desc):
if ((retval == -1 and desc == "Unexpected EOF")
or retval > 0):
# The SSL Connection is lost.
break
log.debug("SSL SysCallError:%d:%s" % (retval, desc))
break
except SSL.Error, e:
# other SSL errors
log.debug("SSL Error:%s" % e)
break
finally:
try:
self.sock.close()
except:
pass
def recv2fd(sock, fd):
try:
while True:
try:
data = sock.recv(BUFFER_SIZE)
if data == "":
break
count = 0
while count < len(data):
try:
nbytes = os.write(fd, data[count:])
count += nbytes
except os.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
raise
except socket.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
break
except (SSL.WantReadError, SSL.WantWriteError, \
SSL.WantX509LookupError):
# The operation did not complete; the same I/O method
# should be called again.
continue
except SSL.ZeroReturnError:
# The SSL Connection has been closed.
break
except SSL.SysCallError, (retval, desc):
if ((retval == -1 and desc == "Unexpected EOF")
or retval > 0):
# The SSL Connection is lost.
break
log.debug("SSL SysCallError:%d:%s" % (retval, desc))
break
except SSL.Error, e:
# other SSL errors
log.debug("SSL Error:%s" % e)
break
finally:
try:
sock.close()
os.close(fd)
except:
pass
recv2fd = staticmethod(recv2fd)
def fd2send(sock, fd):
try:
while True:
try:
data = os.read(fd, BUFFER_SIZE)
if data == "":
break
count = 0
while count < len(data):
try:
nbytes = sock.send(data[count:])
count += nbytes
except socket.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
raise
except (SSL.WantReadError, SSL.WantWriteError, \
SSL.WantX509LookupError):
# The operation did not complete; the same I/O method
# should be called again.
continue
except SSL.ZeroReturnError:
# The SSL Connection has been closed.
raise
except SSL.SysCallError, (retval, desc):
if not (retval == -1 and data == ""):
# errors when writing empty strings are expected
# and can be ignored
log.debug("SSL SysCallError:%d:%s" % (retval, desc))
raise
except SSL.Error, e:
# other SSL errors
log.debug("SSL Error:%s" % e)
raise
except os.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
break
finally:
try:
sock.close()
os.close(fd)
except:
pass
fd2send = staticmethod(fd2send)
def hostAllowed(addrport, hosts_allowed):
if hosts_allowed is None:
return True
else:
fqdn = socket.getfqdn(addrport[0])
for h in hosts_allowed:
if h.match(fqdn) or h.match(addrport[0]):
return True
log.warn("Rejected connection from %s (%s).", addrport[0], fqdn)
return False
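# Hedged usage sketch (patterns are illustrative): hosts_allowed is expected
# to be a list of compiled regular expressions matched against the client
# FQDN or address; None means every host is allowed.
def _host_allowed_example():
    import re
    allowed = [re.compile(r'.*\.example\.com$'), re.compile(r'^10\.0\.')]
    return hostAllowed(('10.0.0.5', 12345), allowed)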
class SocketDgramListener:
"""A connectionless server socket, running listen in a thread.
"""
def __init__(self, protocol_class):
self.protocol = protocol_class()
self.sock = self.createSocket()
threading.Thread(target=self.main).start()
def close(self):
try:
self.sock.close()
except:
pass
def createSocket(self):
raise NotImplementedError()
def main(self):
try:
fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
while True:
try:
data = self.sock.recv(BUFFER_SIZE)
self.protocol.dataReceived(data)
except socket.error, ex:
if ex.args[0] not in (EWOULDBLOCK, EAGAIN, EINTR):
break
finally:
try:
self.close()
except:
pass
|
utils.py
|
import socket
import threading
import os
import time
# oneshot UNIX to TCP socket proxy. Stops after first connection
def unix2tcp(unix_socket_path, tcp_host, tcp_port):
try:
os.unlink(unix_socket_path)
except OSError:
pass
usock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
usock.bind(unix_socket_path)
usock.listen(1)
def proxy_loop():
uconn, addr = usock.accept()
tsock.connect((tcp_host, tcp_port))
uconn.setblocking(False)
tsock.setblocking(False)
while True:
data = None
try:
data = uconn.recv(1000)
if len(data) == 0:
break
tsock.sendall(data)
except BlockingIOError:
pass
try:
data = tsock.recv(1000)
if len(data) == 0:
break
uconn.sendall(data)
except BlockingIOError:
pass
usock.close()
uconn.close()
tsock.close()
threading.Thread(target=proxy_loop, daemon=True).start()
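# Hedged usage sketch (paths and ports are hypothetical): bridge a local UNIX
# socket to a TCP service and push one request through it.
def _unix2tcp_example():
    unix2tcp('/tmp/example.sock', '127.0.0.1', 8080)
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect('/tmp/example.sock')
    client.sendall(b'ping')
    client.close()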
|
agent.py
|
# -*- coding: UTF-8 -*-
import os
import sys
import json
import time
import socket
import winerror
import requests
import win32event
import win32service
import win32timezone
import servicemanager
import win32serviceutil
from threading import Thread
from configparser import ConfigParser
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def push_event(src, time, action):
ipaddr = socket.gethostbyname(socket.gethostname())
try:
post_url = Config['Global'].get('Post_Url')
requests.post(
url=post_url,
timeout=3,
data={"directory": src,
"ipaddres": ipaddr,
"time": time,
"type": action
})
except requests.exceptions.ConnectTimeout:
headers = {'Content-Type': 'application/json'}
bot_url = Config['Global'].get('WechatBot')
post_data = {
"msgtype": "markdown",
"markdown": {
"content": "filemonitor连接异常:\n >IP地址:<font color='info'> %s </font> \n >发生时间:<font color='warning'> %s </font>" % (ipaddr, time)
}
}
requests.post(url=bot_url,headers=headers,data=json.dumps(post_data))
class MyHandler(FileSystemEventHandler):
def on_moved(self, event):
if event.is_directory:
pass
else:
now_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
push_event(event.src_path, now_date, "Moved")
def on_created(self, event):
if event.is_directory:
pass
else:
now_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
push_event(event.src_path, now_date, "Created")
def on_deleted(self, event):
if event.is_directory:
pass
else:
now_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
push_event(event.src_path, now_date, "Deleted")
def on_modified(self, event):
if event.is_directory:
pass
else:
now_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
push_event(event.src_path, now_date, "Modified")
def Monitor(path):
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(5)
except KeyboardInterrupt:
observer.stop()
observer.join()
class Monitor_Service(win32serviceutil.ServiceFramework):
_svc_name_ = 'file_monitor'
    _svc_display_name_ = 'Site directory monitoring service'
    _svc_description_ = 'Site directory monitoring service'
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.stop_event = win32event.CreateEvent(None, 0, 0, None)
self.run = True
def SvcDoRun(self):
Pathlist = eval(Config['Global'].get('Website_Path'))
p_list = []
for Path in Pathlist:
p = Thread(group=None, target=Monitor, args=(Path,))
p.start()
p_list.append(p)
for p in p_list:
p.join()
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.stop_event)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
self.run = False
if __name__ == '__main__':
Config = ConfigParser()
Config.read(r'C:\ops\config\config.ini')
if len(sys.argv) == 1:
try:
evtsrc_dll = os.path.abspath(servicemanager.__file__)
servicemanager.PrepareToHostSingle(Monitor_Service)
servicemanager.Initialize(
'Monitor_Service', evtsrc_dll)
servicemanager.StartServiceCtrlDispatcher()
except win32service.error as details:
if details == winerror.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT:
win32serviceutil.usage()
else:
win32serviceutil.HandleCommandLine(Monitor_Service)
|
process.py
|
# coding: utf-8
"""JupyterLab command handler"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
import logging
import os
import re
import signal
import sys
import threading
import time
import weakref
from tornado import gen
from .jlpmapp import which, subprocess
try:
import pty
except ImportError:
pty = False
if sys.platform == 'win32':
list2cmdline = subprocess.list2cmdline
else:
def list2cmdline(cmd_list):
import pipes
return ' '.join(map(pipes.quote, cmd_list))
logging.basicConfig(format='%(message)s', level=logging.INFO)
class Process(object):
"""A wrapper for a child process.
"""
_procs = weakref.WeakSet()
_pool = None
def __init__(self, cmd, logger=None, cwd=None, kill_event=None,
env=None):
"""Start a subprocess that can be run asynchronously.
Parameters
----------
cmd: list
The command to run.
logger: :class:`~logger.Logger`, optional
The logger instance.
cwd: string, optional
The cwd of the process.
env: dict, optional
The environment for the process.
kill_event: :class:`~threading.Event`, optional
An event used to kill the process operation.
"""
if not isinstance(cmd, (list, tuple)):
raise ValueError('Command must be given as a list')
if kill_event and kill_event.is_set():
raise ValueError('Process aborted')
self.logger = logger = logger or logging.getLogger('jupyterlab')
self._last_line = ''
self.logger.info('> ' + list2cmdline(cmd))
self.cmd = cmd
self.proc = self._create_process(cwd=cwd, env=env)
self._kill_event = kill_event or threading.Event()
Process._procs.add(self)
def terminate(self):
"""Terminate the process and return the exit code.
"""
proc = self.proc
# Kill the process.
if proc.poll() is None:
try:
os.kill(proc.pid, signal.SIGTERM)
except Exception as e:
self.logger.error(str(e))
# Wait for the process to close.
try:
proc.wait()
except Exception as e:
self.logger.error(e)
finally:
Process._procs.remove(self)
return proc.returncode
def wait(self):
"""Wait for the process to finish.
Returns
-------
The process exit code.
"""
proc = self.proc
kill_event = self._kill_event
try:
while proc.poll() is None:
if kill_event.is_set():
self.terminate()
raise ValueError('Process Aborted')
time.sleep(1.)
except subprocess.CalledProcessError as error:
output = error.output.decode('utf-8')
self.logger.error(output)
self.terminate()
raise error
return self.terminate()
@gen.coroutine
def wait_async(self):
"""Asynchronously wait for the process to finish.
"""
proc = self.proc
kill_event = self._kill_event
try:
while proc.poll() is None:
if kill_event.is_set():
self.terminate()
raise ValueError('Process Aborted')
yield gen.sleep(1.)
except subprocess.CalledProcessError as error:
output = error.output.decode('utf-8')
self.logger.error(output)
self.terminate()
raise error
raise gen.Return(self.terminate())
def _create_process(self, **kwargs):
"""Create the process.
"""
cmd = self.cmd
kwargs.setdefault('stderr', subprocess.STDOUT)
if os.name == 'nt':
kwargs['shell'] = True
cmd[0] = which(cmd[0], kwargs.get('env'))
try:
proc = subprocess.Popen(cmd, **kwargs)
except subprocess.CalledProcessError as error:
output = error.output.decode('utf-8')
self.logger.error(output)
raise error
return proc
@classmethod
def _cleanup(cls):
"""Clean up the started subprocesses at exit.
"""
for proc in list(cls._procs):
proc.terminate()
class WatchHelper(Process):
"""A process helper for a watch process.
"""
def __init__(self, cmd, startup_regex, logger=None, cwd=None,
kill_event=None, env=None):
"""Initialize the process helper.
Parameters
----------
cmd: list
The command to run.
startup_regex: string
The regex to wait for at startup.
logger: :class:`~logger.Logger`, optional
The logger instance.
cwd: string, optional
The cwd of the process.
env: dict, optional
The environment for the process.
        kill_event: :class:`~threading.Event`, optional
            An event used to kill the process operation.
"""
super(WatchHelper, self).__init__(cmd, logger=logger,
cwd=cwd, kill_event=kill_event, env=env)
if not pty:
self._stdout = self.proc.stdout
while 1:
line = self._stdout.readline().decode('utf-8')
if not line:
raise RuntimeError('Process ended improperly')
print(line.rstrip())
if re.match(startup_regex, line):
break
self._read_thread = threading.Thread(target=self._read_incoming)
self._read_thread.setDaemon(True)
self._read_thread.start()
def terminate(self):
"""Terminate the process.
"""
proc = self.proc
if proc.poll() is None:
if os.name != 'nt':
# Kill the process group if we started a new session.
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
else:
os.kill(proc.pid, signal.SIGTERM)
# Close stdout.
try:
self._stdout.close()
except Exception as e:
pass
# Wait for the process to close.
try:
proc.wait()
except Exception as e:
print('on close')
self.logger.error(e)
finally:
Process._procs.remove(self)
return proc.returncode
def _read_incoming(self):
"""Run in a thread to read stdout and print"""
fileno = self._stdout.fileno()
while 1:
try:
buf = os.read(fileno, 1024)
except OSError as e:
self.logger.debug('Read incoming error %s', e)
return
if not buf:
return
print(buf.decode('utf-8'), end='')
def _create_process(self, **kwargs):
"""Create the watcher helper process.
"""
kwargs['bufsize'] = 0
if pty:
master, slave = pty.openpty()
kwargs['stderr'] = kwargs['stdout'] = slave
kwargs['start_new_session'] = True
self._stdout = os.fdopen(master, 'rb')
else:
kwargs['stdout'] = subprocess.PIPE
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kwargs['startupinfo'] = startupinfo
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
return super(WatchHelper, self)._create_process(**kwargs)
# Register the cleanup handler.
atexit.register(Process._cleanup)
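# Hedged usage sketch (the command is illustrative and assumes `node` is on
# PATH): run a child process synchronously; wait() returns its exit code.
def _process_example():
    proc = Process(['node', '--version'])
    return proc.wait()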
|
runner_mutations_nolog.py
|
# Run Quacky on original vs mutant AWS IAM policies.
# Analyze relative permissiveness.
import argparse as ap
import sys
import os
import re
import math
import multiprocessing
from utils.Shell import Shell
from utilities import get_abc_result_line
parser = ap.ArgumentParser(description = 'Run Quacky on AWS IAM policies')
parser.add_argument('-d', '--dir', help = 'Policy Directory', required = True)
parser.add_argument('-v', '--verbose', help = 'Verbose', required = False, action = 'store_true')
parser.add_argument('-c', '--constraints', help = 'use resource type constraints', required = False, action = 'store_true')
parser.add_argument('-e', '--enc', help = 'use action encoding', required = False, action = 'store_true')
parser.add_argument('-s', '--smt-lib', help = 'use SMT-LIB syntax', required = False, action = 'store_true')
parser.add_argument('-b', '--bound', help = 'Bound', required = True)
parser.add_argument('-f', '--variable', help = 'count all variables', required = False, action = 'store_true')
parser.add_argument('-t', '--timeout', help = 'Timeout', required = False, default = 120)
args = parser.parse_args()
orig_policy_dir = os.fsencode('../samples/' + args.dir + '/exp_single/')
mutated_policy_dir = os.fsencode('../samples/mutations/' + args.dir + '/exp_single/')
def call_abc(orig_path, mutated_path, p1, p2):
shell = Shell()
#Translate policies into SMT constraint formula
cmd = 'python3 translator.py -p1 {}/{} -p2 {}/{}'.format(orig_path, p1, mutated_path, p2)
if args.constraints:
cmd += ' -c'
if args.enc:
cmd += ' -e'
if args.smt_lib:
cmd += ' -s'
out, err = shell.runcmd(cmd)
if args.verbose:
print(out, err)
#Call ABC on the first outputted translation
cmd = 'timeout {}s '.format(args.timeout)
cmd += 'abc -bs {} -v 0 -i output_1.smt2 --precise --count-tuple --cache-subformula --cache-automata'.format(args.bound)
if args.variable:
cmd += ' --count-variable principal,action,resource'
out, err = shell.runcmd(cmd)
if args.verbose:
print(out, err)
#Parse output of ABC to get a dictionary of the results
result1 = get_abc_result_line(out,err)
#Call ABC on the second outputted translation
cmd = 'timeout {}s '.format(args.timeout)
    cmd += 'abc -bs {} -v 0 -i output_2.smt2 --precise --count-tuple --cache-subformula --cache-automata'.format(args.bound)
if args.variable:
cmd += ' --count-variable principal,action,resource'
out, err = shell.runcmd(cmd)
if args.verbose:
print(out,err)
#Parse output of ABC to get a dictionary of the results
result2 = get_abc_result_line(out,err)
#Populate the markdown table with the results
md = ''
md += '|[{}/{}]({}/{})|[{}/{}]({}/{})|'.format(orig_path, p1, orig_path, p1, mutated_path, p2, mutated_path, p2)
if result1['is_sat'] == 'sat' and 'count' in result1.keys():
md += '{}|{}|{}|{}|'.format(result1['is_sat'], result1['solve_time'],
result1['count'], result1['count_time']
)
if args.variable:
md += '{}|{}|{}|'.format(
result1['var']['principal']['count'],
result1['var']['action']['count'],
result1['var']['resource']['count']
)
else:
md += '{}|{}|-|-|'.format(result1['is_sat'], result1['solve_time'])
if args.variable:
md += '-|-|-|'
if result2['is_sat'] == 'sat' and 'count' in result2.keys():
md += '{}|{}|{}|{}|'.format(result2['is_sat'], result2['solve_time'],
result2['count'], result2['count_time']
)
if args.variable:
md += '{}|{}|{}|'.format(
result2['var']['principal']['count'],
result2['var']['action']['count'],
result2['var']['resource']['count']
)
else:
md += '{}|{}|-|-|'.format(result2['is_sat'], result2['solve_time'])
if args.variable:
md += '-|-|-|'
print(md)
#Make directories within the same directory as the policy and store the translation in this new directory.
out, err = shell.rmrdir('{}/abc_{}_{}'.format(mutated_path, p1.replace('.json', ''), p2.replace('.json', '')))
if args.verbose:
print(out, err)
out, err = shell.mkdir('{}/abc_{}_{}'.format(mutated_path, p1.replace('.json', ''), p2.replace('.json', '')))
if args.verbose:
print(out, err)
out, err = shell.mv('output_0.smt2', '{}/abc_{}_{}'.format(mutated_path, p1.replace('.json', ''), p2.replace('.json', '')))
if args.verbose:
print(out, err)
out, err = shell.mv('output_1.smt2', '{}/abc_{}_{}'.format(mutated_path, p1.replace('.json', ''), p2.replace('.json', '')))
if args.verbose:
print(out, err)
out, err = shell.mv('output_2.smt2', '{}/abc_{}_{}'.format(mutated_path, p1.replace('.json', ''), p2.replace('.json', '')))
if args.verbose:
print(out, err)
#Print out test description
print()
print('**Policies in {}**'.format(args.dir))
print()
print('bound: `{}`, variables: `{}`, constraints: `{}`, encoding: `{}`, smt-lib: `{}`'.format(args.bound, args.variable, args.constraints, args.enc, args.smt_lib))
print()
#Set up markdown table
if args.variable:
print('|Policy 1|Policy 2|P1 => P2|Solve Time (ms)|lg(tuple)|Count Time (ms)|lg(principal)|lg(action)|lg(resource)|P2 => P1|Solve Time (ms)|lg(tuple)|Count Time (ms)|lg(principal)|lg(action)|lg(resource)|')
print('|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|')
else:
print('|Policy 1|Policy 2|P1 => P2|Solve Time (ms)|lg(tuple)|Count Time (ms)|P2 => P1|Solve Time (ms)|lg(tuple)|Count Time (ms)|')
print('|-|-|-|-|-|-|-|-|-|-|')
timed_out = []
#Iterate through all policies within directory and perform check.
for dir in os.listdir(orig_policy_dir):
orig_path = os.fsdecode(orig_policy_dir) + os.fsdecode(dir)
for orig_policy in os.listdir(orig_path):
if orig_policy.endswith('.json'):
mutated_path = mutated_policy_dir + dir + b'/' + os.fsencode(orig_policy.replace('.json', ''))
mutated_path = os.fsdecode(mutated_path)
try:
os.listdir(mutated_path)
except:
continue
for mutated_policy in os.listdir(mutated_path):
if mutated_policy.endswith('.json'):
t = multiprocessing.Process(target = call_abc, args=(orig_path, mutated_path, orig_policy, mutated_policy))
t.start()
t.join(timeout = float(args.timeout))
if t.is_alive():
t.terminate()
timed_out.append(mutated_path + '/' + mutated_policy)
print()
print('**Timed out**')
for policy in timed_out:
print(policy)
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The bitphantom Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitphantomd shutdown."""
from test_framework.test_framework import bitphantomTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
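    # `waitfornewblock` blocks until a new block arrives (or the node goes
    # down); no blocks are mined in this test, so the call is expected to
    # return only when shutdown is triggered, reporting the genesis height.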
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(bitphantomTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
wrapper.py
|
__author__ = 'ChenyangGao <https://chenyanggao.github.io/>'
__version__ = (0, 0, 2)
__all__ = ['as_thread', 'as_threads', 'timethis', 'with_lock',
'context', 'suppressed']
from concurrent.futures import Future
from threading import current_thread, Lock, Thread
from time import perf_counter
from typing import (
overload, Callable, List, Optional, Type, TypeVar, Tuple, Union
)
from .decorator import optional_decorate
T = TypeVar('T')
@optional_decorate
def as_thread(
f: Optional[Callable] = None,
/,
join: bool = False,
daemon: bool = True,
**kwds,
) -> Callable[..., Future]:
def wrapper(*args, **kwargs) -> Future:
def asfuture():
try:
ft.set_result(f(*args, **kwargs))
except BaseException as exc:
ft.set_exception(exc)
ft: Future = Future()
t = ft.thread = Thread(target=asfuture, daemon=daemon, **kwds)
t.start()
if join:
t.join()
return ft
return wrapper
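# Usage sketch (illustrative only, not part of the original module; it assumes
# `optional_decorate` allows the decorator to be applied bare or with keyword
# arguments, and the function name below is invented):
#
#     @as_thread
#     def download(url):
#         ...
#
#     ft = download('https://example.org')   # returns a Future immediately
#     data = ft.result()                     # blocks until the thread finishes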
@optional_decorate
def as_threads(
f: Optional[Callable] = None,
/,
amount: int = 1,
join: bool = False,
daemon: bool = True,
**kwds,
) -> Callable[..., List[Future]]:
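    # Run `f` once in each of `amount` threads; every thread creates its own
    # Future, appends it to the shared `futures` list, then records the result
    # or the raised exception. Note that with join=False the returned list may
    # still be filling up while the caller inspects it.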
def wrapper(*args, **kwargs) -> List[Future]:
def asfuture():
ft = Future()
ft.thread = current_thread()
futures.append(ft)
try:
ft.set_result(f(*args, **kwargs))
except BaseException as exc:
ft.set_exception(exc)
futures: List[Future] = []
threads = [
Thread(target=asfuture, daemon=daemon, **kwds)
for _ in range(amount)
]
for t in threads:
t.start()
if join:
for t in threads:
t.join()
return futures
return wrapper
@optional_decorate
def timethis(
f: Optional[Callable] = None,
/,
print: Callable = print,
) -> Callable:
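    # Measure each call with perf_counter() and report the elapsed time through
    # the injected `print` callable, even when the wrapped function raises.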
def wrapper(*args, **kwargs):
begin_dt = perf_counter()
try:
return f(*args, **kwargs)
finally:
cost = perf_counter() - begin_dt
name = getattr(f, '__qualname__', getattr(f, '__name__', repr(f)))
args_str = ', '.join(
(*map(repr, args),
*(f'{k}={v!r}' for k, v in kwargs.items()))
)
print(f'{name}({args_str}) consumed {cost} seconds')
return wrapper
@optional_decorate
def with_lock(fn: Callable, /, lock=Lock()) -> Callable:
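    # Serialize every call to `fn` through `lock`. The default lock is created
    # once, when `with_lock` itself is defined, so all functions decorated
    # without an explicit lock share that single instance.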
def wrapper(*args, **kwds):
with lock:
return fn(*args, **kwds)
return wrapper
@optional_decorate
def context(
fn: Callable,
/,
onenter: Optional[Callable] = None,
onexit: Optional[Callable] = None,
) -> Callable:
def wrapper(*args, **kwds):
if onenter:
onenter()
try:
return fn(*args, **kwds)
finally:
            if onexit:
                onexit()
return wrapper
@overload
def suppressed(
fn: Callable[..., T],
/,
default: T,
exceptions: Union[
Type[BaseException],
Tuple[Type[BaseException], ...]
],
) -> Callable[..., T]:
...
@overload
def suppressed(
fn: Callable[..., T],
/,
default: None,
exceptions: Union[
Type[BaseException],
Tuple[Type[BaseException], ...]
],
) -> Callable[..., Optional[T]]:
...
@optional_decorate
def suppressed(fn, /, default=None, exceptions=Exception):
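    # Call `fn` and swallow any exception listed in `exceptions`, returning
    # `default` in its place; the overloads above only refine the return type.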
def wrapper(*args, **kwds):
try:
return fn(*args, **kwds)
except exceptions:
return default
return wrapper
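# Combined usage sketch (illustrative only; the names below are invented and it
# assumes `optional_decorate` supports keyword-argument application):
#
#     @timethis
#     @suppressed(default=float('nan'), exceptions=ZeroDivisionError)
#     def safe_div(a, b):
#         return a / b
#
#     safe_div(1, 0)   # prints a timing line and returns nan instead of raising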
|
breaksolver.py
|
##
# Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File: breaksolver.py
#
# Purpose: Show how to break a long-running task.
##
import sys
import mosek.fusion
from mosek.fusion import *
import random
import threading
import time
def main():
timeout = 5
n = 200 # number of binary variables
m = n // 5 # number of constraints
p = n // 5 # Each constraint picks p variables and requires that exactly half of them are 1
R = random.Random(1234)
print("Build problem...")
##TAG:begin-model
with Model('SolveBinary') as M:
M.setLogHandler(sys.stdout)
x = M.variable("x", n, Domain.binary())
M.objective(ObjectiveSense.Minimize, Expr.sum(x))
L = list(range(n))
for i in range(m):
R.shuffle(L)
M.constraint(Expr.sum(x.pick(L[:p])),Domain.equalsTo(p // 2))
##TAG:end-model
print("Start thread...")
##TAG:begin-create-thread
T = threading.Thread(target = M.solve)
##TAG:end-create-thread
T0 = time.time()
try:
T.start() # optimization now running in background
##TAG:begin-check-condition
# Loop until we get a solution or you run out of patience and press Ctrl-C
while True:
                if not T.is_alive():
print("Solver terminated before anything happened!")
break
elif time.time()-T0 > timeout:
print("Solver terminated due to timeout!")
M.breakSolver()
break
except KeyboardInterrupt:
print("Signalling the solver that it can give up now!")
M.breakSolver()
##TAG:end-check-condition
##TAG:begin-check-return
finally:
try:
T.join() # wait for the solver to return
except:
pass
##TAG:end-check-return
if __name__ == '__main__':
main()
|
test_models.py
|
import unittest
from yaksh.models import User, Profile, Question, Quiz, QuestionPaper,\
QuestionSet, AnswerPaper, Answer, Course, StandardTestCase,\
StdIOBasedTestCase, FileUpload, McqTestCase, AssignmentUpload,\
LearningModule, LearningUnit, Lesson, LessonFile, CourseStatus
from yaksh.code_server import (
ServerPool, get_result as get_result_from_code_server
)
import json
import ruamel.yaml as yaml
from datetime import datetime, timedelta
from django.utils import timezone
import pytz
from django.db import IntegrityError
from django.core.files import File
from textwrap import dedent
import zipfile
import os
import shutil
import tempfile
from threading import Thread
from yaksh import settings
def setUpModule():
# create user profile
user = User.objects.create_user(username='creator',
password='demo',
email='demo@test.com')
User.objects.create_user(username='demo_user2',
password='demo',
email='demo@test.com')
Profile.objects.create(user=user, roll_number=1, institute='IIT',
department='Chemical', position='Student')
student = User.objects.create_user(username='demo_user3',
password='demo',
email='demo3@test.com')
Profile.objects.create(user=student, roll_number=3, institute='IIT',
department='Chemical', position='Student')
user4 = User.objects.create_user(
username='demo_user4', password='demo', email='demo4@test.com'
)
Profile.objects.create(user=user4, roll_number=4, institute='IIT',
department='Chemical', position='Student')
# create a course
course = Course.objects.create(name="Python Course",
enrollment="Enroll Request", creator=user)
# create 20 questions
for i in range(1, 21):
Question.objects.create(summary='Q%d' % (i), points=1,
type='code', user=user)
# create a quiz
quiz = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=1, time_between_attempts=0,
description='demo quiz 1', pass_criteria=0,
instructions="Demo Instructions")
Quiz.objects.create(start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
duration=30, active=False,
attempts_allowed=-1, time_between_attempts=0,
description='demo quiz 2', pass_criteria=40,
instructions="Demo Instructions")
tmp_file1 = os.path.join(tempfile.gettempdir(), "test.txt")
with open(tmp_file1, 'wb') as f:
f.write('2'.encode('ascii'))
    # Learning modules
learning_module_one = LearningModule.objects.create(
name='LM1', description='module one', creator=user
)
learning_module_two = LearningModule.objects.create(
name='LM2', description='module two', creator=user, order=1
)
lesson = Lesson.objects.create(name='L1', description='Video Lesson',
creator=user)
learning_unit_lesson = LearningUnit.objects.create(order=1, lesson=lesson,
type='lesson')
learning_unit_quiz = LearningUnit.objects.create(order=2, quiz=quiz,
type='quiz')
learning_module_one.learning_unit.add(learning_unit_lesson)
learning_module_one.learning_unit.add(learning_unit_quiz)
learning_module_one.save()
course.learning_module.add(learning_module_one)
course.learning_module.add(learning_module_two)
course_user = User.objects.create(username='course_user')
course.students.add(course_user)
course.save()
LessonFile.objects.create(lesson=lesson)
CourseStatus.objects.create(course=course, user=course_user)
def tearDownModule():
User.objects.all().delete()
Question.objects.all().delete()
Quiz.objects.all().delete()
Course.objects.all().delete()
QuestionPaper.objects.all().delete()
LessonFile.objects.all().delete()
Lesson.objects.all().delete()
LearningUnit.objects.all().delete()
LearningModule.objects.all().delete()
AnswerPaper.objects.all().delete()
###############################################################################
class LessonTestCases(unittest.TestCase):
def setUp(self):
self.lesson = Lesson.objects.get(name='L1')
self.creator = User.objects.get(username='creator')
def test_lesson(self):
self.assertEqual(self.lesson.name, 'L1')
self.assertEqual(self.lesson.description, 'Video Lesson')
self.assertEqual(self.lesson.creator.username, self.creator.username)
class LearningModuleTestCases(unittest.TestCase):
def setUp(self):
self.learning_module = LearningModule.objects.get(name='LM1')
self.learning_module_two = LearningModule.objects.get(name='LM2')
self.creator = User.objects.get(username='creator')
self.student = User.objects.get(username='course_user')
self.learning_unit_one = LearningUnit.objects.get(order=1)
self.learning_unit_two = LearningUnit.objects.get(order=2)
self.quiz = Quiz.objects.get(description='demo quiz 1')
self.lesson = Lesson.objects.get(name='L1')
self.course = Course.objects.get(name='Python Course')
self.course_status = CourseStatus.objects.get(
course=self.course, user=self.student)
def tearDown(self):
# Remove unit from course status completed units
self.course_status.completed_units.remove(self.learning_unit_one)
self.course_status.completed_units.remove(self.learning_unit_two)
def test_learning_module(self):
self.assertEqual(self.learning_module.description, 'module one')
self.assertEqual(self.learning_module.creator, self.creator)
self.assertTrue(self.learning_module.check_prerequisite)
self.assertEqual(self.learning_module.order, 0)
def test_get_quiz_units(self):
# Given
quizzes = [self.quiz]
# When
module_quizzes = self.learning_module.get_quiz_units()
# Then
self.assertSequenceEqual(module_quizzes, quizzes)
def test_get_learning_units(self):
# Given
learning_units = [self.learning_unit_one, self.learning_unit_two]
# When
module_units = self.learning_module.get_learning_units()
# Then
self.assertSequenceEqual(module_units, learning_units)
def test_get_added_quiz_lesson(self):
# Given
quiz_lessons = [('lesson', self.lesson), ('quiz', self.quiz)]
# When
module_quiz_lesson = self.learning_module.get_added_quiz_lesson()
# Then
self.assertEqual(module_quiz_lesson, quiz_lessons)
def test_toggle_check_prerequisite(self):
self.assertTrue(self.learning_module.check_prerequisite)
# When
self.learning_module.toggle_check_prerequisite()
# Then
self.assertFalse(self.learning_module.check_prerequisite)
# When
self.learning_module.toggle_check_prerequisite()
# Then
self.assertTrue(self.learning_module.check_prerequisite)
def test_get_next_unit(self):
# Given
current_unit_id = self.learning_unit_one.id
next_unit = self.learning_unit_two
# When
unit = self.learning_module.get_next_unit(current_unit_id)
# Then
self.assertEqual(unit, next_unit)
# Given
current_unit_id = self.learning_unit_two.id
next_unit = self.learning_unit_one
# When
unit = self.learning_module.get_next_unit(current_unit_id)
# Then
self.assertEqual(unit, next_unit)
def test_get_module_status(self):
# Given
module_status = 'not attempted'
# When
self.learning_module.learning_unit.remove(self.learning_unit_two)
status = self.learning_module.get_status(self.student, self.course)
# Then
self.assertEqual(status, module_status)
self.learning_module.learning_unit.add(self.learning_unit_two)
# Module in progress
# Given
self.course_status.completed_units.add(self.learning_unit_one)
# When
status = self.learning_module.get_status(self.student, self.course)
# Then
self.assertEqual("inprogress", status)
# Module is completed
# Given
self.course_status.completed_units.add(self.learning_unit_two)
# When
status = self.learning_module.get_status(self.student, self.course)
# Then
self.assertEqual("completed", status)
# Module with no units
self.course.learning_module.add(self.learning_module_two)
status = self.learning_module_two.get_status(self.student, self.course)
self.assertEqual("no units", status)
def test_module_completion_percent(self):
# for module without learning units
percent = self.learning_module_two.get_module_complete_percent(
self.course, self.student
)
self.assertEqual(percent, 0)
# for module with learning units
self.course_status.completed_units.add(self.learning_unit_one)
self.course_status.completed_units.add(self.learning_unit_two)
percent = self.learning_module.get_module_complete_percent(
self.course, self.student
)
self.assertEqual(percent, 100)
class LearningUnitTestCases(unittest.TestCase):
def setUp(self):
learning_module = LearningModule.objects.get(name='LM1')
self.learning_unit_one = learning_module.learning_unit.get(order=1)
self.learning_unit_two = learning_module.learning_unit.get(order=2)
self.lesson = Lesson.objects.get(name='L1')
self.quiz = Quiz.objects.get(description='demo quiz 1')
def test_learning_unit(self):
self.assertEqual(self.learning_unit_one.type, 'lesson')
self.assertEqual(self.learning_unit_two.type, 'quiz')
self.assertEqual(self.learning_unit_one.lesson, self.lesson)
self.assertEqual(self.learning_unit_two.quiz, self.quiz)
self.assertIsNone(self.learning_unit_one.quiz)
self.assertIsNone(self.learning_unit_two.lesson)
self.assertTrue(self.learning_unit_one.check_prerequisite)
self.assertTrue(self.learning_unit_two.check_prerequisite)
class ProfileTestCases(unittest.TestCase):
def setUp(self):
self.user1 = User.objects.get(username='creator')
self.profile = Profile.objects.get(user=self.user1)
self.user2 = User.objects.get(username='demo_user3')
def test_user_profile(self):
""" Test user profile"""
self.assertEqual(self.user1.username, 'creator')
self.assertEqual(self.profile.user.username, 'creator')
self.assertEqual(int(self.profile.roll_number), 1)
self.assertEqual(self.profile.institute, 'IIT')
self.assertEqual(self.profile.department, 'Chemical')
self.assertEqual(self.profile.position, 'Student')
###############################################################################
class QuestionTestCases(unittest.TestCase):
def setUp(self):
# Single question details
self.user1 = User.objects.get(username="creator")
self.user2 = User.objects.get(username="demo_user2")
self.question1 = Question.objects.create(
summary='Demo Python 1', language='Python', type='Code',
active=True, description='Write a function', points=1.0,
snippet='def myfunc()', user=self.user1
)
self.question2 = Question.objects.create(
summary='Yaml Json', language='python', type='code',
active=True, description='factorial of a no', points=2.0,
snippet='def fact()', user=self.user2
)
# create a temp directory and add files for loading questions test
file_path = os.path.join(tempfile.gettempdir(), "test.txt")
self.load_tmp_path = tempfile.mkdtemp()
shutil.copy(file_path, self.load_tmp_path)
file1 = os.path.join(self.load_tmp_path, "test.txt")
# create a temp directory and add files for dumping questions test
self.dump_tmp_path = tempfile.mkdtemp()
shutil.copy(file_path, self.dump_tmp_path)
file2 = os.path.join(self.dump_tmp_path, "test.txt")
upload_file = open(file2, "r")
django_file = File(upload_file)
FileUpload.objects.create(file=django_file,
question=self.question2
)
self.question1.tags.add('python', 'function')
self.assertion_testcase = StandardTestCase(
question=self.question1,
test_case='assert myfunc(12, 13) == 15',
type='standardtestcase'
)
self.upload_test_case = StandardTestCase(
question=self.question2,
test_case='assert fact(3) == 6',
type='standardtestcase'
)
self.upload_test_case.save()
self.user_answer = "demo_answer"
self.test_case_upload_data = [{"test_case": "assert fact(3)==6",
"test_case_type": "standardtestcase",
"test_case_args": "",
"weight": 1.0
}]
questions_data = [{"snippet": "def fact()", "active": True,
"points": 1.0,
"description": "factorial of a no",
"language": "Python", "type": "Code",
"testcase": self.test_case_upload_data,
"files": [[file1, 0]],
"summary": "Yaml Demo",
"tags": ['yaml_demo']
}]
questions_data_with_missing_fields = [{
"active": True, "points": 1.0, "description": "factorial of a no",
"language": "Python", "type": "Code",
"testcase": self.test_case_upload_data,
"summary": "Yaml Demo 2"
}]
self.yaml_questions_data = yaml.safe_dump_all(questions_data)
self.yaml_questions_data_with_missing_fields = yaml.safe_dump_all(
questions_data_with_missing_fields
)
self.bad_yaml_question_data = '''[{
"active": True, "points": 1.0, "description" "factorial of a no",
"language": "Python", "type": "Code",
"testcase": self.test_case_upload_data,
"summary": "bad yaml"
}]'''
self.test_case_without_type = [{"test_case": "assert fact(3)==6",
"test_case_args": "",
"weight": 1.0
}]
self.yaml_question_data_without_test_case_type = yaml.safe_dump_all([{
"active": True, "points": 1.0, "description": "factorial of a no",
"language": "Python", "type": "Code",
"testcase": self.test_case_without_type,
"summary": "bad yaml"
}])
def tearDown(self):
shutil.rmtree(self.load_tmp_path)
shutil.rmtree(self.dump_tmp_path)
uploaded_files = FileUpload.objects.all()
que_id_list = [file.question.id for file in uploaded_files]
for que_id in que_id_list:
dir_path = os.path.join(os.getcwd(), "yaksh", "data",
"question_{0}".format(que_id)
)
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
uploaded_files.delete()
def test_question(self):
""" Test question """
self.assertEqual(self.question1.summary, 'Demo Python 1')
self.assertEqual(self.question1.language, 'Python')
self.assertEqual(self.question1.type, 'Code')
self.assertEqual(self.question1.description, 'Write a function')
self.assertEqual(self.question1.points, 1.0)
self.assertTrue(self.question1.active)
self.assertEqual(self.question1.snippet, 'def myfunc()')
tag_list = []
for tag in self.question1.tags.all():
tag_list.append(tag.name)
for tag in tag_list:
self.assertIn(tag, ['python', 'function'])
def test_dump_questions(self):
""" Test dump questions into Yaml """
question = Question()
question_id = [self.question2.id]
questions_zip = question.dump_questions(question_id, self.user2)
que_file = FileUpload.objects.get(question=self.question2.id)
zip_file = zipfile.ZipFile(questions_zip, "r")
tmp_path = tempfile.mkdtemp()
zip_file.extractall(tmp_path)
test_case = self.question2.get_test_cases()
with open("{0}/questions_dump.yaml".format(tmp_path), "r") as f:
questions = yaml.safe_load_all(f.read())
for q in questions:
self.assertEqual(self.question2.summary, q['summary'])
self.assertEqual(self.question2.language, q['language'])
self.assertEqual(self.question2.type, q['type'])
self.assertEqual(self.question2.description, q['description'])
self.assertEqual(self.question2.points, q['points'])
self.assertTrue(self.question2.active)
self.assertEqual(self.question2.snippet, q['snippet'])
self.assertEqual(os.path.basename(que_file.file.path),
q['files'][0][0])
self.assertEqual([case.get_field_value()
for case in test_case],
q['testcase']
)
for file in zip_file.namelist():
os.remove(os.path.join(tmp_path, file))
def test_load_questions_with_all_fields(self):
""" Test load questions into database from Yaml """
question = Question()
question.load_questions(self.yaml_questions_data, self.user1)
question_data = Question.objects.get(summary="Yaml Demo")
file = FileUpload.objects.get(question=question_data)
test_case = question_data.get_test_cases()
self.assertEqual(question_data.summary, 'Yaml Demo')
self.assertEqual(question_data.language, 'Python')
self.assertEqual(question_data.type, 'Code')
self.assertEqual(question_data.description, 'factorial of a no')
self.assertEqual(question_data.points, 1.0)
self.assertTrue(question_data.active)
tags = question_data.tags.all().values_list("name", flat=True)
self.assertListEqual(list(tags), ['yaml_demo'])
self.assertEqual(question_data.snippet, 'def fact()')
self.assertEqual(os.path.basename(file.file.path), "test.txt")
self.assertEqual([case.get_field_value() for case in test_case],
self.test_case_upload_data
)
def test_load_questions_with_missing_fields(self):
""" Test load questions into database from Yaml with
missing fields like files, snippet and tags. """
question = Question()
question.load_questions(
self.yaml_questions_data_with_missing_fields,
self.user1
)
question_data = Question.objects.get(summary="Yaml Demo 2")
file = FileUpload.objects.filter(question=question_data)
test_case = question_data.get_test_cases()
self.assertEqual(question_data.summary, 'Yaml Demo 2')
self.assertEqual(question_data.language, 'Python')
self.assertEqual(question_data.type, 'Code')
self.assertEqual(question_data.description, 'factorial of a no')
self.assertEqual(question_data.points, 1.0)
self.assertTrue(question_data.active)
self.assertEqual(question_data.snippet, '')
self.assertListEqual(list(file), [])
self.assertEqual([case.get_field_value() for case in test_case],
self.test_case_upload_data
)
tags = question_data.tags.all().values_list("name", flat=True)
self.assertListEqual(list(tags), [])
def test_load_questions_with_bad_yaml(self):
"""
Test if yaml file is parsed correctly
"""
question = Question()
msg = question.load_questions(
self.bad_yaml_question_data,
self.user1
)
self.assertIn("Error Parsing Yaml", msg)
msg = question.load_questions(
self.yaml_question_data_without_test_case_type,
self.user1
)
self.assertEqual(msg, "Unable to parse test case data")
###############################################################################
class QuizTestCases(unittest.TestCase):
def setUp(self):
self.course = Course.objects.get(name="Python Course")
self.creator = User.objects.get(username="creator")
self.teacher = User.objects.get(username="demo_user2")
self.student1 = User.objects.get(username='demo_user3')
self.student2 = User.objects.get(username='demo_user4')
self.quiz1 = Quiz.objects.get(description='demo quiz 1')
self.quiz2 = Quiz.objects.get(description='demo quiz 2')
self.quiz3 = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=1, time_between_attempts=0,
description='demo quiz 3', pass_criteria=0,
instructions="Demo Instructions"
)
self.question_paper3 = QuestionPaper.objects.create(quiz=self.quiz3)
self.quiz4 = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=1, time_between_attempts=0,
description='demo quiz 4', pass_criteria=0,
instructions="Demo Instructions"
)
self.answerpaper1 = AnswerPaper.objects.create(
user=self.student1,
question_paper=self.question_paper3,
course=self.course,
attempt_number=1,
start_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_time=datetime(2015, 10, 9, 10, 28, 15, 0, tzinfo=pytz.utc),
passed=True
)
self.answerpaper2 = AnswerPaper.objects.create(
user=self.student2,
question_paper=self.question_paper3,
course=self.course,
attempt_number=1,
start_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_time=datetime(2015, 10, 9, 10, 28, 15, 0, tzinfo=pytz.utc),
passed=False
)
self.trial_course = Course.objects.create_trial_course(self.creator)
def tearDown(self):
self.answerpaper1.delete()
self.answerpaper2.delete()
self.trial_course.delete()
self.quiz3.delete()
self.quiz4.delete()
self.question_paper3.delete()
def test_get_total_students(self):
self.assertEqual(self.quiz3.get_total_students(self.course), 2)
def test_get_total_students_without_questionpaper(self):
self.assertEqual(self.quiz4.get_total_students(self.course), 0)
def test_get_passed_students(self):
self.assertEqual(self.quiz3.get_passed_students(self.course), 1)
def test_get_passed_students_without_questionpaper(self):
self.assertEqual(self.quiz4.get_passed_students(self.course), 0)
def test_get_failed_students(self):
self.assertEqual(self.quiz3.get_failed_students(self.course), 1)
def test_get_failed_students_without_questionpaper(self):
self.assertEqual(self.quiz4.get_failed_students(self.course), 0)
def test_quiz(self):
""" Test Quiz"""
self.assertEqual((self.quiz1.start_date_time).strftime('%Y-%m-%d'),
'2015-10-09')
self.assertEqual((self.quiz1.start_date_time).strftime('%H:%M:%S'),
'10:08:15')
self.assertEqual(self.quiz1.duration, 30)
self.assertTrue(self.quiz1.active)
self.assertEqual(self.quiz1.description, 'demo quiz 1')
self.assertEqual(self.quiz1.pass_criteria, 0)
self.assertEqual(self.quiz1.instructions, "Demo Instructions")
def test_is_expired(self):
self.assertFalse(self.quiz1.is_expired())
self.assertTrue(self.quiz2.is_expired())
def test_get_active_quizzes(self):
quizzes = Quiz.objects.get_active_quizzes()
for quiz in quizzes:
self.assertTrue(quiz.active)
def test_create_trial_quiz(self):
"""Test to check if trial quiz is created"""
trial_quiz = Quiz.objects.create_trial_quiz(self.creator)
self.assertEqual(trial_quiz.duration, 1000)
self.assertEqual(trial_quiz.description, "trial_questions")
self.assertTrue(trial_quiz.is_trial)
self.assertEqual(trial_quiz.time_between_attempts, 0)
def test_create_trial_from_quiz_godmode(self):
"""Test to check if a copy of original quiz is created in godmode"""
trial_quiz = Quiz.objects.create_trial_from_quiz(self.quiz1.id,
self.creator,
True, self.course.id
)[0]
self.assertEqual(trial_quiz.description,
"Trial_orig_id_{}_godmode".format(self.quiz1.id)
)
self.assertTrue(trial_quiz.is_trial)
self.assertEqual(trial_quiz.duration, 1000)
self.assertTrue(trial_quiz.active)
self.assertEqual(trial_quiz.end_date_time,
datetime(2199, 1, 1, 0, 0, 0, 0, tzinfo=pytz.utc)
)
self.assertEqual(trial_quiz.time_between_attempts, 0)
def test_create_trial_from_quiz_usermode(self):
"""Test to check if a copy of original quiz is created in usermode"""
trial_quiz = Quiz.objects.create_trial_from_quiz(self.quiz2.id,
self.creator,
False, self.course.id
)[0]
self.assertEqual(trial_quiz.description,
"Trial_orig_id_{}_usermode".format(self.quiz2.id))
self.assertTrue(trial_quiz.is_trial)
self.assertEqual(trial_quiz.duration, self.quiz2.duration)
self.assertEqual(trial_quiz.active, self.quiz2.active)
self.assertEqual(trial_quiz.start_date_time,
self.quiz2.start_date_time
)
self.assertEqual(trial_quiz.end_date_time,
self.quiz2.end_date_time
)
self.assertEqual(trial_quiz.time_between_attempts, 0)
def test_view_answerpaper(self):
self.assertFalse(self.quiz1.view_answerpaper)
self.assertFalse(self.quiz2.view_answerpaper)
# When
self.quiz1.view_answerpaper = True
self.quiz1.save()
# Then
self.assertTrue(self.quiz1.view_answerpaper)
###############################################################################
class QuestionPaperTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
self.course = Course.objects.get(name="Python Course")
self.user = User.objects.get(username='creator')
# All active questions
self.questions = Question.objects.filter(active=True, user=self.user)
self.quiz = Quiz.objects.get(description="demo quiz 1")
self.quiz_with_time_between_attempts = Quiz.objects.create(
description="demo quiz with time between attempts",
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=3, time_between_attempts=1.0,
pass_criteria=0,
instructions="Demo Instructions"
)
# create question paper with only fixed questions
self.question_paper_fixed_questions = QuestionPaper.objects.create(
quiz=self.quiz)
self.question_paper_fixed_questions.fixed_questions.add(
self.questions.get(summary='Q11'),
self.questions.get(summary='Q10')
)
# create question paper with only random questions
self.question_paper_random_questions = QuestionPaper.objects.create(
quiz=self.quiz)
self.question_set_random = QuestionSet.objects.create(
marks=2, num_questions=2
)
self.question_set_random.questions.add(
self.questions.get(summary='Q13'),
self.questions.get(summary='Q5'), self.questions.get(summary='Q7')
)
self.question_paper_random_questions.random_questions.add(
self.question_set_random)
# create question paper with no questions
self.question_paper_no_questions = QuestionPaper.objects.create(
quiz=self.quiz)
# create question paper
self.question_paper = QuestionPaper.objects.create(
quiz=self.quiz, total_marks=0.0, shuffle_questions=True
)
self.question_paper_with_time_between_attempts = \
QuestionPaper.objects.create(
quiz=self.quiz_with_time_between_attempts,
total_marks=0.0,
shuffle_questions=True
)
self.question_paper.fixed_question_order = "{0}, {1}".format(
self.questions[3].id, self.questions[5].id
)
# add fixed set of questions to the question paper
self.question_paper.fixed_questions.add(
self.questions[3], self.questions[5]
)
# create two QuestionSet for random questions
# QuestionSet 1
self.question_set_1 = QuestionSet.objects.create(
marks=2, num_questions=2
)
# add pool of questions for random sampling
self.question_set_1.questions.add(
self.questions[6], self.questions[7],
self.questions[8], self.questions[9]
)
# add question set 1 to random questions in Question Paper
self.question_paper.random_questions.add(self.question_set_1)
# QuestionSet 2
self.question_set_2 = QuestionSet.objects.create(
marks=3, num_questions=3
)
# add pool of questions
self.question_set_2.questions.add(
self.questions[11], self.questions[12],
self.questions[13], self.questions[14]
)
# add question set 2
self.question_paper.random_questions.add(self.question_set_2)
# ip address for AnswerPaper
self.ip = '127.0.0.1'
self.user = User.objects.get(username="creator")
self.attempted_papers = AnswerPaper.objects.filter(
question_paper=self.question_paper,
user=self.user
)
# For Trial case
self.questions_list = [self.questions[3].id, self.questions[5].id]
self.trial_course = Course.objects.create_trial_course(self.user)
self.trial_quiz = Quiz.objects.create_trial_quiz(self.user)
@classmethod
def tearDownClass(self):
self.quiz.questionpaper_set.all().delete()
def test_get_question_bank(self):
# Given
summaries = ['Q11', 'Q10']
questions = list(Question.objects.filter(summary__in=summaries))
# When
question_bank = self.question_paper_fixed_questions.get_question_bank()
# Then
self.assertSequenceEqual(questions, question_bank)
# Given
summaries = ['Q13', 'Q5', 'Q7']
questions = list(Question.objects.filter(summary__in=summaries))
# When
question_bank = \
self.question_paper_random_questions.get_question_bank()
# Then
self.assertSequenceEqual(questions, question_bank)
# Given
questions = []
# When
question_bank = self.question_paper_no_questions.get_question_bank()
# Then
self.assertSequenceEqual(questions, question_bank)
def test_questionpaper(self):
""" Test question paper"""
self.assertEqual(self.question_paper.quiz.description, 'demo quiz 1')
self.assertSequenceEqual(self.question_paper.fixed_questions.all(),
[self.questions[3], self.questions[5]]
)
self.assertTrue(self.question_paper.shuffle_questions)
def test_update_total_marks(self):
""" Test update_total_marks() method of Question Paper"""
self.assertEqual(self.question_paper.total_marks, 0)
self.question_paper.update_total_marks()
self.assertEqual(self.question_paper.total_marks, 15)
def test_get_random_questions(self):
""" Test get_random_questions() method of Question Paper"""
random_questions_set_1 = self.question_set_1.get_random_questions()
random_questions_set_2 = self.question_set_2.get_random_questions()
total_random_questions = len(random_questions_set_1 +
random_questions_set_2)
self.assertEqual(total_random_questions, 5)
# To check whether random questions are from random_question_set
questions_set_1 = set(self.question_set_1.questions.all())
random_set_1 = set(random_questions_set_1)
random_set_2 = set(random_questions_set_2)
boolean = questions_set_1.intersection(random_set_1) == random_set_1
self.assertTrue(boolean)
self.assertEqual(len(random_set_1), 2)
# To check that the questions are random.
        # If not random, at least check that the order is different.
try:
self.assertFalse(random_set_1 == random_set_2)
except AssertionError:
self.assertTrue(random_questions_set_1 != random_questions_set_2)
def test_make_answerpaper(self):
""" Test make_answerpaper() method of Question Paper"""
already_attempted = self.attempted_papers.count()
attempt_num = already_attempted + 1
answerpaper = self.question_paper.make_answerpaper(self.user, self.ip,
attempt_num,
self.course.id)
self.assertIsInstance(answerpaper, AnswerPaper)
paper_questions = answerpaper.questions.all()
self.assertEqual(len(paper_questions), 7)
fixed_questions = set(self.question_paper.fixed_questions.all())
self.assertTrue(fixed_questions.issubset(set(paper_questions)))
answerpaper.passed = True
answerpaper.save()
        # Test can_attempt_now()
result = (False,
u'You cannot attempt demo quiz 1 quiz more than 1 time(s)')
self.assertEquals(
self.question_paper.can_attempt_now(self.user, self.course.id),
result
)
# trying to create an answerpaper with same parameters passed.
answerpaper2 = self.question_paper.make_answerpaper(
self.user, self.ip, attempt_num, self.course.id
)
        # check that make_answerpaper returned the existing paper instead of creating a new one.
self.assertEqual(answerpaper, answerpaper2)
def test_time_between_attempt(self):
""" Test make_answerpaper() method of Question Paper"""
attempt_num = 1
self.first_start_time = timezone.now()
self.first_end_time = self.first_start_time + timedelta(minutes=20)
self.second_start_time = self.first_start_time + timedelta(minutes=30)
self.second_end_time = self.second_start_time + timedelta(minutes=20)
# create answerpaper
self.first_answerpaper = AnswerPaper(
user=self.user,
question_paper=self.question_paper_with_time_between_attempts,
start_time=self.first_start_time,
end_time=self.first_end_time,
user_ip=self.ip,
course=self.course,
attempt_number=attempt_num
)
self.first_answerpaper.passed = True
self.first_answerpaper.save()
self.second_answerpaper = AnswerPaper(
user=self.user,
question_paper=self.question_paper_with_time_between_attempts,
start_time=self.second_start_time,
end_time=self.second_end_time,
user_ip=self.ip,
course=self.course,
attempt_number=attempt_num + 1
)
self.second_answerpaper.passed = True
self.second_answerpaper.save()
msg = u'You cannot start the next attempt ' +\
'for this quiz before1.0 hour(s)'
result = (False, msg)
self.assertEquals(
self.question_paper_with_time_between_attempts.can_attempt_now(
self.user, self.course.id), result
)
def test_create_trial_paper_to_test_quiz(self):
qu_list = [str(self.questions_list[0]), str(self.questions_list[1])]
trial_paper = \
QuestionPaper.objects.create_trial_paper_to_test_quiz(
self.trial_quiz, self.quiz.id
)
trial_paper.random_questions.add(self.question_set_1)
trial_paper.random_questions.add(self.question_set_2)
trial_paper.fixed_question_order = ",".join(qu_list)
self.assertEqual(trial_paper.quiz, self.trial_quiz)
self.assertSequenceEqual(
trial_paper.get_ordered_questions(),
self.question_paper.get_ordered_questions()
)
trial_paper_ran = [q_set.id for q_set in
trial_paper.random_questions.all()]
qp_ran = [q_set.id for q_set in
self.question_paper.random_questions.all()]
self.assertSequenceEqual(trial_paper_ran, qp_ran)
def test_create_trial_paper_to_test_questions(self):
qu_list = [str(self.questions_list[0]), str(self.questions_list[1])]
trial_paper = \
QuestionPaper.objects.create_trial_paper_to_test_questions(
self.trial_quiz, qu_list
)
self.assertEqual(trial_paper.quiz, self.trial_quiz)
fixed_q = self.question_paper.fixed_questions.values_list(
'id', flat=True)
self.assertSequenceEqual(self.questions_list, fixed_q)
def test_fixed_order_questions(self):
fixed_ques = self.question_paper.get_ordered_questions()
actual_ques = [self.questions[3], self.questions[5]]
self.assertSequenceEqual(fixed_ques, actual_ques)
###############################################################################
class AnswerPaperTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
self.course = Course.objects.get(name="Python Course")
self.ip = '101.0.0.1'
self.user = User.objects.get(username='creator')
self.user2 = User.objects.get(username='demo_user2')
self.profile = self.user.profile
self.quiz = Quiz.objects.get(description='demo quiz 1')
self.question_paper = QuestionPaper(quiz=self.quiz, total_marks=3)
self.question_paper.save()
self.quiz2 = Quiz.objects.get(description='demo quiz 2')
self.qtn_paper_with_single_question = QuestionPaper(
quiz=self.quiz2, total_marks=3
)
self.qtn_paper_with_single_question.save()
all_questions = Question.objects.filter(user=self.user).order_by("id")
self.questions = all_questions[0:3]
self.start_time = timezone.now()
self.end_time = self.start_time + timedelta(minutes=20)
self.question1 = all_questions[0]
self.question2 = all_questions[1]
self.question3 = all_questions[2]
self.question4 = all_questions[3]
# create answerpaper
self.answerpaper = AnswerPaper(
user=self.user,
question_paper=self.question_paper,
start_time=self.start_time,
end_time=self.end_time,
user_ip=self.ip,
course=self.course
)
self.attempted_papers = AnswerPaper.objects.filter(
question_paper=self.question_paper,
user=self.user
)
self.question_paper.fixed_questions.add(*self.questions)
already_attempted = self.attempted_papers.count()
self.answerpaper.attempt_number = already_attempted + 1
self.answerpaper.save()
self.answerpaper.questions.add(*self.questions)
self.answerpaper.questions_order = ",".join(
[str(q.id) for q in self.questions]
)
self.answerpaper.questions_unanswered.add(*self.questions)
self.answerpaper.save()
# answers for the Answer Paper
self.answer_right = Answer(
question=self.question1,
answer="Demo answer",
correct=True, marks=1,
error=json.dumps([])
)
self.answer_wrong = Answer(
question=self.question2,
answer="My answer",
correct=False,
marks=0,
error=json.dumps(['error1', 'error2'])
)
self.answer_right.save()
self.answer_wrong.save()
self.answerpaper.answers.add(self.answer_right)
self.answerpaper.answers.add(self.answer_wrong)
self.answer1 = Answer.objects.create(
question=self.question1,
answer="answer1", correct=False, error=json.dumps([])
)
self.answerpaper.answers.add(self.answer1)
# create an answerpaper with only one question
self.answerpaper_single_question = AnswerPaper(
user=self.user,
question_paper=self.question_paper,
start_time=self.start_time,
end_time=self.end_time,
user_ip=self.ip
)
self.attempted_papers = AnswerPaper.objects.filter(
question_paper=self.question_paper,
user=self.user
)
self.qtn_paper_with_single_question.fixed_questions.add(self.question4)
already_attempted = self.attempted_papers.count()
self.answerpaper_single_question.attempt_number = already_attempted + 1
self.answerpaper_single_question.save()
self.answerpaper_single_question.questions.add(self.question4)
self.answerpaper_single_question.questions_unanswered.add(
self.question4
)
self.answerpaper_single_question.save()
# answers for the Answer Paper
self.single_answer = Answer(
question=self.question4,
answer="Demo answer",
correct=True, marks=1,
error=json.dumps([])
)
self.single_answer.save()
self.answerpaper_single_question.answers.add(self.single_answer)
self.question1.language = 'python'
self.question1.test_case_type = 'standardtestcase'
self.question1.summary = "Q1"
self.question1.save()
self.question2.language = 'python'
self.question2.type = 'mcq'
self.question2.test_case_type = 'mcqtestcase'
self.question2.summary = "Q2"
self.question2.save()
self.question3.language = 'python'
self.question3.type = 'mcc'
self.question3.test_case_type = 'mcqtestcase'
self.question3.summary = "Q3"
self.question3.save()
self.assertion_testcase = StandardTestCase(
question=self.question1,
test_case='assert add(1, 3) == 4',
type='standardtestcase'
)
self.assertion_testcase.save()
self.mcq_based_testcase = McqTestCase(
options='a',
question=self.question2,
correct=True,
type='mcqtestcase'
)
self.mcq_based_testcase.save()
self.mcc_based_testcase = McqTestCase(
question=self.question3,
options='a',
correct=True,
type='mcqtestcase'
)
self.mcc_based_testcase.save()
# Setup quiz where questions are shuffled
# Create Quiz and Question Paper
self.quiz2 = Quiz.objects.get(description="demo quiz 2")
self.question_paper2 = QuestionPaper(
quiz=self.quiz2, total_marks=3, shuffle_questions=True)
self.question_paper2.save()
summary_list = ['Q%d' % (i) for i in range(1, 21)]
self.que_list = Question.objects.filter(summary__in=summary_list)
self.question_paper2.fixed_questions.add(*self.que_list)
# Create AnswerPaper for user1 and user2
self.user1_answerpaper = self.question_paper2.make_answerpaper(
self.user, self.ip, 1, self.course.id
)
self.user2_answerpaper = self.question_paper2.make_answerpaper(
self.user2, self.ip, 1, self.course.id
)
self.user2_answerpaper2 = self.question_paper.make_answerpaper(
self.user2, self.ip, 1, self.course.id
)
settings.code_evaluators['python']['standardtestcase'] = \
"yaksh.python_assertion_evaluator.PythonAssertionEvaluator"
self.SERVER_POOL_PORT = 4000
server_pool = ServerPool(n=1, pool_port=self.SERVER_POOL_PORT)
self.server_pool = server_pool
self.server_thread = t = Thread(target=server_pool.run)
t.start()
@classmethod
def tearDownClass(self):
self.quiz.questionpaper_set.all().delete()
self.server_pool.stop()
self.server_thread.join()
settings.code_evaluators['python']['standardtestcase'] = \
"python_assertion_evaluator.PythonAssertionEvaluator"
def test_get_per_question_score(self):
# Given
question_id = self.question4.id
expected_score = 1
# When
score = self.answerpaper_single_question.get_per_question_score(
question_id
)
# Then
self.assertEqual(score, expected_score)
# Given
question_id = self.question2.id
expected_score = 0
# When
score = self.answerpaper.get_per_question_score(question_id)
# Then
self.assertEqual(score, expected_score)
# Given
question_id = 131
expected_score = 'NA'
# When
score = self.answerpaper.get_per_question_score(question_id)
# Then
self.assertEqual(score, expected_score)
def test_returned_question_is_not_none(self):
# Test add_completed_question and next_question
# When all questions are answered
# Before questions are answered
self.assertEqual(self.answerpaper_single_question.questions_left(), 1)
current_question = \
self.answerpaper_single_question.add_completed_question(
self.question4.id
)
# Then
self.assertEqual(
self.answerpaper_single_question.questions_answered.all()[0],
self.question4
)
self.assertEqual(self.answerpaper_single_question.questions_left(), 0)
self.assertIsNotNone(current_question)
self.assertEqual(current_question.summary, "Q4")
# When
next_question = self.answerpaper_single_question.next_question(
self.question4.id
)
# Then
self.assertEqual(self.answerpaper_single_question.questions_left(), 0)
self.assertIsNotNone(next_question)
self.assertEqual(next_question.summary, "Q4")
# When
current_question = \
self.answerpaper_single_question.get_current_question(
self.answerpaper_single_question.questions.all()
)
# Then
self.assertEqual(self.answerpaper_single_question.questions_left(), 0)
self.assertIsNotNone(current_question)
self.assertEqual(current_question.summary, "Q4")
def test_validate_and_regrade_mcc_correct_answer(self):
# Given
mcc_answer = [str(self.mcc_based_testcase.id)]
self.answer = Answer(question=self.question3,
answer=mcc_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcc_answer,
self.question3, json_data
)
# Then
self.assertTrue(result['success'])
self.assertEqual(result['error'], ['Correct answer'])
self.answer.correct = True
self.answer.marks = 1
# Given
self.answer.correct = True
self.answer.marks = 1
self.answer.answer = ['a', 'b']
self.answer.save()
# When
details = self.answerpaper.regrade(self.question3.id)
# Then
self.answer = self.answerpaper.answers.filter(
question=self.question3).last()
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_validate_and_regrade_code_correct_answer(self):
# Given
# Start code server
user_answer = dedent("""\
def add(a,b):
return a+b
""")
self.answer = Answer(question=self.question1,
answer=user_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
user = self.answerpaper.user
# When
json_data = self.question1.consolidate_answer_data(
user_answer, user
)
get_result = self.answerpaper.validate_answer(user_answer,
self.question1,
json_data,
self.answer.id,
self.SERVER_POOL_PORT
)
url = 'http://localhost:%s' % self.SERVER_POOL_PORT
check_result = get_result_from_code_server(url, get_result['uid'],
block=True
)
result = json.loads(check_result.get('result'))
# Then
self.assertTrue(result['success'])
self.answer.correct = True
self.answer.marks = 1
# Regrade
# Given
self.answer.correct = True
self.answer.marks = 1
self.answer.answer = dedent("""
def add(a,b):
return a-b
""")
self.answer.save()
# When
details = self.answerpaper.regrade(self.question1.id,
self.SERVER_POOL_PORT
)
# Then
self.answer = self.answerpaper.answers.filter(question=self.question1
).last()
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_validate_and_regrade_mcq_correct_answer(self):
# Given
mcq_answer = str(self.mcq_based_testcase.id)
self.answer = Answer(
question=self.question2,
answer=mcq_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcq_answer,
self.question2, json_data
)
# Then
self.assertTrue(result['success'])
self.answer.correct = True
self.answer.marks = 1
# Given
self.answer.correct = True
self.answer.marks = 1
self.answer.answer = 'b'
self.answer.save()
# When
details = self.answerpaper.regrade(self.question2.id)
# Then
self.answer = self.answerpaper.answers.filter(
question=self.question2).last()
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_mcq_incorrect_answer(self):
# Given
mcq_answer = 'b'
self.answer = Answer(
question=self.question2,
answer=mcq_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcq_answer,
self.question2, json_data
)
# Then
self.assertFalse(result['success'])
def test_mcc_incorrect_answer(self):
# Given
mcc_answer = ['b']
self.answer = Answer(question=self.question3,
answer=mcc_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcc_answer,
self.question3, json_data
)
# Then
self.assertFalse(result['success'])
def test_answerpaper(self):
""" Test Answer Paper"""
self.assertEqual(self.answerpaper.user.username, 'creator')
self.assertEqual(self.answerpaper.user_ip, self.ip)
questions = [q.id for q in self.answerpaper.get_questions()]
num_questions = len(questions)
self.assertEqual(set(questions), set([q.id for q in self.questions]))
self.assertEqual(num_questions, 3)
self.assertEqual(self.answerpaper.question_paper, self.question_paper)
self.assertEqual(self.answerpaper.start_time, self.start_time)
self.assertEqual(self.answerpaper.status, 'inprogress')
def test_questions(self):
# Test questions_left() method of Answer Paper
self.assertEqual(self.answerpaper.questions_left(), 3)
# Test current_question() method of Answer Paper
current_question = self.answerpaper.current_question()
self.assertEqual(current_question.summary, "Q1")
# Test completed_question() method of Answer Paper
question = self.answerpaper.add_completed_question(self.question1.id)
self.assertIsNotNone(question)
self.assertEqual(self.answerpaper.questions_left(), 2)
# Test next_question() method of Answer Paper
current_question = self.answerpaper.current_question()
self.assertEqual(current_question.summary, "Q2")
# When
next_question_id = self.answerpaper.next_question(current_question.id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q3")
# Given, here question is already answered
current_question_id = self.question1.id
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q2")
# Given, wrong question id
current_question_id = 12
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q1")
# Given, last question in the list
current_question_id = self.question3.id
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q1")
# Test get_questions_answered() method
# When
questions_answered = self.answerpaper.get_questions_answered()
# Then
self.assertEqual(questions_answered.count(), 1)
self.assertSequenceEqual(questions_answered, [self.questions[0]])
# When
questions_unanswered = self.answerpaper.get_questions_unanswered()
# Then
self.assertEqual(questions_unanswered.count(), 2)
self.assertEqual(set([q.id for q in questions_unanswered]),
set([self.questions[1].id, self.questions[2].id])
)
# Test completed_question and next_question
# When all questions are answered
current_question = self.answerpaper.add_completed_question(
self.question2.id
)
# Then
self.assertEqual(self.answerpaper.questions_left(), 1)
self.assertIsNotNone(current_question)
self.assertEqual(current_question.summary, "Q3")
# When
current_question = self.answerpaper.add_completed_question(
self.question3.id
)
# Then
self.assertEqual(self.answerpaper.questions_left(), 0)
self.assertIsNotNone(current_question)
self.assertTrue(
current_question == self.answerpaper.get_all_ordered_questions()[0]
)
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
all_questions = self.questions.all()
self.assertTrue(next_question_id == all_questions[0])
def test_update_marks(self):
""" Test update_marks method of AnswerPaper"""
self.answerpaper.update_marks('inprogress')
self.assertEqual(self.answerpaper.status, 'inprogress')
self.assertTrue(self.answerpaper.is_attempt_inprogress())
self.answerpaper.update_marks()
self.assertEqual(self.answerpaper.status, 'completed')
self.assertEqual(self.answerpaper.marks_obtained, 1.0)
self.assertEqual(self.answerpaper.percent, 33.33)
self.assertTrue(self.answerpaper.passed)
self.assertFalse(self.answerpaper.is_attempt_inprogress())
def test_set_end_time(self):
current_time = timezone.now()
self.answerpaper.set_end_time(current_time)
self.assertEqual(self.answerpaper.end_time, current_time)
def test_get_question_answer(self):
""" Test get_question_answer() method of Answer Paper"""
questions = self.answerpaper.questions.all()
answered = self.answerpaper.get_question_answers()
for question in questions:
answers_saved = Answer.objects.filter(question=question)
error_list = [json.loads(ans.error) for ans in answers_saved]
if answers_saved:
self.assertEqual(len(answered[question]), len(answers_saved))
ans = []
err = []
for val in answered[question]:
ans.append(val.get('answer'))
err.append(val.get('error_list'))
self.assertEqual(set(ans), set(answers_saved))
self.assertEqual(error_list, err)
def test_is_answer_correct(self):
self.assertTrue(self.answerpaper.is_answer_correct(self.questions[0]))
self.assertFalse(self.answerpaper.is_answer_correct(self.questions[1]))
def test_get_previous_answers(self):
answers = self.answerpaper.get_previous_answers(self.questions[0])
self.assertEqual(answers.count(), 2)
self.assertTrue(answers[0], self.answer_right)
answers = self.answerpaper.get_previous_answers(self.questions[1])
self.assertEqual(answers.count(), 1)
self.assertTrue(answers[0], self.answer_wrong)
def test_set_marks(self):
self.answer_wrong.set_marks(0.5)
self.assertEqual(self.answer_wrong.marks, 0.5)
self.answer_wrong.set_marks(10.0)
self.assertEqual(self.answer_wrong.marks, 1.0)
def test_get_latest_answer(self):
latest_answer = self.answerpaper.get_latest_answer(self.question1.id)
self.assertEqual(latest_answer.id, self.answer1.id)
self.assertEqual(latest_answer.answer, "answer1")
def test_shuffle_questions(self):
ques_set_1 = self.user1_answerpaper.get_all_ordered_questions()
ques_set_2 = self.user2_answerpaper.get_all_ordered_questions()
self.assertFalse(ques_set_1 == ques_set_2)
def test_validate_current_question(self):
self.user2_answerpaper2.questions_unanswered.remove(*self.questions)
self.assertEqual(self.user2_answerpaper2.current_question(),
self.question1)
def test_duplicate_attempt_answerpaper(self):
with self.assertRaises(IntegrityError):
AnswerPaper.objects.create(
user=self.answerpaper.user,
question_paper=self.answerpaper.question_paper,
attempt_number=self.answerpaper.attempt_number,
start_time=self.answerpaper.start_time,
end_time=self.answerpaper.end_time,
course=self.answerpaper.course
)
###############################################################################
class CourseTestCases(unittest.TestCase):
def setUp(self):
self.course = Course.objects.get(name="Python Course")
self.creator = User.objects.get(username="creator")
self.template_course_user = User.objects.get(username="demo_user4")
self.student = User.objects.get(username="course_user")
self.student1 = User.objects.get(username="demo_user2")
self.student2 = User.objects.get(username="demo_user3")
self.quiz1 = Quiz.objects.get(description='demo quiz 1')
self.quiz2 = Quiz.objects.get(description='demo quiz 2')
self.questions = Question.objects.filter(active=True,
user=self.creator
)
self.modules = LearningModule.objects.filter(creator=self.creator)
# create courses with disabled enrollment
self.enroll_request_course = Course.objects.create(
name="Enrollment Request Course With Enrollment Disabled",
enrollment="Enroll Request",
creator=self.creator,
start_enroll_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_enroll_time=datetime(2015, 11, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
)
self.open_course = Course.objects.create(
name="Open Course With Enrollment Disabled",
enrollment="Open Course",
creator=self.creator,
start_enroll_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_enroll_time=datetime(2015, 11, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
)
# create a course that will be cloned
self.template_course = Course.objects.create(
name="Template Course to clone",
enrollment="Open Course",
creator=self.creator,
start_enroll_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_enroll_time=datetime(2015, 11, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
)
self.template_quiz = Quiz.objects.create(
start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
duration=30,
active=False,
attempts_allowed=-1,
time_between_attempts=0,
description='template quiz 1',
pass_criteria=40,
instructions="Demo Instructions"
)
self.template_question_paper = QuestionPaper.objects.create(
quiz=self.template_quiz,
total_marks=0.0,
shuffle_questions=True
)
self.template_question_paper.fixed_questions.add(
self.questions[1], self.questions[2], self.questions[3]
)
self.template_quiz2 = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30,
active=True,
attempts_allowed=1,
time_between_attempts=0,
pass_criteria=0,
instructions="Demo Instructions"
)
self.template_question_paper2 = QuestionPaper.objects.create(
quiz=self.template_quiz2,
total_marks=0.0,
shuffle_questions=True
)
self.template_question_paper2.fixed_questions.add(
self.questions[1], self.questions[2], self.questions[3]
)
def test_get_learning_modules(self):
# Given
modules = list(self.modules)
# When
course_modules = self.course.get_learning_modules()
# Then
self.assertSequenceEqual(list(course_modules), modules)
# Given
modules = list(self.modules.filter(name='LM1'))
module_to_remove = self.modules.get(name='LM2')
# When
self.course.learning_module.remove(module_to_remove)
course_modules = self.course.get_learning_modules()
# Then
self.assertSequenceEqual(list(course_modules), modules)
def test_get_quizzes(self):
# Given
quizzes = [self.quiz1]
# When
course_quizzes = self.course.get_quizzes()
# Then
self.assertSequenceEqual(course_quizzes, quizzes)
def test_get_learning_units(self):
# Given
lesson = Lesson.objects.get(name='L1')
self.learning_unit_one = LearningUnit.objects.get(order=1,
lesson=lesson)
self.learning_unit_two = LearningUnit.objects.get(order=2,
quiz=self.quiz1)
learning_units = [self.learning_unit_one, self.learning_unit_two]
# When
course_learning_units = self.course.get_learning_units()
# Then
self.assertSequenceEqual(course_learning_units, learning_units)
def test_is_creator(self):
""" Test is_creator method of Course"""
self.assertTrue(self.course.is_creator(self.creator))
def test_is_self_enroll(self):
""" Test is_self_enroll method of Course"""
self.assertFalse(self.course.is_self_enroll())
def test_deactivate(self):
""" Test deactivate method of Course"""
self.course.deactivate()
self.assertFalse(self.course.active)
def test_activate(self):
""" Test activate method of Course"""
self.course.activate()
self.assertTrue(self.course.active)
def test_request(self):
""" Test request and get_requests methods of Course"""
self.course.request(self.student1, self.student2)
self.assertSequenceEqual(self.course.get_requests(),
[self.student1, self.student2])
def test_enroll_reject(self):
""" Test enroll, reject, get_enrolled and get_rejected methods"""
self.assertSequenceEqual(self.course.get_enrolled(), [self.student])
was_rejected = False
self.course.enroll(was_rejected, self.student1)
self.assertSequenceEqual(self.course.get_enrolled(),
[self.student1, self.student])
self.assertSequenceEqual(self.course.get_rejected(), [])
was_enrolled = False
self.course.reject(was_enrolled, self.student2)
self.assertSequenceEqual(self.course.get_rejected(), [self.student2])
was_rejected = True
self.course.enroll(was_rejected, self.student2)
self.assertSequenceEqual(self.course.get_enrolled(),
[self.student1, self.student2, self.student])
self.assertSequenceEqual(self.course.get_rejected(), [])
was_enrolled = True
self.course.reject(was_enrolled, self.student2)
self.assertSequenceEqual(self.course.get_rejected(), [self.student2])
self.assertSequenceEqual(self.course.get_enrolled(),
[self.student1, self.student])
self.assertTrue(self.course.is_enrolled(self.student1))
def test_add_teachers(self):
""" Test to add teachers to a course"""
self.course.add_teachers(self.student1, self.student2)
self.assertSequenceEqual(self.course.get_teachers(),
[self.student1, self.student2])
def test_remove_teachers(self):
""" Test to remove teachers from a course"""
self.course.add_teachers(self.student1, self.student2)
self.course.remove_teachers(self.student1)
self.assertSequenceEqual(self.course.get_teachers(), [self.student2])
def test_is_teacher(self):
""" Test to check if user is teacher"""
self.course.add_teachers(self.student2)
result = self.course.is_teacher(self.student2)
self.assertTrue(result)
def test_create_trial_course(self):
"""Test to check if trial course is created"""
trial_course = Course.objects.create_trial_course(self.creator)
self.assertEqual(trial_course.name, "trial_course")
self.assertEqual(trial_course.enrollment, "open")
self.assertTrue(trial_course.active)
self.assertEqual(self.creator, trial_course.creator)
self.assertIn(self.creator, trial_course.students.all())
self.assertTrue(trial_course.is_trial)
def test_enabled_enrollment_for_course(self):
"""Test to check enrollment is closed for open course"""
self.assertTrue(self.course.is_active_enrollment())
def test_disabled_enrollment_for_open_course(self):
"""Test to check enrollment is closed for open course"""
self.assertFalse(self.open_course.is_active_enrollment())
def test_disabled_enrollment_for_enroll_request_course(self):
"""Test to check enrollment is closed for open course"""
self.assertFalse(self.enroll_request_course.is_active_enrollment())
def test_course_complete_percent(self):
# for course with no modules
self.no_module_course = Course.objects.create(
name="test_course", creator=self.creator, enrollment="open")
modules = self.course.get_learning_modules()
percent = self.course.percent_completed(self.student1, modules)
self.assertEqual(percent, 0)
self.quiz1.questionpaper_set.all().delete()
# for course with module but zero percent completed
percent = self.course.percent_completed(self.student1, modules)
self.assertEqual(percent, 0)
# Add completed unit to course status and check percent
lesson = Lesson.objects.get(name='L1')
self.completed_unit = LearningUnit.objects.get(lesson=lesson)
course_status = CourseStatus.objects.create(
course=self.course, user=self.student1)
course_status.completed_units.add(self.completed_unit)
updated_percent = self.course.percent_completed(self.student1, modules)
self.assertEqual(updated_percent, 25)
def test_course_time_remaining_to_start(self):
# check if course has 0 days left to start
self.assertEqual(self.course.days_before_start(), 0)
# check if course has some days left to start
course_time = self.course.start_enroll_time
self.course.start_enroll_time = datetime(
2199, 12, 31, 10, 8, 15, 0,
tzinfo=pytz.utc
)
self.course.save()
updated_course = Course.objects.get(id=self.course.id)
time_diff = updated_course.start_enroll_time - timezone.now()
actual_days = time_diff.days + 1
self.assertEqual(updated_course.days_before_start(), actual_days)
self.course.start_enroll_time = course_time
self.course.save()
###############################################################################
class TestCaseTestCases(unittest.TestCase):
def setUp(self):
self.user = User.objects.get(username="creator")
self.question1 = Question(
summary='Demo question 1', language='Python',
type='Code', active=True, description='Write a function',
points=1.0, user=self.user, snippet='def myfunc()'
)
self.question2 = Question(
summary='Demo question 2', language='Python',
type='Code', active=True, description='Write to standard output',
points=1.0, user=self.user, snippet='def myfunc()'
)
self.question1.save()
self.question2.save()
self.assertion_testcase = StandardTestCase(
question=self.question1,
test_case='assert myfunc(12, 13) == 15',
type='standardtestcase'
)
self.stdout_based_testcase = StdIOBasedTestCase(
question=self.question2,
expected_output='Hello World',
type='standardtestcase'
)
self.assertion_testcase.save()
self.stdout_based_testcase.save()
answer_data = {'metadata': {'user_answer': 'demo_answer',
'language': 'python',
'partial_grading': False
},
'test_case_data': [
{'test_case': 'assert myfunc(12, 13) == 15',
'test_case_type': 'standardtestcase',
'test_case_args': "",
'weight': 1.0
}]
}
self.answer_data_json = json.dumps(answer_data)
def test_assertion_testcase(self):
""" Test question """
self.assertEqual(self.assertion_testcase.question, self.question1)
self.assertEqual(self.assertion_testcase.test_case,
'assert myfunc(12, 13) == 15')
def test_stdout_based_testcase(self):
""" Test question """
self.assertEqual(self.stdout_based_testcase.question, self.question2)
self.assertEqual(self.stdout_based_testcase.expected_output,
'Hello World'
)
def test_consolidate_answer_data(self):
""" Test consolidate answer data model method """
result = self.question1.consolidate_answer_data(
user_answer="demo_answer"
)
actual_data = json.loads(result)
exp_data = json.loads(self.answer_data_json)
self.assertEqual(actual_data['metadata']['user_answer'],
exp_data['metadata']['user_answer'])
self.assertEqual(actual_data['test_case_data'],
exp_data['test_case_data'])
class AssignmentUploadTestCases(unittest.TestCase):
def setUp(self):
self.user1 = User.objects.get(username="creator")
self.user1.first_name = "demo"
self.user1.last_name = "user"
self.user1.save()
self.user2 = User.objects.get(username="demo_user3")
self.user2.first_name = "demo"
self.user2.last_name = "user3"
self.user2.save()
self.quiz = Quiz.objects.get(description="demo quiz 1")
self.questionpaper = QuestionPaper.objects.create(
quiz=self.quiz, total_marks=0.0, shuffle_questions=True
)
self.question = Question.objects.create(
summary='Assignment', language='Python', type='upload',
active=True, description='Upload a file', points=1.0, snippet='',
user=self.user1
)
self.questionpaper.fixed_question_order = "{0}".format(
self.question.id)
self.questionpaper.fixed_questions.add(self.question)
file_path1 = os.path.join(tempfile.gettempdir(), "upload1.txt")
file_path2 = os.path.join(tempfile.gettempdir(), "upload2.txt")
self.assignment1 = AssignmentUpload.objects.create(
user=self.user1, assignmentQuestion=self.question,
assignmentFile=file_path1, question_paper=self.questionpaper
)
self.assignment2 = AssignmentUpload.objects.create(
user=self.user2, assignmentQuestion=self.question,
assignmentFile=file_path2, question_paper=self.questionpaper
)
def test_get_assignments_for_user_files(self):
assignment_files, file_name = AssignmentUpload.objects.get_assignments(
self.questionpaper, self.question.id,
self.user1.id
)
self.assertIn("upload1.txt", assignment_files[0].assignmentFile.name)
self.assertEqual(assignment_files[0].user, self.user1)
actual_file_name = self.user1.get_full_name().replace(" ", "_")
file_name = file_name.replace(" ", "_")
self.assertEqual(file_name, actual_file_name)
def test_get_assignments_for_quiz_files(self):
assignment_files, file_name = AssignmentUpload.objects.get_assignments(
self.questionpaper
)
files = [os.path.basename(file.assignmentFile.name)
for file in assignment_files]
question_papers = [file.question_paper for file in assignment_files]
self.assertIn("upload1.txt", files)
self.assertIn("upload2.txt", files)
self.assertEqual(question_papers[0].quiz, self.questionpaper.quiz)
actual_file_name = self.quiz.description.replace(" ", "_")
file_name = file_name.replace(" ", "_")
self.assertIn(actual_file_name, file_name)
class CourseStatusTestCases(unittest.TestCase):
def setUp(self):
user = User.objects.get(username='creator')
self.course = Course.objects.create(name="Demo Course", creator=user,
enrollment="Enroll Request")
self.module = LearningModule.objects.create(name='M1', creator=user,
description='module one')
self.quiz1 = Quiz.objects.create(time_between_attempts=0, weightage=50,
description='qz1')
self.quiz2 = Quiz.objects.create(
time_between_attempts=0, weightage=100, description='qz2'
)
question = Question.objects.first()
self.qpaper1 = QuestionPaper.objects.create(quiz=self.quiz1)
self.qpaper2 = QuestionPaper.objects.create(quiz=self.quiz2)
self.qpaper1.fixed_questions.add(question)
self.qpaper2.fixed_questions.add(question)
self.qpaper1.update_total_marks()
self.qpaper2.update_total_marks()
self.qpaper1.save()
self.qpaper2.save()
self.unit_1_quiz = LearningUnit.objects.create(order=1, type='quiz',
quiz=self.quiz1)
self.unit_2_quiz = LearningUnit.objects.create(order=2, type='quiz',
quiz=self.quiz2)
self.module.learning_unit.add(self.unit_1_quiz)
self.module.learning_unit.add(self.unit_2_quiz)
self.module.save()
self.course.learning_module.add(self.module)
student = User.objects.get(username='course_user')
self.course.students.add(student)
self.course.save()
attempt = 1
ip = '127.0.0.1'
self.answerpaper1 = self.qpaper1.make_answerpaper(student, ip, attempt,
self.course.id)
self.answerpaper2 = self.qpaper2.make_answerpaper(student, ip, attempt,
self.course.id)
self.course_status = CourseStatus.objects.create(course=self.course,
user=student)
def tearDown(self):
self.course_status.delete()
self.answerpaper1.delete()
self.answerpaper2.delete()
self.qpaper1.delete()
self.qpaper2.delete()
self.quiz1.delete()
self.quiz2.delete()
self.unit_1_quiz.delete()
self.unit_2_quiz.delete()
self.module.delete()
self.course.delete()
def test_course_is_complete(self):
# When
self.course_status.completed_units.add(self.unit_1_quiz)
# Then
self.assertFalse(self.course_status.is_course_complete())
# When
self.course_status.completed_units.add(self.unit_2_quiz)
# Then
self.assertTrue(self.course_status.is_course_complete())
# Given
self.answerpaper1.marks_obtained = 1
self.answerpaper1.save()
self.answerpaper2.marks_obtained = 0
self.answerpaper2.save()
# When
self.course_status.calculate_percentage()
# Then
self.assertEqual(round(self.course_status.percentage, 2), 33.33)
# When
self.course_status.set_grade()
# Then
self.assertEqual(self.course_status.get_grade(), 'F')
# Given
self.answerpaper1.marks_obtained = 0
self.answerpaper1.save()
self.answerpaper2.marks_obtained = 1
self.answerpaper2.save()
# When
self.course_status.calculate_percentage()
# Then
self.assertEqual(round(self.course_status.percentage, 2), 66.67)
# When
self.course_status.set_grade()
# Then
self.assertEqual(self.course_status.get_grade(), 'B')
# Test get course grade after completion
self.assertEqual(self.course.get_grade(self.answerpaper1.user), 'B')
|
yarn.py
|
"""
This is a script to submit dmlc job via Yarn
dmlc will run as a Yarn application
"""
# pylint: disable=invalid-name, too-many-locals, too-many-branches, missing-docstring
from __future__ import absolute_import
import os
import sys
import subprocess
import warnings
import logging
import platform
from threading import Thread
from . import opts
from . import tracker
from .util import py_str
def yarn_submit(args, nworker, nserver, pass_env):
"""Submission function for YARN."""
is_windows = os.name == 'nt'
hadoop_home = os.getenv('HADOOP_HOME')
assert hadoop_home is not None, 'Need to set HADOOP_HOME for YARN submission.'
hadoop_binary = os.path.join(hadoop_home, 'bin', 'hadoop')
assert os.path.exists(hadoop_binary), "HADOOP_HOME does not contain the hadoop binary"
if args.jobname is None:
if args.num_servers == 0:
prefix = ('DMLC[nworker=%d]:' % args.num_workers)
else:
            prefix = ('DMLC[nworker=%d,nserver=%d]:' % (args.num_workers, args.num_servers))
args.jobname = prefix + args.command[0].split('/')[-1]
# Determine path for Yarn helpers
YARN_JAR_PATH = os.path.join(args.yarn_app_dir, 'dmlc-yarn.jar')
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
YARN_BOOT_PY = os.path.join(curr_path, 'launcher.py')
if not os.path.exists(YARN_JAR_PATH):
warnings.warn("cannot find \"%s\", I will try to run build" % YARN_JAR_PATH)
cmd = 'cd %s;./build.%s' % \
(os.path.join(os.path.dirname(__file__), os.pardir, 'yarn'),
'bat' if is_windows else 'sh')
print(cmd)
subprocess.check_call(cmd, shell=True, env=os.environ)
assert os.path.exists(YARN_JAR_PATH), "failed to build dmlc-yarn.jar, try it manually"
    # detect hadoop version
(out, _) = subprocess.Popen('%s version' % hadoop_binary,
shell=True, stdout=subprocess.PIPE).communicate()
out = py_str(out).split('\n')[0].split()
assert out[0] == 'Hadoop', 'cannot parse hadoop version string'
hadoop_version = int(out[1].split('.')[0])
(classpath, _) = subprocess.Popen('%s classpath' % hadoop_binary,
shell=True, stdout=subprocess.PIPE).communicate()
classpath = py_str(classpath).strip()
if hadoop_version < 2:
raise RuntimeError('Hadoop Version is %s, dmlc_yarn will need Yarn(Hadoop 2.0)' % out[1])
fset, new_command = opts.get_cache_file_set(args)
fset.add(YARN_JAR_PATH)
fset.add(YARN_BOOT_PY)
ar_list = []
for fname in args.archives:
fset.add(fname)
ar_list.append(os.path.basename(fname))
JAVA_HOME = os.getenv('JAVA_HOME')
if JAVA_HOME is None:
JAVA = 'java'
else:
JAVA = os.path.join(JAVA_HOME, 'bin', 'java')
cmd = '%s -cp %s%s%s org.apache.hadoop.yarn.dmlc.Client '\
% (JAVA, classpath, ';' if is_windows else ':', YARN_JAR_PATH)
env = os.environ.copy()
for k, v in pass_env.items():
env[k] = str(v)
# ship lib-stdc++.so
if args.ship_libcxx is not None:
if platform.architecture()[0] == '64bit':
libcxx = args.ship_libcxx + '/libstdc++.so.6'
else:
libcxx = args.ship_libcxx + '/libstdc++.so'
fset.add(libcxx)
# update local LD_LIBRARY_PATH
LD_LIBRARY_PATH = env['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in env else ''
env['LD_LIBRARY_PATH'] = args.ship_libcxx + ':' + LD_LIBRARY_PATH
env['DMLC_JOB_CLUSTER'] = 'yarn'
env['DMLC_WORKER_CORES'] = str(args.worker_cores)
env['DMLC_WORKER_MEMORY_MB'] = str(args.worker_memory_mb)
env['DMLC_SERVER_CORES'] = str(args.server_cores)
env['DMLC_SERVER_MEMORY_MB'] = str(args.server_memory_mb)
env['DMLC_NUM_WORKER'] = str(args.num_workers)
env['DMLC_NUM_SERVER'] = str(args.num_servers)
env['DMLC_JOB_ARCHIVES'] = ':'.join(ar_list)
for f in fset:
cmd += ' -file %s' % f
cmd += ' -jobname %s ' % args.jobname
cmd += ' -tempdir %s ' % args.hdfs_tempdir
cmd += ' -queue %s ' % args.queue
if args.yarn_app_classpath:
cmd += ' -appcp %s ' % args.yarn_app_classpath
for entry in args.env:
cmd += ' -env %s ' % entry
cmd += (' '.join(['./launcher.py'] + new_command))
logging.debug("Submit job with %d workers and %d servers", nworker, nserver)
def run():
"""internal running function."""
logging.debug(cmd)
subprocess.check_call(cmd, shell=True, env=env)
thread = Thread(target=run, args=())
    thread.daemon = True
thread.start()
return thread
def submit(args):
submit_thread = []
def yarn_submit_pass(nworker, nserver, pass_env):
submit_thread.append(yarn_submit(args, nworker, nserver, pass_env))
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
YARN_BOOT_PY = os.path.join(curr_path, 'launcher.py')
tracker.submit(args.num_workers, args.num_servers,
fun_submit=yarn_submit_pass,
pscmd=(' '.join([YARN_BOOT_PY] + args.command)))
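# Usage note: submit(args) is called by the dmlc submission front end after
# option parsing (see the `opts` module imported above). `args` must expose the
# attributes read in yarn_submit(), e.g. num_workers, num_servers, command,
# queue, hdfs_tempdir, yarn_app_dir, archives and the worker/server resource
# settings (worker_cores, worker_memory_mb, server_cores, server_memory_mb).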
|
DNS_local.py
|
import socket
from .streambase import netutils
from queue import Queue
from threading import Thread
"""
userdict looks like this:
{'name': (addr, port)}
"""
userdict = {}
threadlist = []
_stop = False
def get_localip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
def _runserver(conn):
    conn.settimeout(10)
    try:
        name = conn.recv(1024).decode().split(".")[0]  # remove .### on the end of name
    except (socket.timeout, TimeoutError):
        return False
    # add new connected users to userdict
    if name not in userdict:
        userdict[name] = conn.getpeername()
    # the client requests a name
    try:
        name_req = conn.recv(1024).decode()
    except (socket.timeout, TimeoutError):
        return False
    if name_req == "GET_ALL_USERS":
        conn.send(str(userdict).encode())
        return True
    # try to send the associated address
    try:
        out_addr = userdict[name_req]
    except KeyError:
        conn.send("No User Found".encode())
        return False
    clean_addr = str(out_addr).replace("(", "").replace(")", "").replace("'", "")
    # NOTE: clean_addr looks like: "xxx.xxx.xxx.xxx, ####"
    conn.send(clean_addr.encode())
    return True
def _beginServing(localip, port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.settimeout(5)
s.bind((localip, port))
s.listen(10)
    while True:
        if _stop: return None
        try: client = s.accept()[0]
        except (socket.timeout, TimeoutError): continue
T = Thread(target = _runserver, args = (client,))
threadlist.append(T)
T.start()
class DNS_local():
def __init__(self, **kwargs):
"""Default port 8000"""
#self.userdict = {}
self.verbose = kwargs.get("verbose", False)
self.localip = get_localip()
self.mainThread = None
self.port = kwargs.get("port", 8000)
def log(self,m):
if self.verbose: print(m)
def begin(self):
# this might not be a thing
self.mainThread = Thread(target = _beginServing, args = (self.localip, self.port,))
threadlist.append(self.mainThread)
self.mainThread.start()
self.log("DNS started with IP:{} on port: {}".format(self.localip, self.port))
    def close(self):
        global _stop
        self.log("closing")
        _stop = True
for t in threadlist:
t.join()
self.log("DNS closed")
|
graph-size-circum-trim.py
|
#! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import khmer
import sys
import screed
import os.path
import threading
import Queue
import gc
K = 32
HASHTABLE_SIZE = int(1e9)
THRESHOLD = 500
N_HT = 4
WORKER_THREADS = 5
###
RADIUS = 2
MAX_CIRCUM = 4 # 4 seems to eliminate lump in 1m.fa
MAX_VOLUME = 200
incr = 2 * RADIUS
###
GROUPSIZE = 100
###
class SequenceGroup(object):
def __init__(self, order, seqlist):
self.order = order
self.seqlist = seqlist
def is_pair(r1, r2):
a = r1['name'].split('/')[0]
b = r2['name'].split('/')[0]
return (a == b)
def trim_by_circumference(ht, name, seq):
# calculate circumference for every point.
end = len(seq) - K
is_high = False
pos = 0
for pos in range(0, end, incr):
circum = ht.count_kmers_on_radius(seq[pos:pos + K], RADIUS, MAX_VOLUME)
if circum >= MAX_CIRCUM:
is_high = True
break
# ok. sequence has high-radius k-mers; can we trim them off?
if is_high and pos > incr:
pos -= incr
# find last k-mer with a low radius:
i = 1
for i in range(1, incr):
circum = ht.count_kmers_on_radius(seq[pos + i:pos + i + K],
RADIUS, MAX_VOLUME)
if circum >= MAX_CIRCUM:
break
pos += i - 1
# now trim sequence:
seq = seq[:pos + K]
is_high = False
name += "\tTRUNC.%d" % pos
if is_high:
return None, None
else:
return name, seq
def process(inq, outq, ht):
global worker_count
while not done or not inq.empty():
try:
g = inq.get(True, 1)
except Queue.Empty:
continue
x = []
last_record = None
for record in g.seqlist:
kmer = record['sequence'][:K]
size = ht.calc_connected_graph_size(kmer, THRESHOLD)
if size >= THRESHOLD:
# keep pairs together if either is "good"
if last_record and is_pair(last_record, record):
x.append(last_record)
x.append(record)
record = None
last_record = record
y = []
for record in x:
name, seq = trim_by_circumference(ht, record['name'],
record['sequence'])
if name:
y.append((name, seq))
gg = SequenceGroup(g.order, y)
outq.put(gg)
worker_count -= 1
def write(outq, outfp):
global worker_count
groups = {}
next_group = 0
while worker_count > 0 or not outq.empty():
try:
g = outq.get(True, 1)
except Queue.Empty:
continue
groups[g.order] = g
while next_group in groups:
g = groups[next_group]
for name, seq in g.seqlist:
outfp.write('>%s\n%s\n' % (name, seq,))
del groups[next_group]
next_group += 1
gc.collect()
def main():
global done, worker_count
done = False
worker_count = 0
infile = sys.argv[1]
outfile = os.path.basename(infile) + '.graphcirc'
if len(sys.argv) == 3:
outfile = sys.argv[2]
print 'creating ht'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
print 'eating fa', infile
total_reads, n_consumed = ht.consume_fasta(infile)
outfp = open(outfile, 'w')
inqueue = Queue.Queue(50)
outqueue = Queue.Queue(50)
# worker and writer threads
for i in range(WORKER_THREADS):
t = threading.Thread(target=process, args=(inqueue, outqueue, ht))
worker_count += 1
t.start()
threading.Thread(target=write, args=(outqueue, outfp)).start()
# main thread
x = []
i = 0
group_n = 0
for n, record in enumerate(screed.fasta.fasta_iter(open(infile))):
if n % 10000 == 0:
print '...', n
i += 1
if i > GROUPSIZE:
this_name = record['name'].split('/')[0]
last_name = x[-1]['name'].split('/')[0]
if is_pair(record, x[-1]): # preserve pairs
x.append(record)
g = SequenceGroup(group_n, x)
inqueue.put(g)
x = []
else:
g = SequenceGroup(group_n, x)
inqueue.put(g)
x = [record]
group_n += 1
i = 0
else:
x.append(record)
# submit last set of sequences
g = SequenceGroup(group_n, x)
inqueue.put(g)
done = True
if __name__ == '__main__':
main()
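# Usage sketch, following the sys.argv handling in main() above:
#   python2 graph-size-circum-trim.py <reads.fa> [<output file>]
# When no output file is given, results are written to
# "<basename of reads.fa>.graphcirc" in the current directory.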
|
server_nonstop.py
|
import sys
import queue
import struct
import threading
import importlib
import torch
import torch.multiprocessing as mp
from util.utils import TcpServer, TcpAgent, timestamp
from mps.train import TrainProc
from mps.inference import InferProc
def func_get_request(active_model_name, qout):
# Listen connections
server = TcpServer('localhost', 12345)
while True:
# Get connection
conn, _ = server.accept()
agent = TcpAgent(conn)
model_name_length_b = agent.recv(4)
model_name_length = struct.unpack('I', model_name_length_b)[0]
if model_name_length == 0:
break
model_name_b = agent.recv(model_name_length)
model_name = model_name_b.decode()
if active_model_name not in model_name:
raise Exception('Invalid model name')
timestamp('tcp', 'get_name')
data_length_b = agent.recv(4)
data_length = struct.unpack('I', data_length_b)[0]
if data_length > 0:
data_b = agent.recv(data_length)
else:
data_b = None
timestamp('tcp', 'get_data')
if 'training' in model_name:
agent.send(b'FNSH')
del agent
else:
qout.put((agent, data_b))
def func_schedule(qin, p_train, p_child):
while True:
agent, data_b = qin.get()
p_child.send((agent, data_b))
#p_train.send('PAUSE')
p_child.recv()
#p_train.send('START')
def main(name=None):
# Get model name
if name is None:
model_name = sys.argv[1]
else:
model_name = name
# Create worker process
train_parent, train_child = mp.Pipe()
p_train = TrainProc(model_name, train_child)
p_train.start()
infer_parent, infer_child = mp.Pipe()
p_infer = InferProc(model_name, infer_child)
p_infer.start()
# Create threads and worker process
q_to_schedule = queue.Queue()
t_get = threading.Thread(target=func_get_request, args=(model_name, q_to_schedule))
t_get.start()
t_schedule = threading.Thread(target=func_schedule, args=(q_to_schedule, train_parent, infer_parent))
t_schedule.start()
# Accept connection
t_get.join()
t_schedule.join()
p_train.join()
p_infer.join()
if __name__ == '__main__':
mp.set_start_method('spawn')
''' modified by Ctry begin'''
main(name='inception_v3')
''' modified by Ctry end'''
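# Client-side sketch of the wire protocol handled by func_get_request above.
# Only the length-prefixed framing is taken from the code; the payload bytes
# and the model-name suffix are illustrative:
#
#   import socket, struct
#   sock = socket.create_connection(('localhost', 12345))
#   name = b'inception_v3_inference'          # must contain the active model name
#   sock.sendall(struct.pack('I', len(name)) + name)
#   data = b'<serialized inference input>'    # illustrative payload
#   sock.sendall(struct.pack('I', len(data)) + data)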
|
loaders.py
|
"""
This module gathers all user-facing functions with a `load_` prefix.
"""
import atexit
import os
import sys
import tarfile
import time
import types
import warnings
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from urllib.parse import urlsplit
import numpy as np
from more_itertools import always_iterable
from yt.data_objects.static_output import Dataset
from yt.funcs import levenshtein_distance
from yt.sample_data.api import lookup_on_disk_data
from yt.utilities.decompose import decompose_array, get_psize
from yt.utilities.exceptions import (
MountError,
YTAmbiguousDataType,
YTIllDefinedAMR,
YTSimulationNotIdentified,
YTUnidentifiedDataType,
)
from yt.utilities.hierarchy_inspection import find_lowest_subclasses
from yt.utilities.lib.misc_utilities import get_box_grids_level
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.object_registries import (
output_type_registry,
simulation_time_series_registry,
)
from yt.utilities.on_demand_imports import _pooch as pooch, _ratarmount as ratarmount
# --- Loaders for known data formats ---
def load(
fn: Union[str, "os.PathLike[str]"], *args, hint: Optional[str] = None, **kwargs
):
"""
Load a Dataset or DatasetSeries object.
The data format is automatically discovered, and the exact return type is the
corresponding subclass of :class:`yt.data_objects.static_output.Dataset`.
A :class:`yt.data_objects.time_series.DatasetSeries` is created if the first
argument is a pattern.
Parameters
----------
fn : str, os.Pathlike[str]
A path to the data location. This can be a file name, directory name, a glob
pattern, or a url (for data types that support it).
hint : str, optional
Only classes whose name include a hint are considered. If loading fails with
a YTAmbiguousDataType exception, this argument can be used to lift ambiguity.
Hints are case insensitive.
Additional arguments, if any, are passed down to the return class.
Returns
-------
:class:`yt.data_objects.static_output.Dataset` object
If fn is a single path, create a Dataset from the appropriate subclass.
:class:`yt.data_objects.time_series.DatasetSeries`
If fn is a glob pattern (i.e. containing wildcards '[]?!*'), create a series.
Raises
------
FileNotFoundError
If fn does not match any existing file or directory.
yt.utilities.exceptions.YTUnidentifiedDataType
If fn matches existing files or directories with undetermined format.
yt.utilities.exceptions.YTAmbiguousDataType
        If the data format matches more than one class of similar specialization levels.
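    Examples
    --------
    A minimal sketch; the dataset paths below are illustrative and must point
    at data that actually exists on disk:
    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    >>> ts = load("DD????/DD????")  # a glob pattern yields a DatasetSeries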
"""
fn = os.fspath(fn)
if any(wildcard in fn for wildcard in "[]?!*"):
from yt.data_objects.time_series import DatasetSeries
return DatasetSeries(fn, *args, hint=hint, **kwargs)
# This will raise FileNotFoundError if the path isn't matched
# either in the current dir or yt.config.ytcfg['data_dir_directory']
if not fn.startswith("http"):
fn = str(lookup_on_disk_data(fn))
candidates = []
for cls in output_type_registry.values():
if cls._is_valid(fn, *args, **kwargs):
candidates.append(cls)
# Find only the lowest subclasses, i.e. most specialised front ends
candidates = find_lowest_subclasses(candidates, hint=hint)
if len(candidates) == 1:
return candidates[0](fn, *args, **kwargs)
if len(candidates) > 1:
raise YTAmbiguousDataType(fn, candidates)
raise YTUnidentifiedDataType(fn, *args, **kwargs)
def load_simulation(fn, simulation_type, find_outputs=False):
"""
Load a simulation time series object of the specified simulation type.
Parameters
----------
    fn : str, os.Pathlike, or bytes (types supported by os.path.expanduser)
Name of the data file or directory.
simulation_type : str
E.g. 'Enzo'
find_outputs : bool
Defaults to False
Raises
------
FileNotFoundError
If fn is not found.
yt.utilities.exceptions.YTSimulationNotIdentified
If simulation_type is unknown.
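    Examples
    --------
    A minimal sketch; the dataset path below is illustrative:
    >>> es = load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")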
"""
fn = str(lookup_on_disk_data(fn))
try:
cls = simulation_time_series_registry[simulation_type]
except KeyError as e:
raise YTSimulationNotIdentified(simulation_type) from e
return cls(fn, find_outputs=find_outputs)
# --- Loaders for generic ("stream") data ---
def load_uniform_grid(
data,
domain_dimensions,
length_unit=None,
bbox=None,
nprocs=1,
sim_time=0.0,
mass_unit=None,
time_unit=None,
velocity_unit=None,
magnetic_unit=None,
periodicity=(True, True, True),
geometry="cartesian",
unit_system="cgs",
default_species_fields=None,
):
r"""Load a uniform grid of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
This should allow a uniform grid of data to be loaded directly into yt and
analyzed as would any others. This comes with several caveats:
* Units will be incorrect unless the unit system is explicitly
specified.
* Some functions may behave oddly, and parallelism will be
disappointing or non-existent in most cases.
* Particles may be difficult to integrate.
Particle fields are detected as one-dimensional fields.
Parameters
----------
data : dict
This is a dict of numpy arrays or (numpy array, unit spec) tuples.
The keys are the field names.
domain_dimensions : array_like
This is the domain dimensions of the grid
length_unit : string
Unit to use for lengths. Defaults to unitless.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units specified by length_unit.
Defaults to a cubic unit-length domain.
nprocs: integer, optional
If greater than 1, will create this number of subarrays out of data
sim_time : float, optional
The simulation time in seconds
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
default_species_fields : string, optional
If set, default species fields are created for H and He which also
determine the mean molecular weight. Options are "ionized" and "neutral".
Examples
--------
>>> np.random.seed(int(0x4D3D3D3))
>>> bbox = np.array([[0.0, 1.0], [-1.5, 1.5], [1.0, 2.5]])
>>> arr = np.random.random((128, 128, 128))
>>> data = dict(density=arr)
>>> ds = load_uniform_grid(data, arr.shape, length_unit="cm", bbox=bbox, nprocs=12)
>>> dd = ds.all_data()
>>> dd[("gas", "density")]
unyt_array([0.76017901, 0.96855994, 0.49205428, ..., 0.78798258,
0.97569432, 0.99453904], 'g/cm**3')
"""
from yt.frontends.stream.data_structures import (
StreamDataset,
StreamDictFieldHandler,
StreamHandler,
)
from yt.frontends.stream.definitions import (
assign_particle_data,
process_data,
set_particle_types,
)
domain_dimensions = np.array(domain_dimensions)
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64")
domain_left_edge = np.array(bbox[:, 0], "float64")
domain_right_edge = np.array(bbox[:, 1], "float64")
grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1))
# First we fix our field names, apply units to data
# and check for consistency of field shapes
field_units, data, number_of_particles = process_data(
data, grid_dims=tuple(domain_dimensions)
)
sfh = StreamDictFieldHandler()
if number_of_particles > 0:
particle_types = set_particle_types(data)
# Used much further below.
pdata = {"number_of_particles": number_of_particles}
for key in list(data.keys()):
if len(data[key].shape) == 1 or key[0] == "io":
if not isinstance(key, tuple):
field = ("io", key)
mylog.debug("Reassigning '%s' to '%s'", key, field)
else:
field = key
sfh._additional_fields += (field,)
pdata[field] = data.pop(key)
else:
particle_types = {}
if nprocs > 1:
temp = {}
new_data = {}
for key in data.keys():
psize = get_psize(np.array(data[key].shape), nprocs)
grid_left_edges, grid_right_edges, shapes, slices = decompose_array(
data[key].shape, psize, bbox
)
grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
temp[key] = [data[key][slice] for slice in slices]
for gid in range(nprocs):
new_data[gid] = {}
for key in temp.keys():
new_data[gid].update({key: temp[key][gid]})
sfh.update(new_data)
del new_data, temp
else:
sfh.update({0: data})
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
if length_unit is None:
length_unit = "code_length"
if mass_unit is None:
mass_unit = "code_mass"
if time_unit is None:
time_unit = "code_time"
if velocity_unit is None:
velocity_unit = "code_velocity"
if magnetic_unit is None:
magnetic_unit = "code_magnetic"
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype="int64"),
np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # particle count
np.zeros(nprocs).reshape((nprocs, 1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity,
)
handler.name = "UniformGridData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
if np.all(domain_dimensions[1:] == 1):
dimensionality = 1
elif domain_dimensions[2] == 1:
dimensionality = 2
else:
dimensionality = 3
handler.dimensionality = dimensionality
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamDataset(
handler,
geometry=geometry,
unit_system=unit_system,
default_species_fields=default_species_fields,
)
# Now figure out where the particles go
if number_of_particles > 0:
# This will update the stream handler too
assign_particle_data(sds, pdata, bbox)
return sds
def load_amr_grids(
grid_data,
domain_dimensions,
bbox=None,
sim_time=0.0,
length_unit=None,
mass_unit=None,
time_unit=None,
velocity_unit=None,
magnetic_unit=None,
periodicity=(True, True, True),
geometry="cartesian",
refine_by=2,
unit_system="cgs",
default_species_fields=None,
):
r"""Load a set of grids of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
This should allow a sequence of grids of varying resolution of data to be
loaded directly into yt and analyzed as would any others. This comes with
several caveats:
* Units will be incorrect unless the unit system is explicitly specified.
* Some functions may behave oddly, and parallelism will be
disappointing or non-existent in most cases.
* Particles may be difficult to integrate.
* No consistency checks are performed on the index
Parameters
----------
grid_data : list of dicts
This is a list of dicts. Each dict must have entries "left_edge",
"right_edge", "dimensions", "level", and then any remaining entries are
assumed to be fields. Field entries must map to an NDArray. The grid_data
may also include a particle count. If no particle count is supplied, the
dataset is understood to contain no particles. The grid_data will be
modified in place and can't be assumed to be static.
domain_dimensions : array_like
This is the domain dimensions of the grid
length_unit : string or float
        Unit to use for lengths. Defaults to unitless. If set to a string, the bbox
        dimensions are assumed to be in the corresponding units. If set to a float, the
        value is assumed to be the conversion from bbox dimensions to centimeters.
mass_unit : string or float
Unit to use for masses. Defaults to unitless.
time_unit : string or float
Unit to use for times. Defaults to unitless.
velocity_unit : string or float
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string or float
Unit to use for magnetic fields. Defaults to unitless.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units specified by length_unit.
Defaults to a cubic unit-length domain.
sim_time : float, optional
The simulation time in seconds
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
refine_by : integer or list/array of integers.
Specifies the refinement ratio between levels. Defaults to 2. This
can be an array, in which case it specifies for each dimension. For
instance, this can be used to say that some datasets have refinement of
1 in one dimension, indicating that they span the full range in that
dimension.
default_species_fields : string, optional
If set, default species fields are created for H and He which also
determine the mean molecular weight. Options are "ionized" and "neutral".
Examples
--------
>>> grid_data = [
... dict(
... left_edge=[0.0, 0.0, 0.0],
... right_edge=[1.0, 1.0, 1.0],
... level=0,
... dimensions=[32, 32, 32],
... number_of_particles=0,
... ),
... dict(
... left_edge=[0.25, 0.25, 0.25],
... right_edge=[0.75, 0.75, 0.75],
... level=1,
... dimensions=[32, 32, 32],
... number_of_particles=0,
... ),
... ]
...
>>> for g in grid_data:
... g[("gas", "density")] = (
... np.random.random(g["dimensions"]) * 2 ** g["level"],
... "g/cm**3",
... )
...
>>> ds = load_amr_grids(grid_data, [32, 32, 32], length_unit=1.0)
"""
from yt.frontends.stream.data_structures import (
StreamDataset,
StreamDictFieldHandler,
StreamHandler,
)
from yt.frontends.stream.definitions import process_data, set_particle_types
domain_dimensions = np.array(domain_dimensions)
ngrids = len(grid_data)
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64")
domain_left_edge = np.array(bbox[:, 0], "float64")
domain_right_edge = np.array(bbox[:, 1], "float64")
grid_levels = np.zeros((ngrids, 1), dtype="int32")
grid_left_edges = np.zeros((ngrids, 3), dtype="float64")
grid_right_edges = np.zeros((ngrids, 3), dtype="float64")
grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
number_of_particles = np.zeros((ngrids, 1), dtype="int64")
parent_ids = np.zeros(ngrids, dtype="int64") - 1
sfh = StreamDictFieldHandler()
for i, g in enumerate(grid_data):
grid_left_edges[i, :] = g.pop("left_edge")
grid_right_edges[i, :] = g.pop("right_edge")
grid_dimensions[i, :] = g.pop("dimensions")
grid_levels[i, :] = g.pop("level")
field_units, data, n_particles = process_data(
g, grid_dims=tuple(grid_dimensions[i, :])
)
number_of_particles[i, :] = n_particles
sfh[i] = data
# We now reconstruct our parent ids, so that our particle assignment can
# proceed.
mask = np.empty(ngrids, dtype="int32")
for gi in range(ngrids):
get_box_grids_level(
grid_left_edges[gi, :],
grid_right_edges[gi, :],
grid_levels[gi] + 1,
grid_left_edges,
grid_right_edges,
grid_levels,
mask,
)
ids = np.where(mask.astype("bool"))
for ci in ids:
parent_ids[ci] = gi
# Check if the grid structure is properly aligned (bug #1295)
for lvl in range(grid_levels.min() + 1, grid_levels.max() + 1):
idx = grid_levels.flatten() == lvl
dims = domain_dimensions * refine_by ** (lvl - 1)
for iax, ax in enumerate("xyz"):
cell_edges = np.linspace(
domain_left_edge[iax], domain_right_edge[iax], dims[iax], endpoint=False
)
if set(grid_left_edges[idx, iax]) - set(cell_edges):
raise YTIllDefinedAMR(lvl, ax)
if length_unit is None:
length_unit = "code_length"
if mass_unit is None:
mass_unit = "code_mass"
if time_unit is None:
time_unit = "code_time"
if velocity_unit is None:
velocity_unit = "code_velocity"
if magnetic_unit is None:
magnetic_unit = "code_magnetic"
particle_types = {}
for grid in sfh.values():
particle_types.update(set_particle_types(grid))
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
parent_ids,
number_of_particles,
np.zeros(ngrids).reshape((ngrids, 1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity,
)
handler.name = "AMRGridData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = refine_by
if np.all(domain_dimensions[1:] == 1):
dimensionality = 1
elif domain_dimensions[2] == 1:
dimensionality = 2
else:
dimensionality = 3
handler.dimensionality = dimensionality
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamDataset(
handler,
geometry=geometry,
unit_system=unit_system,
default_species_fields=default_species_fields,
)
return sds
def load_particles(
data,
length_unit=None,
bbox=None,
sim_time=None,
mass_unit=None,
time_unit=None,
velocity_unit=None,
magnetic_unit=None,
periodicity=(True, True, True),
geometry="cartesian",
unit_system="cgs",
data_source=None,
default_species_fields=None,
):
r"""Load a set of particles into yt as a
:class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
This will allow a collection of particle data to be loaded directly into
yt and analyzed as would any others. This comes with several caveats:
* There must be sufficient space in memory to contain all the particle
data.
* Parallelism will be disappointing or non-existent in most cases.
* Fluid fields are not supported.
Note: in order for the dataset to take advantage of SPH functionality,
the following two fields must be provided:
* ('io', 'density')
* ('io', 'smoothing_length')
Parameters
----------
data : dict
This is a dict of numpy arrays or (numpy array, unit name) tuples,
where the keys are the field names. Particles positions must be named
"particle_position_x", "particle_position_y", and "particle_position_z".
length_unit : float
Conversion factor from simulation length units to centimeters
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of the length_unit
sim_time : float, optional
The simulation time in seconds
mass_unit : float
Conversion factor from simulation mass units to grams
time_unit : float
Conversion factor from simulation time units to seconds
velocity_unit : float
Conversion factor from simulation velocity units to cm/s
magnetic_unit : float
Conversion factor from simulation magnetic units to gauss
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
data_source : YTSelectionContainer, optional
If set, parameters like `bbox`, `sim_time`, and code units are derived
from it.
default_species_fields : string, optional
If set, default species fields are created for H and He which also
determine the mean molecular weight. Options are "ionized" and "neutral".
Examples
--------
>>> pos = [np.random.random(128 * 128 * 128) for i in range(3)]
>>> data = dict(
... particle_position_x=pos[0],
... particle_position_y=pos[1],
... particle_position_z=pos[2],
... )
>>> bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
>>> ds = load_particles(data, 3.08e24, bbox=bbox)
"""
from yt.frontends.stream.data_structures import (
StreamDictFieldHandler,
StreamHandler,
StreamParticlesDataset,
)
from yt.frontends.stream.definitions import process_data, set_particle_types
domain_dimensions = np.ones(3, "int32")
nprocs = 1
# Parse bounding box
if data_source is not None:
le, re = data_source.get_bbox()
le = le.to_value("code_length")
re = re.to_value("code_length")
bbox = list(zip(le, re))
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64")
else:
bbox = np.array(bbox)
domain_left_edge = np.array(bbox[:, 0], "float64")
domain_right_edge = np.array(bbox[:, 1], "float64")
grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1))
# Parse simulation time
if data_source is not None:
sim_time = data_source.ds.current_time
if sim_time is None:
sim_time = 0.0
else:
sim_time = float(sim_time)
# Parse units
def parse_unit(unit, dimension):
if unit is None:
unit = "code_" + dimension
if data_source is not None:
unit = getattr(data_source.ds, dimension + "_unit", unit)
return unit
length_unit = parse_unit(length_unit, "length")
mass_unit = parse_unit(mass_unit, "mass")
time_unit = parse_unit(time_unit, "time")
velocity_unit = parse_unit(velocity_unit, "velocity")
magnetic_unit = parse_unit(magnetic_unit, "magnetic")
# Preprocess data
field_units, data, _ = process_data(data)
sfh = StreamDictFieldHandler()
pdata = {}
for key in data.keys():
if not isinstance(key, tuple):
field = ("io", key)
mylog.debug("Reassigning '%s' to '%s'", key, field)
else:
field = key
pdata[field] = data[key]
sfh._additional_fields += (field,)
data = pdata # Drop reference count
particle_types = set_particle_types(data)
sfh.update({"stream_file": data})
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype="int64"),
np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary
np.zeros(nprocs).reshape((nprocs, 1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity,
)
handler.name = "ParticleData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = 3
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamParticlesDataset(
handler,
geometry=geometry,
unit_system=unit_system,
default_species_fields=default_species_fields,
)
return sds
def load_hexahedral_mesh(
data,
connectivity,
coordinates,
length_unit=None,
bbox=None,
sim_time=0.0,
mass_unit=None,
time_unit=None,
velocity_unit=None,
magnetic_unit=None,
periodicity=(True, True, True),
geometry="cartesian",
unit_system="cgs",
):
r"""Load a hexahedral mesh of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
This should allow a semistructured grid of data to be loaded directly into
yt and analyzed as would any others. This comes with several caveats:
* Units will be incorrect unless the data has already been converted to
cgs.
* Some functions may behave oddly, and parallelism will be
disappointing or non-existent in most cases.
* Particles may be difficult to integrate.
Particle fields are detected as one-dimensional fields. The number of particles
is set by the "number_of_particles" key in data.
Parameters
----------
data : dict
This is a dict of numpy arrays, where the keys are the field names.
There must only be one. Note that the data in the numpy arrays should
define the cell-averaged value for of the quantity in in the hexahedral
cell.
connectivity : array_like
This should be of size (N,8) where N is the number of zones.
coordinates : array_like
This should be of size (M,3) where M is the number of vertices
indicated in the connectivity matrix.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of the length unit.
sim_time : float, optional
The simulation time in seconds
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
"""
from yt.frontends.stream.data_structures import (
StreamDictFieldHandler,
StreamHandler,
StreamHexahedralDataset,
)
from yt.frontends.stream.definitions import process_data, set_particle_types
domain_dimensions = np.ones(3, "int32") * 2
nprocs = 1
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64")
domain_left_edge = np.array(bbox[:, 0], "float64")
domain_right_edge = np.array(bbox[:, 1], "float64")
grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1))
field_units, data, _ = process_data(data)
sfh = StreamDictFieldHandler()
particle_types = set_particle_types(data)
sfh.update({"connectivity": connectivity, "coordinates": coordinates, 0: data})
# Simple check for axis length correctness
if len(data) > 0:
fn = list(sorted(data))[0]
array_values = data[fn]
if array_values.size != connectivity.shape[0]:
mylog.error(
"Dimensions of array must be one fewer than the coordinate set."
)
raise RuntimeError
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
if length_unit is None:
length_unit = "code_length"
if mass_unit is None:
mass_unit = "code_mass"
if time_unit is None:
time_unit = "code_time"
if velocity_unit is None:
velocity_unit = "code_velocity"
if magnetic_unit is None:
magnetic_unit = "code_magnetic"
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype="int64"),
np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary
np.zeros(nprocs).reshape((nprocs, 1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity,
)
handler.name = "HexahedralMeshData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = 3
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system)
return sds
def load_octree(
octree_mask,
data,
bbox=None,
sim_time=0.0,
length_unit=None,
mass_unit=None,
time_unit=None,
velocity_unit=None,
magnetic_unit=None,
periodicity=(True, True, True),
over_refine_factor=1,
partial_coverage=1,
unit_system="cgs",
default_species_fields=None,
):
r"""Load an octree mask into yt.
Octrees can be saved out by calling save_octree on an OctreeContainer.
This enables them to be loaded back in.
This will initialize an Octree of data. Note that fluid fields will not
work yet, or possibly ever.
Parameters
----------
octree_mask : np.ndarray[uint8_t]
This is a depth-first refinement mask for an Octree. It should be
of size n_octs * 8 (but see note about the root oct below), where
each item is 1 for an oct-cell being refined and 0 for it not being
refined. For over_refine_factors != 1, the children count will
still be 8, so there will still be n_octs * 8 entries. Note that if
the root oct is not refined, there will be only one entry
for the root, so the size of the mask will be (n_octs - 1)*8 + 1.
data : dict
        A dictionary of 1D arrays. Note that these must be of the size of the
        number of "False" values in the ``octree_mask``.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of length
sim_time : float, optional
The simulation time in seconds
length_unit : string
Unit to use for lengths. Defaults to unitless.
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
partial_coverage : boolean
Whether or not an oct can be refined cell-by-cell, or whether all
8 get refined.
default_species_fields : string, optional
If set, default species fields are created for H and He which also
determine the mean molecular weight. Options are "ionized" and "neutral".
Example
-------
>>> import numpy as np
>>> oct_mask = np.zeros(25)
... oct_mask[[0, 5, 7, 16]] = 8
>>> octree_mask = np.array(oct_mask, dtype=np.uint8)
>>> quantities = {}
>>> quantities["gas", "density"] = np.random.random((22, 1))
>>> bbox = np.array([[-10.0, 10.0], [-10.0, 10.0], [-10.0, 10.0]])
>>> ds = load_octree(
... octree_mask=octree_mask,
... data=quantities,
... bbox=bbox,
... over_refine_factor=0,
... partial_coverage=0,
... )
"""
from yt.frontends.stream.data_structures import (
StreamDictFieldHandler,
StreamHandler,
StreamOctreeDataset,
)
from yt.frontends.stream.definitions import process_data, set_particle_types
if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8:
raise TypeError("octree_mask should be a Numpy array with type uint8")
nz = 1 << (over_refine_factor)
domain_dimensions = np.array([nz, nz, nz])
nprocs = 1
if bbox is None:
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64")
domain_left_edge = np.array(bbox[:, 0], "float64")
domain_right_edge = np.array(bbox[:, 1], "float64")
grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1))
field_units, data, _ = process_data(data)
sfh = StreamDictFieldHandler()
particle_types = set_particle_types(data)
sfh.update({0: data})
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
if length_unit is None:
length_unit = "code_length"
if mass_unit is None:
mass_unit = "code_mass"
if time_unit is None:
time_unit = "code_time"
if velocity_unit is None:
velocity_unit = "code_velocity"
if magnetic_unit is None:
magnetic_unit = "code_magnetic"
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype="int64"),
np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary
np.zeros(nprocs).reshape((nprocs, 1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity,
)
handler.name = "OctreeData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = 3
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamOctreeDataset(
handler, unit_system=unit_system, default_species_fields=default_species_fields
)
sds.octree_mask = octree_mask
sds.partial_coverage = partial_coverage
sds.over_refine_factor = over_refine_factor
return sds
def load_unstructured_mesh(
connectivity,
coordinates,
node_data=None,
elem_data=None,
length_unit=None,
bbox=None,
sim_time=0.0,
mass_unit=None,
time_unit=None,
velocity_unit=None,
magnetic_unit=None,
periodicity=(False, False, False),
geometry="cartesian",
unit_system="cgs",
):
r"""Load an unstructured mesh of data into yt as a
:class:`~yt.frontends.stream.data_structures.StreamHandler`.
This should allow unstructured mesh data to be loaded directly into
yt and analyzed as any other dataset would be. Not all functionality for
visualization will be present, and some analysis functions may not yet have
been implemented.
Particle fields are detected as one-dimensional fields. The number of
particles is set by the "number_of_particles" key in data.
In the parameter descriptions below, a "vertex" is a 3D point in space, an
"element" is a single polyhedron whose location is defined by a set of
vertices, and a "mesh" is a set of polyhedral elements, each with the same
number of vertices.
Parameters
----------
connectivity : list of array_like or array_like
This should either be a single 2D array or list of 2D arrays. If this
is a list, each element in the list corresponds to the connectivity
information for a distinct mesh. Each array can have different
connectivity length and should be of shape (N,M) where N is the number
of elements and M is the number of vertices per element.
coordinates : array_like
The 3D coordinates of mesh vertices. This should be of size (L, D) where
L is the number of vertices and D is the number of coordinates per vertex
(the spatial dimensions of the dataset). Currently this must be either 2 or 3.
When loading more than one mesh, the data for each mesh should be concatenated
into a single coordinates array.
node_data : dict or list of dicts
For a single mesh, a dict mapping field names to 2D numpy arrays,
representing data defined at element vertices. For multiple meshes,
this must be a list of dicts. Note that these are not the values as a
function of the coordinates, but of the connectivity. Their shape
should be the same as the connectivity. This means that if the data is
in the shape of the coordinates, you may need to reshape them using the
`connectivity` array as an index.
elem_data : dict or list of dicts
For a single mesh, a dict mapping field names to 1D numpy arrays, where
each array has a length equal to the number of elements. The data
must be defined at the center of each mesh element and there must be
only one data value for each element. For multiple meshes, this must be
a list of dicts, with one dict for each mesh.
bbox : array_like (xdim:zdim, LE:RE), optional
Size of computational domain in units of the length unit.
sim_time : float, optional
The simulation time in seconds
length_unit : string
Unit to use for length. Defaults to unitless.
mass_unit : string
Unit to use for masses. Defaults to unitless.
time_unit : string
Unit to use for times. Defaults to unitless.
velocity_unit : string
Unit to use for velocities. Defaults to unitless.
magnetic_unit : string
Unit to use for magnetic fields. Defaults to unitless.
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
geometry : string or tuple
"cartesian", "cylindrical", "polar", "spherical", "geographic" or
"spectral_cube". Optionally, a tuple can be provided to specify the
axis ordering -- for instance, to specify that the axis ordering should
be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same
can be done for other coordinates, for instance:
("spherical", ("theta", "phi", "r")).
Examples
--------
Load a simple mesh consisting of two tets.
>>> # Coordinates for vertices of two tetrahedra
>>> coordinates = np.array(
... [
... [0.0, 0.0, 0.5],
... [0.0, 1.0, 0.5],
... [0.5, 1, 0.5],
... [0.5, 0.5, 0.0],
... [0.5, 0.5, 1.0],
... ]
... )
>>> # The indices in the coordinates array of mesh vertices.
>>> # This mesh has two elements.
>>> connectivity = np.array([[0, 1, 2, 4], [0, 1, 2, 3]])
>>> # Field data defined at the centers of the two mesh elements.
>>> elem_data = {("connect1", "elem_field"): np.array([1, 2])}
>>> # Field data defined at node vertices
>>> node_data = {
... ("connect1", "node_field"): np.array(
... [[0.0, 1.0, 2.0, 4.0], [0.0, 1.0, 2.0, 3.0]]
... )
... }
>>> ds = load_unstructured_mesh(
... connectivity, coordinates, elem_data=elem_data, node_data=node_data
... )
"""
from yt.frontends.exodus_ii.util import get_num_pseudo_dims
from yt.frontends.stream.data_structures import (
StreamDictFieldHandler,
StreamHandler,
StreamUnstructuredMeshDataset,
)
from yt.frontends.stream.definitions import process_data, set_particle_types
dimensionality = coordinates.shape[1]
domain_dimensions = np.ones(3, "int32") * 2
nprocs = 1
if elem_data is None and node_data is None:
raise RuntimeError("No data supplied in load_unstructured_mesh.")
connectivity = list(always_iterable(connectivity, base_type=np.ndarray))
num_meshes = max(1, len(connectivity))
elem_data = list(always_iterable(elem_data, base_type=dict)) or [{}] * num_meshes
node_data = list(always_iterable(node_data, base_type=dict)) or [{}] * num_meshes
data = [{} for i in range(num_meshes)]
for elem_dict, data_dict in zip(elem_data, data):
for field, values in elem_dict.items():
data_dict[field] = values
for node_dict, data_dict in zip(node_data, data):
for field, values in node_dict.items():
data_dict[field] = values
if bbox is None:
bbox = [
[
coordinates[:, i].min() - 0.1 * abs(coordinates[:, i].min()),
coordinates[:, i].max() + 0.1 * abs(coordinates[:, i].max()),
]
for i in range(dimensionality)
]
if dimensionality < 3:
bbox.append([0.0, 1.0])
if dimensionality < 2:
bbox.append([0.0, 1.0])
# handle pseudo-dims here
num_pseudo_dims = get_num_pseudo_dims(coordinates)
dimensionality -= num_pseudo_dims
for i in range(dimensionality, 3):
bbox[i][0] = 0.0
bbox[i][1] = 1.0
bbox = np.array(bbox, dtype=np.float64)
domain_left_edge = np.array(bbox[:, 0], "float64")
domain_right_edge = np.array(bbox[:, 1], "float64")
grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1))
field_units = {}
particle_types = {}
sfh = StreamDictFieldHandler()
sfh.update({"connectivity": connectivity, "coordinates": coordinates})
for i, d in enumerate(data):
_f_unit, _data, _ = process_data(d)
field_units.update(_f_unit)
sfh[i] = _data
particle_types.update(set_particle_types(d))
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
if length_unit is None:
length_unit = "code_length"
if mass_unit is None:
mass_unit = "code_mass"
if time_unit is None:
time_unit = "code_time"
if velocity_unit is None:
velocity_unit = "code_velocity"
if magnetic_unit is None:
magnetic_unit = "code_magnetic"
# I'm not sure we need any of this.
handler = StreamHandler(
grid_left_edges,
grid_right_edges,
grid_dimensions,
grid_levels,
-np.ones(nprocs, dtype="int64"),
np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary
np.zeros(nprocs).reshape((nprocs, 1)),
sfh,
field_units,
(length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
particle_types=particle_types,
periodicity=periodicity,
)
handler.name = "UnstructuredMeshData"
handler.domain_left_edge = domain_left_edge
handler.domain_right_edge = domain_right_edge
handler.refine_by = 2
handler.dimensionality = dimensionality
handler.domain_dimensions = domain_dimensions
handler.simulation_time = sim_time
handler.cosmology_simulation = 0
sds = StreamUnstructuredMeshDataset(
handler, geometry=geometry, unit_system=unit_system
)
fluid_types = ["all"]
for i in range(1, num_meshes + 1):
fluid_types += ["connect%d" % i]
sds.fluid_types = tuple(fluid_types)
def flatten(l):
return [item for sublist in l for item in sublist]
sds._node_fields = flatten([[f[1] for f in m] for m in node_data if m])
sds._elem_fields = flatten([[f[1] for f in m] for m in elem_data if m])
sds.default_field = [f for f in sds.field_list if f[0] == "connect1"][-1]
sds.default_fluid_type = sds.default_field[0]
return sds
# --- Loader for yt sample datasets ---
def load_sample(
fn: Optional[str] = None, *, progressbar: bool = True, timeout=None, **kwargs
):
r"""
Load sample data with yt.
This is a simple wrapper around :func:`~yt.loaders.load` to include fetching
data with pooch from remote source.
The data registry table can be retrieved and visualized using
:func:`~yt.sample_data.api.get_data_registry_table`.
The `filename` column contains usable keys that can be passed
as the first positional argument to load_sample.
Some data samples contain series of datasets. It may be required to
supply the relative path to a specific dataset.
Parameters
----------
fn: str
The `filename` of the dataset to load, as defined in the data registry
table.
progressbar: bool
display a progress bar (tqdm).
timeout: float or int (optional)
Maximal waiting time, in seconds, after which download is aborted.
`None` means "no limit". This parameter is directly passed down to
requests.get via pooch.HTTPDownloader
Notes
-----
- This function is experimental as of yt 4.0.0; do not rely on its exact behaviour.
- Any additional keyword argument is passed down to :func:`~yt.loaders.load`.
- In case of collision with predefined keyword arguments as set in
the data registry, the ones passed to this function take priority.
- Datasets with slashes '/' in their names can safely be used even on Windows.
Conversely, paths using backslashes '\' won't work outside of Windows, so
it is recommended to favour the UNIX convention ('/') in scripts that are meant
to be cross-platform.
- This function requires pandas and pooch.
- Corresponding sample data live at https://yt-project.org/data
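Examples
--------
A minimal, illustrative call; the dataset name below is assumed to exist in
the data registry, and any extra keyword arguments are forwarded to
:func:`~yt.loaders.load`:
>>> import yt
>>> ds = yt.load_sample("IsolatedGalaxy")  # doctest: +SKIP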
"""
if fn is None:
print(
"One can see which sample datasets are available at: https://yt-project.org/data\n"
"or alternatively by running: yt.sample_data.api.get_data_registry_table()",
file=sys.stderr,
)
return None
from yt.sample_data.api import (
_download_sample_data_file,
_get_test_data_dir_path,
get_data_registry_table,
)
pooch_logger = pooch.utils.get_logger()
# normalize path for platform portability
# for consistency with yt.load, we also convert to str explicitly,
# which gives us support for Path objects for free
fn = str(fn).replace("/", os.path.sep)
topdir, _, specific_file = fn.partition(os.path.sep)
registry_table = get_data_registry_table()
known_names: List[str] = registry_table.dropna()["filename"].to_list()
if topdir not in known_names:
msg = f"'{topdir}' is not an available dataset."
lexical_distances: List[Tuple[str, int]] = [
(name, levenshtein_distance(name, topdir)) for name in known_names
]
suggestions: List[str] = [name for name, dist in lexical_distances if dist < 4]
if len(suggestions) == 1:
msg += f" Did you mean '{suggestions[0]}' ?"
elif suggestions:
msg += " Did you mean to type any of the following ?\n\n "
msg += "\n ".join(f"'{_}'" for _ in suggestions)
raise ValueError(msg)
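# Illustrative note (not in the original source): a near-miss such as
# "IsolatedGalxy" is within Levenshtein distance 4 of "IsolatedGalaxy", so it
# would be offered as a suggestion in the error message built above.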
# PR 3089
# note: in the future the registry table should be reindexed
# so that the following line can be replaced with
#
# specs = registry_table.loc[fn]
#
# however we don't want to do it right now because the "filename" column is
# currently incomplete
specs = registry_table.query(f"`filename` == '{topdir}'").iloc[0]
load_name = specific_file or specs["load_name"] or ""
if not isinstance(specs["load_kwargs"], dict):
raise ValueError(
"The requested dataset seems to be improperly registered.\n"
"Tip: the entry in yt/sample_data_registry.json may be inconsistent with "
"https://github.com/yt-project/website/blob/master/data/datafiles.json\n"
"Please report this to https://github.com/yt-project/yt/issues/new"
)
kwargs = {**specs["load_kwargs"], **kwargs}
save_dir = _get_test_data_dir_path()
data_path = save_dir.joinpath(fn)
if save_dir.joinpath(topdir).exists():
# if the data is already available locally, `load_sample`
# only acts as a thin wrapper around `load`
if load_name and os.sep not in fn:
data_path = data_path.joinpath(load_name)
mylog.info("Sample dataset found in '%s'", data_path)
if timeout is not None:
mylog.info("Ignoring the `timeout` keyword argument received.")
return load(data_path, **kwargs)
mylog.info("'%s' is not available locally. Looking up online.", fn)
# effectively silence the pooch's logger and create our own log instead
pooch_logger.setLevel(100)
mylog.info("Downloading from %s", specs["url"])
# downloading via a pooch.Pooch instance behind the scenes
filename = urlsplit(specs["url"]).path.split("/")[-1]
tmp_file = _download_sample_data_file(
filename, progressbar=progressbar, timeout=timeout
)
# pooch has functionalities to unpack downloaded archive files,
# but it needs to be told in advance that we are downloading a tarball.
# Since that information is not necessarily trivial to guess from the filename,
# we rely on the standard library to perform a conditional unpacking instead.
if tarfile.is_tarfile(tmp_file):
mylog.info("Untaring downloaded file to '%s'", save_dir)
with tarfile.open(tmp_file) as fh:
fh.extractall(save_dir)
os.remove(tmp_file)
else:
os.replace(tmp_file, save_dir)
loadable_path = Path.joinpath(save_dir, fn)
if load_name not in str(loadable_path):
loadable_path = loadable_path.joinpath(load_name, specific_file)
return load(loadable_path, **kwargs)
def _mount_helper(
archive: str, mountPoint: str, ratarmount_kwa: Dict, conn: Connection
):
try:
fuseOperationsObject = ratarmount.TarMount(
pathToMount=archive,
mountPoint=mountPoint,
lazyMounting=True,
**ratarmount_kwa,
)
fuseOperationsObject.use_ns = True
conn.send(True)
except Exception:
conn.send(False)
raise
ratarmount.fuse.FUSE(
operations=fuseOperationsObject,
mountpoint=mountPoint,
foreground=True,
nothreads=True,
)
# --- Loader for tar-based datasets ---
def load_archive(
fn: Union[str, Path],
path: str,
ratarmount_kwa: Optional[Dict] = None,
mount_timeout: float = 1.0,
*args,
**kwargs,
) -> Dataset:
r"""
Load archived data with yt.
This is a wrapper around :func:`~yt.loaders.load` that mounts the archive
as a read-only filesystem, loads the dataset from it, and unmounts it at exit.
Parameters
----------
fn: str
The `filename` of the archive containing the dataset.
path: str
The path to the dataset in the archive.
ratarmount_kwa: dict, optional
Optional parameters to pass to ratarmount to mount the archive.
mount_timeout: float, optional
The timeout to wait for ratarmount to mount the archive. Default is 1s.
Notes
-----
- This function is experimental and may not work, depending on your setup.
- Any additional keyword argument is passed down to :func:`~yt.loaders.load`.
- This function requires ratarmount to be installed.
- This function does not work on Windows systems.
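Examples
--------
An illustrative call only; the archive name and the inner dataset path are
hypothetical and must match the layout of your own archive:
>>> ds = load_archive("my_run.tar.gz", "my_run/DD0010/DD0010")  # doctest: +SKIP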
"""
warnings.warn(
"The 'load_archive' function is still experimental and may be unstable."
)
fn = os.path.expanduser(fn)
# This will raise FileNotFoundError if the path isn't matched
# either in the current dir or yt.config.ytcfg['data_dir_directory']
if not fn.startswith("http"):
fn = str(lookup_on_disk_data(fn))
if ratarmount_kwa is None:
ratarmount_kwa = {}
try:
tarfile.open(fn)
except tarfile.ReadError:
raise YTUnidentifiedDataType(fn, *args, **kwargs)
# Note: the temporary directory will be created by ratarmount
tempdir = fn + ".mount"
tempdir_base = tempdir
i = 0
while os.path.exists(tempdir):
i += 1
tempdir = f"{tempdir_base}.{i}"
parent_conn, child_conn = Pipe()
proc = Process(target=_mount_helper, args=(fn, tempdir, ratarmount_kwa, child_conn))
proc.start()
if not parent_conn.recv():
raise MountError(f"An error occurred while mounting {fn} in {tempdir}")
# Note: the mounting needs to happen in another process which
# needs be run in the foreground (otherwise it may
# unmount). To prevent a race-condition here, we wait
# for the folder to be mounted within a reasonable time.
t = 0.0
while t < mount_timeout:
if os.path.ismount(tempdir):
break
time.sleep(0.1)
t += 0.1
else:
raise MountError(f"Folder {tempdir} does not appear to be mounted")
# We need to kill the process at exit (to force unmounting)
def umount_callback():
proc.terminate()
atexit.register(umount_callback)
# Alternatively, can dismount manually
def del_callback(self):
proc.terminate()
atexit.unregister(umount_callback)
ds = load(os.path.join(tempdir, path), *args, **kwargs)
ds.dismount = types.MethodType(del_callback, ds)
return ds
|
main_gui.py
|
from tkinter import (
FALSE,
Button,
E,
Label,
Menu,
N,
S,
StringVar,
Tk,
W,
filedialog,
messagebox,
ttk,
)
import threading
from memory_reader import MemoryReader
reader = MemoryReader()
reader.getProcess()
proc_open = False
def read_config():
config = reader.readConfig()
if config is not None:
print("Config Values: ")
for b in config.buttons:
print(hex(b))
def wait_for_process():
global reader, proc_open
while True:
if proc_open != reader.hasWorkingPID():
proc_open = not proc_open
if proc_open:
loadstatus.set("+R Open!")
else:
loadstatus.set("+R Closed!")
# define ui-activated methods here
def import_recording(*args):
filename = filedialog.askopenfilename(
filetypes=[["AC+R Recording", ".acrec"]],
title="Import Recording",
defaultextension=".acrec",
)
if filename != "":
yesno_answer = messagebox.askyesno(
message="Are you sure you want to overwrite the current ingame recording?",
icon="warning",
title="Overwrite?",
)
if yesno_answer:
messagebox.showinfo("You hit yes.", "RIP old recording.")
loadstatus.set("Current recording: {}".format(filename))
# do what you will with the filename and its data
elif not yesno_answer:
messagebox.showinfo("You hit no.", "Save the recordings!")
def export_recording(*args):
filename = filedialog.asksaveasfilename(
filetypes=[["AC+R Recording", ".acrec"]],
confirmoverwrite=True,
title="Export Recording",
defaultextension=".acrec",
)
if filename != "":
# write data to file
messagebox.showinfo(
title="Export Successful",
message="Exported current recording to {}".format(filename),
)
root = Tk()
root.title("+R Recording Manager")
root.option_add("*tearOff", FALSE)
loadstatus = StringVar()
loadstatus.set(
"+R Closed!"
) # may want to adjust based on game open-ness among other things
root.geometry("400x400+200+200")
menubar = Menu(root)
root["menu"] = menubar
menu_file = Menu(menubar)
menubar.add_cascade(menu=menu_file, label="File")
menu_file.add_command(label="Import...", command=import_recording)
menu_file.add_command(label="Export", command=export_recording)
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
root.rowconfigure(0, weight=1)
root.columnconfigure(0, weight=1)
Button(mainframe, text="Read Config", command=read_config).grid(
column=1, row=2, sticky=(N, W, E)
)
Label(mainframe, textvariable=loadstatus).grid(column=1, row=1, sticky=(N, W, E))
proc_status_t = threading.Thread(target=wait_for_process)
proc_status_t.daemon = True
proc_status_t.start()
root.mainloop()
# anything that executes after mainloop is post-termination
|
Sender.py
|
#!/usr/bin/python
# -*- coding: ascii -*-
# $Id: bCNC.py,v 1.6 2014/10/15 15:04:48 bnv Exp bnv $
#
# Author: vvlachoudis@gmail.com
# Date: 17-Jun-2015
__author__ = "Vasilis Vlachoudis"
__email__ = "vvlachoudis@gmail.com"
import os
import re
import sys
import rexx
import time
import threading
import webbrowser
from datetime import datetime
try:
import serial
except:
serial = None
try:
from Queue import *
except ImportError:
from queue import *
from CNC import WAIT, MSG, UPDATE, WCS, CNC, GCode
import Utils
import Pendant
WIKI = "https://github.com/vlachoudis/bCNC/wiki"
SERIAL_POLL = 0.125 # s
SERIAL_TIMEOUT = 0.10 # s
G_POLL = 10 # s
RX_BUFFER_SIZE = 128
OV_FEED_100 = chr(0x90) # Extended override commands
OV_FEED_i10 = chr(0x91)
OV_FEED_d10 = chr(0x92)
OV_FEED_i1 = chr(0x93)
OV_FEED_d1 = chr(0x94)
OV_RAPID_100 = chr(0x95)
OV_RAPID_50 = chr(0x96)
OV_RAPID_25 = chr(0x97)
OV_SPINDLE_100 = chr(0x99)
OV_SPINDLE_i10 = chr(0x9A)
OV_SPINDLE_d10 = chr(0x9B)
OV_SPINDLE_i1 = chr(0x9C)
OV_SPINDLE_d1 = chr(0x9D)
OV_SPINDLE_STOP = chr(0x9E)
OV_FLOOD_TOGGLE = chr(0xA0)
OV_MIST_TOGGLE = chr(0xA1)
GPAT = re.compile(r"[A-Za-z]\d+.*")
STATUSPAT = re.compile(r"^<(\w*?),MPos:([+\-]?\d*\.\d*),([+\-]?\d*\.\d*),([+\-]?\d*\.\d*),WPos:([+\-]?\d*\.\d*),([+\-]?\d*\.\d*),([+\-]?\d*\.\d*),?(.*)>$")
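# Illustrative sample (an assumption about the Grbl v0.9 report format):
# STATUSPAT is meant to match a status line such as
# <Idle,MPos:0.000,0.000,0.000,WPos:0.000,0.000,0.000>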
POSPAT = re.compile(r"^\[(...):([+\-]?\d*\.\d*),([+\-]?\d*\.\d*),([+\-]?\d*\.\d*):?(\d*)\]$")
TLOPAT = re.compile(r"^\[(...):([+\-]?\d*\.\d*)\]$")
DOLLARPAT = re.compile(r"^\[G\d* .*\]$")
FEEDPAT = re.compile(r"^(.*)[fF](\d+\.?\d+)(.*)$")
SPLITPAT = re.compile(r"[:,]")
VARPAT = re.compile(r"^\$(\d+)=(\d*\.?\d*) *\(?.*")
CONNECTED = "Connected"
NOT_CONNECTED = "Not connected"
STATECOLOR = { "Alarm" : "Red",
"Run" : "LightGreen",
"Hold" : "Orange",
"Hold:0" : "Orange",
"Hold:1" : "Orange",
CONNECTED : "Orange",
NOT_CONNECTED : "OrangeRed"
}
STATE_DESC = {
"Hold:0" : "Hold complete",
"Hold:1" : "Hold in-progress",
"Door:0" : "Door closed",
"Door:1" : "Machine stopped",
"Door:2" : "Door opened",
"Door:3" : "Door closed and resuming",
}
STATECOLORDEF = "LightYellow"
# From https://github.com/grbl/grbl/wiki/Interfacing-with-Grbl
ERROR_CODES = {
"Run" : _("bCNC is currently sending a gcode program to Grbl"),
"Idle" : _("Grbl is in idle state and waiting for user commands"),
"Hold" : _("Grbl is on hold state. Click on resume (pause) to continue"),
"Alarm" : _("Alarm is an emergency state. Something has gone terribly wrong when these occur. Typically, they are caused by limit error when the machine has moved or wants to move outside the machine space and crash into something. They also report problems if Grbl is lost and can't guarantee positioning or a probe command has failed. Once in alarm-mode, Grbl will lock out and shut down everything until the user issues a reset. Even after a reset, Grbl will remain in alarm-mode, block all G-code from being executed, but allows the user to override the alarm manually. This is to ensure the user knows and acknowledges the problem and has taken steps to fix or account for it."),
NOT_CONNECTED : _("Grbl is not connected. Please specify the correct port and click Open."),
CONNECTED : _("Connection is established with Grbl"),
"ok" : _("All is good! Everything in the last line was understood by Grbl and was successfully processed and executed."),
"error:1" : _("G-code words consist of a letter and a value. Letter was not found."),
"error:2" : _("Numeric value format is not valid or missing an expected value."),
"error:3" : _("Grbl '$' system command was not recognized or supported."),
"error:4" : _("Negative value received for an expected positive value."),
"error:5" : _("Homing cycle is not enabled via settings."),
"error:6" : _("Minimum step pulse time must be greater than 3usec"),
"error:7" : _("EEPROM read failed. Reset and restored to default values."),
"error:8" : _("Grbl '$' command cannot be used unless Grbl is IDLE. Ensures smooth operation during a job."),
"error:9" : _("G-code locked out during alarm or jog state"),
"error:10" : _("Soft limits cannot be enabled without homing also enabled."),
"error:11" : _("Max characters per line exceeded. Line was not processed and executed."),
"error:12" : _("(Compile Option) Grbl '$' setting value exceeds the maximum step rate supported."),
"error:13" : _("Safety door detected as opened and door state initiated."),
"error:14" : _("(Grbl-Mega Only) Build info or startup line exceeded EEPROM line length limit."),
"error:15" : _("Jog target exceeds machine travel. Command ignored."),
"error:16" : _("Jog command with no '=' or contains prohibited g-code."),
"error:17" : _("Laser mode requires PWM output."),
"error:20" : _("Unsupported or invalid g-code command found in block."),
"error:21" : _("More than one g-code command from same modal group found in block."),
"error:22" : _("Feed rate has not yet been set or is undefined."),
"error:23" : _("G-code command in block requires an integer value."),
"error:24" : _("Two G-code commands that both require the use of the XYZ axis words were detected in the block."),
"error:25" : _("A G-code word was repeated in the block."),
"error:26" : _("A G-code command implicitly or explicitly requires XYZ axis words in the block, but none were detected."),
"error:27" : _("N line number value is not within the valid range of 1 - 9,999,999."),
"error:28" : _("A G-code command was sent, but is missing some required P or L value words in the line."),
"error:29" : _("Grbl supports six work coordinate systems G54-G59. G59.1, G59.2, and G59.3 are not supported."),
"error:30" : _("The G53 G-code command requires either a G0 seek or G1 feed motion mode to be active. A different motion was active."),
"error:31" : _("There are unused axis words in the block and G80 motion mode cancel is active."),
"error:32" : _("A G2 or G3 arc was commanded but there are no XYZ axis words in the selected plane to trace the arc."),
"error:33" : _("The motion command has an invalid target. G2, G3, and G38.2 generates this error, if the arc is impossible to generate or if the probe target is the current position."),
"error:34" : _("A G2 or G3 arc, traced with the radius definition, had a mathematical error when computing the arc geometry. Try either breaking up the arc into semi-circles or quadrants, or redefine them with the arc offset definition."),
"error:35" : _("A G2 or G3 arc, traced with the offset definition, is missing the IJK offset word in the selected plane to trace the arc."),
"error:36" : _("There are unused, leftover G-code words that aren't used by any command in the block."),
"error:37" : _("The G43.1 dynamic tool length offset command cannot apply an offset to an axis other than its configured axis. The Grbl default axis is the Z-axis."),
"ALARM:1" : _("Hard limit triggered. Machine position is likely lost due to sudden and immediate halt. Re-homing is highly recommended."),
"ALARM:2" : _("G-code motion target exceeds machine travel. Machine position safely retained. Alarm may be unlocked."),
"ALARM:3" : _("Reset while in motion. Grbl cannot guarantee position. Lost steps are likely. Re-homing is highly recommended."),
"ALARM:4" : _("Probe fail. The probe is not in the expected initial state before starting probe cycle, where G38.2 and G38.3 is not triggered and G38.4 and G38.5 is triggered."),
"ALARM:5" : _("Probe fail. Probe did not contact the workpiece within the programmed travel for G38.2 and G38.4."),
"ALARM:6" : _("Homing fail. Reset during active homing cycle."),
"ALARM:7" : _("Homing fail. Safety door was opened during active homing cycle."),
"ALARM:8" : _("Homing fail. Cycle failed to clear limit switch when pulling off. Try increasing pull-off setting or check wiring."),
"ALARM:9" : _("Homing fail. Could not find limit switch within search distance. Defined as 1.5 * max_travel on search and 5 * pulloff on locate phases."),
"Hold:0" : _("Hold complete. Ready to resume."),
"Hold:1" : _("Hold in-progress. Reset will throw an alarm."),
"Door:0" : _("Door closed. Ready to resume."),
"Door:1" : _("Machine stopped. Door still ajar. Can't resume until closed."),
"Door:2" : _("Door opened. Hold (or parking retract) in-progress. Reset will throw an alarm."),
"Door:3" : _("Door closed and resuming. Restoring from park, if applicable. Reset will throw an alarm."),
}
# Convert Grbl V1.0 codes to Grbl V0.9
for e1,e0 in ( ("error: Expected command letter", "error:1"),
("error: Bad number format", "error:2"),
("error: Invalid statement", "error:3"),
("error: Value < 0", "error:4"),
("error: Setting disabled", "error:5"),
("error: Value < 3 usec", "error:6"),
("error: EEPROM read fail. Using defaults", "error:7"),
("error: Not idle", "error:8"),
("error: G-code lock", "error:9"),
("error: Homing not enabled", "error:10"),
("error: Line overflow", "error:11"),
("error: Step rate > 30kHz*", "error:12"),
("error: Check Door", "error:13"),
("error: Line length exceeded", "error:14"),
("error: Travel exceeded", "error:15"),
("error: Invalid jog command", "error:16"),
("error: Unsupported command", "error:20"),
("error: Modal group violation", "error:21"),
("error: Undefined feed rate", "error:22"),
("error: Invalid gcode ID:23", "error:23"),
("error: Invalid gcode ID:24", "error:24"),
("error: Invalid gcode ID:25", "error:25"),
("error: Invalid gcode ID:26", "error:26"),
("error: Invalid gcode ID:27", "error:27"),
("error: Invalid gcode ID:28", "error:28"),
("error: Invalid gcode ID:29", "error:29"),
("error: Invalid gcode ID:30", "error:30"),
("error: Invalid gcode ID:31", "error:31"),
("error: Invalid gcode ID:32", "error:32"),
("error: Invalid gcode ID:33", "error:33"),
("error: Invalid gcode ID:34", "error:34"),
("error: Invalid gcode ID:35", "error:35"),
("error: Invalid gcode ID:36", "error:36"),
("error: Invalid gcode ID:37", "error:37"),
("ALARM: Hard limit", "ALARM:1"),
("ALARM: Soft limit", "ALARM:2"),
("ALARM: Abort during cycle", "ALARM:3"),
("ALARM: Probe fail", "ALARM:4"),
("ALARM: Probe fail", "ALARM:5"),
("ALARM: Homing fail", "ALARM:6"),
("ALARM: Homing fail", "ALARM:7"),
("ALARM: Homing fail", "ALARM:8"),
("ALARM: Homing fail", "ALARM:9") ):
ERROR_CODES[e1] = ERROR_CODES[e0]
#==============================================================================
# bCNC Sender class
#==============================================================================
class Sender:
# Messages types for log Queue
MSG_BUFFER = 0 # write to buffer one command
MSG_SEND = 1 # send message
MSG_RECEIVE = 2 # receive message from controller
MSG_OK = 3 # ok response from controller, move top most command to terminal
MSG_ERROR = 4 # error message or exception
MSG_RUNEND = 5 # run ended
MSG_CLEAR = 6 # clear buffer
def __init__(self):
# Global variables
self.history = []
self._historyPos = None
CNC.loadConfig(Utils.config)
self.gcode = GCode()
self.cnc = self.gcode.cnc
self.log = Queue() # Log queue returned from GRBL
self.queue = Queue() # Command queue to be sent to GRBL
self.pendant = Queue() # Command queue to be executed from Pendant
self.serial = None
self.thread = None
self.controller = Utils.CONTROLLER["Grbl"]
self._posUpdate = False # Update position
self._probeUpdate= False # Update probe
self._gUpdate = False # Update $G
self._update = None # Generic update
self.running = False
self._runLines = 0
self._quit = 0 # Quit counter to exit program
self._stop = False # Raise to stop current run
self._pause = False # machine is on Hold
self._alarm = True # Display alarm message if true
self._msg = None
self._sumcline = 0
self._lastFeed = 0
self._newFeed = 0
self._onStart = ""
self._onStop = ""
#----------------------------------------------------------------------
def quit(self, event=None):
self.saveConfig()
Pendant.stop()
#----------------------------------------------------------------------
def loadConfig(self):
self.controller = Utils.CONTROLLER.get(Utils.getStr("Connection", "controller"), Utils.GRBL0)
Pendant.port = Utils.getInt("Connection","pendantport",Pendant.port)
GCode.LOOP_MERGE = Utils.getBool("File","dxfloopmerge")
self.loadHistory()
#----------------------------------------------------------------------
def saveConfig(self):
self.saveHistory()
#----------------------------------------------------------------------
def loadHistory(self):
try:
f = open(Utils.hisFile,"r")
except:
return
self.history = [x.strip() for x in f]
f.close()
#----------------------------------------------------------------------
def saveHistory(self):
try:
f = open(Utils.hisFile,"w")
except:
return
f.write("\n".join(self.history))
f.close()
#----------------------------------------------------------------------
# Evaluate a line for possible expressions
# may raise a Python exception, which needs to be caught
#----------------------------------------------------------------------
def evaluate(self, line):
return self.gcode.evaluate(CNC.compileLine(line,True))
#----------------------------------------------------------------------
# Execute a line as gcode if pattern matches
# @return True on success
# False otherwise
#----------------------------------------------------------------------
def executeGcode(self, line):
if isinstance(line, tuple) or \
line[0] in ("$","!","~","?","(","@") or GPAT.match(line):
self.sendGCode(line)
return True
return False
#----------------------------------------------------------------------
# Execute a single command
#----------------------------------------------------------------------
def executeCommand(self, line):
#print
#print "<<<",line
#try:
# line = self.gcode.evaluate(CNC.compileLine(line,True))
#except:
# return "Evaluation error", sys.exc_info()[1]
#print ">>>",line
if line is None: return
oline = line.strip()
line = oline.replace(","," ").split()
cmd = line[0].upper()
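# Note (illustrative): rexx.abbrev("ABSOLUTE", cmd, 3) accepts any prefix of
# "ABSOLUTE" of length >= 3, e.g. "ABS", "ABSOL" or "ABSOLUTE" itself, which
# is what allows the abbreviated command names documented below.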
# ABS*OLUTE: Set absolute coordinates
if rexx.abbrev("ABSOLUTE",cmd,3):
self.sendGCode("G90")
# HELP: open browser to display help
elif cmd == "HELP":
self.help()
# HOME: perform a homing cycle
elif cmd == "HOME":
self.home()
# LO*AD [filename]: load filename containing g-code
elif rexx.abbrev("LOAD",cmd,2):
self.load(line[1])
# OPEN: open serial connection to grbl
# CLOSE: close serial connection to grbl
elif cmd in ("OPEN","CLOSE"):
self.openClose()
# QU*IT: quit program
# EX*IT: exit program
elif rexx.abbrev("QUIT",cmd,2) or rexx.abbrev("EXIT",cmd,2):
self.quit()
# PAUSE: pause cycle
elif cmd == "PAUSE":
self.pause()
# RESUME: resume
elif cmd == "RESUME":
self.resume()
# FEEDHOLD: feedhold
elif cmd == "FEEDHOLD":
self.feedHold()
# REL*ATIVE: switch to relative coordinates
elif rexx.abbrev("RELATIVE",cmd,3):
self.sendGCode("G91")
# RESET: perform a soft reset to grbl
elif cmd == "RESET":
self.softReset()
# RUN: run g-code
elif cmd == "RUN":
self.run()
# SAFE [z]: safe z to move
elif cmd=="SAFE":
try: CNC.vars["safe"] = float(line[1])
except: pass
self.statusbar["text"] = "Safe Z= %g"%(CNC.vars["safe"])
# SA*VE [filename]: save to filename or to default name
elif rexx.abbrev("SAVE",cmd,2):
if len(line)>1:
self.save(line[1])
else:
self.saveAll()
# SENDHEX: send a hex-char in grbl
elif cmd == "SENDHEX":
self.sendHex(line[1])
# SET [x [y [z]]]: set x,y,z coordinates to current workspace
elif cmd == "SET":
try: x = float(line[1])
except: x = None
try: y = float(line[2])
except: y = None
try: z = float(line[3])
except: z = None
self._wcsSet(x,y,z)
elif cmd == "SET0":
self._wcsSet(0.,0.,0.)
elif cmd == "SETX":
try: x = float(line[1])
except: x = ""
self._wcsSet(x,None,None)
elif cmd == "SETY":
try: y = float(line[1])
except: y = ""
self._wcsSet(None,y,None)
elif cmd == "SETZ":
try: z = float(line[1])
except: z = ""
self._wcsSet(None,None,z)
# STOP: stop current run
elif cmd == "STOP":
self.stopRun()
# UNL*OCK: unlock grbl
elif rexx.abbrev("UNLOCK",cmd,3):
self.unlock()
# Send commands to SMOOTHIE
elif self.controller == Utils.SMOOTHIE:
if line[0] in ( "help", "version", "mem", "ls",
"cd", "pwd", "cat", "rm", "mv",
"remount", "play", "progress", "abort",
"reset", "dfu", "break", "config-get",
"config-set", "get", "set_temp", "get",
"get", "net", "load", "save", "upload",
"calc_thermistor", "thermistors", "md5sum"):
self.serial.write(oline+"\n")
else:
return _("unknown command"),_("Invalid command %s")%(oline)
#----------------------------------------------------------------------
def help(self, event=None):
webbrowser.open(WIKI,new=2)
#----------------------------------------------------------------------
def loadRecent(self, recent):
filename = Utils.getRecent(recent)
if filename is None: return
self.load(filename)
#----------------------------------------------------------------------
def _loadRecent0(self,event): self.loadRecent(0)
def _loadRecent1(self,event): self.loadRecent(1)
def _loadRecent2(self,event): self.loadRecent(2)
def _loadRecent3(self,event): self.loadRecent(3)
def _loadRecent4(self,event): self.loadRecent(4)
def _loadRecent5(self,event): self.loadRecent(5)
def _loadRecent6(self,event): self.loadRecent(6)
def _loadRecent7(self,event): self.loadRecent(7)
def _loadRecent8(self,event): self.loadRecent(8)
def _loadRecent9(self,event): self.loadRecent(9)
#----------------------------------------------------------------------
def _saveConfigFile(self, filename=None):
if filename is None:
filename = self.gcode.filename
Utils.setUtf("File", "dir", os.path.dirname(os.path.abspath(filename)))
Utils.setUtf("File", "file", os.path.basename(filename))
Utils.setUtf("File", "probe", os.path.basename(self.gcode.probe.filename))
#----------------------------------------------------------------------
# Load a file into editor
#----------------------------------------------------------------------
def load(self, filename):
fn,ext = os.path.splitext(filename)
ext = ext.lower()
if ext==".probe":
if filename is not None:
self.gcode.probe.filename = filename
self._saveConfigFile()
self.gcode.probe.load(filename)
elif ext == ".orient":
# save orientation file
self.gcode.orient.load(filename)
elif ext == ".stl":
# FIXME: implements solid import???
pass
elif ext==".dxf":
self.gcode.init()
self.gcode.importDXF(filename)
self._saveConfigFile(filename)
else:
self.gcode.load(filename)
self._saveConfigFile()
Utils.addRecent(filename)
#----------------------------------------------------------------------
def save(self, filename):
fn,ext = os.path.splitext(filename)
ext = ext.lower()
if ext == ".probe":
# save probe
if filename is not None:
self.gcode.probe.filename = filename
self._saveConfigFile()
if not self.gcode.probe.isEmpty():
self.gcode.probe.save()
elif ext == ".orient":
# save orientation file
self.gcode.orient.save(filename)
elif ext == ".stl":
#save probe as STL
self.gcode.probe.saveAsSTL(filename)
elif ext == ".dxf":
return self.gcode.saveDXF(filename)
elif ext == ".txt":
#save gcode as txt (only enable blocks and no bCNC metadata)
return self.gcode.saveTXT(filename)
else:
if filename is not None:
self.gcode.filename = filename
self._saveConfigFile()
Utils.addRecent(self.gcode.filename)
return self.gcode.save()
#----------------------------------------------------------------------
def saveAll(self, event=None):
if self.gcode.filename:
self.save(self.gcode.filename)
if self.gcode.probe.filename:
self.save(self.gcode.probe.filename)
return "break"
#----------------------------------------------------------------------
# Open serial port
#----------------------------------------------------------------------
def open(self, device, baudrate):
#self.serial = serial.Serial(
self.serial = serial.serial_for_url(
device,
baudrate,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=SERIAL_TIMEOUT,
xonxoff=False,
rtscts=False)
# Toggle DTR to reset Arduino
try:
self.serial.setDTR(0)
except IOError:
pass
time.sleep(1)
CNC.vars["state"] = CONNECTED
CNC.vars["color"] = STATECOLOR[CNC.vars["state"]]
#self.state.config(text=CNC.vars["state"],
# background=CNC.vars["color"])
# toss any data already received, see
# http://pyserial.sourceforge.net/pyserial_api.html#serial.Serial.flushInput
self.serial.flushInput()
try:
self.serial.setDTR(1)
except IOError:
pass
time.sleep(1)
self.serial.write(b"\n\n")
self._gcount = 0
self._alarm = True
self.thread = threading.Thread(target=self.serialIO)
self.thread.start()
return True
#----------------------------------------------------------------------
# Close serial port
#----------------------------------------------------------------------
def close(self):
if self.serial is None: return
try:
self.stopRun()
except:
pass
self._runLines = 0
self.thread = None
time.sleep(1)
try:
self.serial.close()
except:
pass
self.serial = None
CNC.vars["state"] = NOT_CONNECTED
CNC.vars["color"] = STATECOLOR[CNC.vars["state"]]
#----------------------------------------------------------------------
# Send to controller a gcode or command
# WARNING: it has to be a single line!
#----------------------------------------------------------------------
def sendGCode(self, cmd):
if self.serial and not self.running:
if isinstance(cmd,tuple):
self.queue.put(cmd)
else:
self.queue.put(cmd+"\n")
#----------------------------------------------------------------------
def sendHex(self, hexcode):
if self.serial is None: return
self.serial.write(chr(int(hexcode,16)))
self.serial.flush()
#----------------------------------------------------------------------
def hardReset(self):
self.busy()
if self.serial is not None:
if self.controller == Utils.SMOOTHIE:
self.serial.write(b"reset\n")
self.openClose()
if self.controller == Utils.SMOOTHIE:
time.sleep(6)
self.openClose()
self.stopProbe()
self._alarm = False
CNC.vars["_OvChanged"] = True # force a feed change if any
self.notBusy()
#----------------------------------------------------------------------
def softReset(self, clearAlarm=True):
if self.serial:
# if self.controller in (Utils.GRBL, Utils.GRBL1):
self.serial.write(b"\030")
# elif self.controller == Utils.SMOOTHIE:
# self.serial.write(b"reset\n")
self.stopProbe()
if clearAlarm: self._alarm = False
CNC.vars["_OvChanged"] = True # force a feed change if any
#----------------------------------------------------------------------
def unlock(self, clearAlarm=True):
if clearAlarm: self._alarm = False
self.sendGCode("$X")
#----------------------------------------------------------------------
def home(self, event=None):
self._alarm = False
self.sendGCode("$H")
#----------------------------------------------------------------------
def viewSettings(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$$")
def viewParameters(self):
self.sendGCode("$#")
def viewState(self):
self.sendGCode("$G")
def viewBuild(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$I")
elif self.controller == Utils.SMOOTHIE:
self.serial.write(b"version\n")
def viewStartup(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$N")
def checkGcode(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$C")
def grblHelp(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$")
elif self.controller == Utils.SMOOTHIE:
self.serial.write(b"help\n")
def grblRestoreSettings(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$RST=$")
def grblRestoreWCS(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$RST=#")
def grblRestoreAll(self):
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.sendGCode("$RST=#")
#----------------------------------------------------------------------
def goto(self, x=None, y=None, z=None):
cmd = "G90G0"
if x is not None: cmd += "X%g"%(x)
if y is not None: cmd += "Y%g"%(y)
if z is not None: cmd += "Z%g"%(z)
self.sendGCode("%s"%(cmd))
#----------------------------------------------------------------------
# FIXME Duplicate with ControlPage
#----------------------------------------------------------------------
def _wcsSet(self, x, y, z):
p = WCS.index(CNC.vars["WCS"])
if p<6:
cmd = "G10L20P%d"%(p+1)
elif p==6:
cmd = "G28.1"
elif p==7:
cmd = "G30.1"
elif p==8:
cmd = "G92"
pos = ""
if x is not None and abs(x)<10000.0: pos += "X"+str(x)
if y is not None and abs(y)<10000.0: pos += "Y"+str(y)
if z is not None and abs(z)<10000.0: pos += "Z"+str(z)
cmd += pos
self.sendGCode(cmd)
self.sendGCode("$#")
self.event_generate("<<Status>>",
data=(_("Set workspace %s to %s")%(WCS[p],pos)))
#data=(_("Set workspace %s to %s")%(WCS[p],pos)).encode("utf8"))
self.event_generate("<<CanvasFocus>>")
#----------------------------------------------------------------------
def feedHold(self, event=None):
if event is not None and not self.acceptKey(True): return
if self.serial is None: return
self.serial.write(b"!")
self.serial.flush()
self._pause = True
#----------------------------------------------------------------------
def resume(self, event=None):
if event is not None and not self.acceptKey(True): return
if self.serial is None: return
self.serial.write(b"~")
self.serial.flush()
self._msg = None
self._alarm = False
self._pause = False
#----------------------------------------------------------------------
def pause(self, event=None):
if self.serial is None: return
if self._pause:
self.resume()
else:
self.feedHold()
#----------------------------------------------------------------------
# FIXME ????
#----------------------------------------------------------------------
def g28Command(self):
self.sendGCode("G28.1")
#----------------------------------------------------------------------
# FIXME ????
#----------------------------------------------------------------------
def g30Command(self):
self.sendGCode("G30.1")
#----------------------------------------------------------------------
def emptyQueue(self):
while self.queue.qsize()>0:
try:
self.queue.get_nowait()
except Empty:
break
#----------------------------------------------------------------------
def stopProbe(self):
if self.gcode.probe.start:
self.gcode.probe.clear()
#----------------------------------------------------------------------
def getBufferFill(self):
return self._sumcline * 100. / RX_BUFFER_SIZE
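# Worked numbers (illustrative): with RX_BUFFER_SIZE = 128 and 32 characters
# currently accounted for in cline, this returns 25.0, i.e. 25% of the Grbl
# RX buffer is believed to be in use.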
#----------------------------------------------------------------------
def initRun(self):
self._quit = 0
self._pause = False
self._paths = None
self.running = True
self.disable()
self.emptyQueue()
time.sleep(1)
#----------------------------------------------------------------------
# Called when run is finished
#----------------------------------------------------------------------
def runEnded(self):
if self.running:
self.log.put((Sender.MSG_RUNEND,_("Run ended")))
self.log.put((Sender.MSG_RUNEND, str(datetime.now())))
self.log.put((Sender.MSG_RUNEND, str(CNC.vars["msg"])))
if self._onStop:
try:
os.system(self._onStop)
except:
pass
self._runLines = 0
self._quit = 0
self._msg = None
self._pause = False
self.running = False
CNC.vars["running"] = False
#----------------------------------------------------------------------
# Purge the buffer of the controller. Unfortunately we have to perform
# a soft reset to clear it
#---------------------------------------------------------------------
def purgeController(self):
self.serial.write(b"!")
self.serial.flush()
time.sleep(1)
# remember and send all G commands
G = " ".join([x for x in CNC.vars["G"] if x[0]=="G"]) # remember $G
TLO = CNC.vars["TLO"]
self.softReset(False) # reset controller
if self.controller in (Utils.GRBL0, Utils.GRBL1):
time.sleep(1)
self.unlock(False)
self.runEnded()
self.stopProbe()
if G: self.sendGCode(G) # restore $G
self.sendGCode("G43.1Z%s"%(TLO)) # restore TLO
self.sendGCode("$G")
#----------------------------------------------------------------------
# Stop the current run
#----------------------------------------------------------------------
def stopRun(self, event=None):
self.feedHold()
self._stop = True
# if we are in the process of submitting do not do anything
if self._runLines != sys.maxint:
self.purgeController()
#----------------------------------------------------------------------
# thread performing I/O on serial line
#----------------------------------------------------------------------
def serialIO(self):
cline = [] # length of pipeline commands
sline = [] # pipeline commands
wait = False # wait for commands to complete (status change to Idle)
tosend = None # next string to send
status = False # waiting for status <...> report
tr = tg = time.time() # last time a ? or $G was sent to grbl
while self.thread:
t = time.time()
# refresh machine position?
if t-tr > SERIAL_POLL:
self.serial.write(b"?")
status = True
tr = t
# If an override changed, send the corresponding real-time override commands
if CNC.vars["_OvChanged"] and self.controller == Utils.GRBL1:
CNC.vars["_OvChanged"] = False # Temporary
# Check feed
diff = CNC.vars["_OvFeed"] - CNC.vars["OvFeed"]
if diff==0:
pass
elif CNC.vars["_OvFeed"] == 100:
self.serial.write(OV_FEED_100)
elif diff >= 10:
self.serial.write(OV_FEED_i10)
CNC.vars["_OvChanged"] = diff>10
elif diff <= -10:
self.serial.write(OV_FEED_d10)
CNC.vars["_OvChanged"] = diff<-10
elif diff >= 1:
self.serial.write(OV_FEED_i1)
CNC.vars["_OvChanged"] = diff>1
elif diff <= -1:
self.serial.write(OV_FEED_d1)
CNC.vars["_OvChanged"] = diff<-1
# Check rapid
target = CNC.vars["_OvRapid"]
current = CNC.vars["OvRapid"]
if target == current:
pass
elif target == 100:
self.serial.write(OV_RAPID_100)
elif target == 75:
self.serial.write(OV_RAPID_50) # FIXME
elif target == 50:
self.serial.write(OV_RAPID_50)
elif target == 25:
self.serial.write(OV_RAPID_25)
# Check Spindle
diff = CNC.vars["_OvSpindle"] - CNC.vars["OvSpindle"]
if diff==0:
pass
elif CNC.vars["_OvSpindle"] == 100:
self.serial.write(OV_SPINDLE_100)
elif diff >= 10:
self.serial.write(OV_SPINDLE_i10)
CNC.vars["_OvChanged"] = diff>10
elif diff <= -10:
self.serial.write(OV_SPINDLE_d10)
CNC.vars["_OvChanged"] = diff<-10
elif diff >= 1:
self.serial.write(OV_SPINDLE_i1)
CNC.vars["_OvChanged"] = diff>1
elif diff <= -1:
self.serial.write(OV_SPINDLE_d1)
CNC.vars["_OvChanged"] = diff<-1
# Fetch new command to send if...
if tosend is None and not wait and not self._pause and self.queue.qsize()>0:
try:
tosend = self.queue.get_nowait()
#print "+++",repr(tosend)
if isinstance(tosend, tuple):
#print "gcount tuple=",self._gcount
# wait to empty the grbl buffer and status is Idle
if tosend[0] == WAIT:
# Don't count WAIT until we are idle!
wait = True
#print "+++ WAIT ON"
#print "gcount=",self._gcount, self._runLines
elif tosend[0] == MSG:
# Count executed commands as well
self._gcount += 1
if tosend[1] is not None:
# show our message on machine status
self._msg = tosend[1]
elif tosend[0] == UPDATE:
# Count executed commands as well
self._gcount += 1
self._update = tosend[1]
else:
# Count executed commands as well
self._gcount += 1
tosend = None
elif not isinstance(tosend,str) and not isinstance(tosend,unicode):
try:
tosend = self.gcode.evaluate(tosend)
# if isinstance(tosend, list):
# cline.append(len(tosend[0]))
# sline.append(tosend[0])
if isinstance(tosend,str) or isinstance(tosend,unicode):
tosend += "\n"
else:
# Count executed commands as well
self._gcount += 1
#print "gcount str=",self._gcount
#print "+++ eval=",repr(tosend),type(tosend)
except:
for s in str(sys.exc_info()[1]).splitlines():
self.log.put((Sender.MSG_ERROR,s))
self._gcount += 1
tosend = None
except Empty:
break
if tosend is not None:
# All modification in tosend should be
# done before adding it to cline
if isinstance(tosend, unicode):
tosend = tosend.encode("ascii","replace")
# Keep track of last feed
pat = FEEDPAT.match(tosend)
if pat is not None:
self._lastFeed = pat.group(2)
if self.controller in (Utils.GRBL0, Utils.SMOOTHIE):
if CNC.vars["_OvChanged"]:
CNC.vars["_OvChanged"] = False
self._newFeed = float(self._lastFeed)*CNC.vars["_OvFeed"]/100.0
if pat is None and self._newFeed!=0 \
and not tosend.startswith("$"):
tosend = "f%g%s" % (self._newFeed, tosend)
# Apply override Feed
if CNC.vars["_OvFeed"] != 100 and self._newFeed != 0:
pat = FEEDPAT.match(tosend)
if pat is not None:
try:
tosend = "%sf%g%s\n" % \
(pat.group(1),
self._newFeed,
pat.group(3))
except:
pass
# Bookkeeping of the buffers
sline.append(tosend)
cline.append(len(tosend))
# Anything to receive?
if self.serial.inWaiting() or tosend is None:
try:
line = str(self.serial.readline()).strip()
except:
self.log.put((Sender.MSG_RECEIVE, str(sys.exc_info()[1])))
self.emptyQueue()
self.close()
return
#print "<R<",repr(line)
#print "*-* stack=",sline,"sum=",sum(cline),"wait=",wait,"pause=",self._pause
if not line:
pass
elif line[0]=="<":
if not status:
self.log.put((Sender.MSG_RECEIVE, line))
elif self.controller == Utils.GRBL1:
status = False
fields = line[1:-1].split("|")
#FIXME: not sure why this was here, but it was breaking stuff
#(eg.: pause button #773 and status display)
#if not self._alarm:
CNC.vars["state"] = fields[0]
for field in fields[1:]:
word = SPLITPAT.split(field)
if word[0] == "MPos":
try:
CNC.vars["mx"] = float(word[1])
CNC.vars["my"] = float(word[2])
CNC.vars["mz"] = float(word[3])
CNC.vars["wx"] = round(CNC.vars["mx"]-CNC.vars["wcox"], CNC.digits)
CNC.vars["wy"] = round(CNC.vars["my"]-CNC.vars["wcoy"], CNC.digits)
CNC.vars["wz"] = round(CNC.vars["mz"]-CNC.vars["wcoz"], CNC.digits)
self._posUpdate = True
except (ValueError,IndexError):
CNC.vars["state"] = "Garbage receive %s: %s"%(word[0],line)
self.log.put((Sender.MSG_RECEIVE, CNC.vars["state"]))
break
elif word[0] == "F":
try:
CNC.vars["curfeed"] = float(word[1])
except (ValueError,IndexError):
CNC.vars["state"] = "Garbage receive %s: %s"%(word[0],line)
self.log.put((Sender.MSG_RECEIVE, CNC.vars["state"]))
break
elif word[0] == "FS":
try:
CNC.vars["curfeed"] = float(word[1])
CNC.vars["curspindle"] = float(word[2])
except (ValueError,IndexError):
CNC.vars["state"] = "Garbage receive %s: %s"%(word[0],line)
self.log.put((Sender.MSG_RECEIVE, CNC.vars["state"]))
break
elif word[0] == "Bf":
try:
CNC.vars["planner"] = int(word[1])
CNC.vars["rxbytes"] = int(word[2])
except (ValueError,IndexError):
CNC.vars["state"] = "Garbage receive %s: %s"%(word[0],line)
self.log.put((Sender.MSG_RECEIVE, CNC.vars["state"]))
break
elif word[0] == "Ov":
try:
CNC.vars["OvFeed"] = int(word[1])
CNC.vars["OvRapid"] = int(word[2])
CNC.vars["OvSpindle"] = int(word[3])
except (ValueError,IndexError):
CNC.vars["state"] = "Garbage receive %s: %s"%(word[0],line)
self.log.put((Sender.MSG_RECEIVE, CNC.vars["state"]))
break
elif word[0] == "WCO":
try:
CNC.vars["wcox"] = float(word[1])
CNC.vars["wcoy"] = float(word[2])
CNC.vars["wcoz"] = float(word[3])
except (ValueError,IndexError):
CNC.vars["state"] = "Garbage receive %s: %s"%(word[0],line)
self.log.put((Sender.MSG_RECEIVE, CNC.vars["state"]))
break
# Machine is Idle and buffer is empty; stop waiting and go on
if wait and not cline and fields[0] in ("Idle","Check"):
wait = False
self._gcount += 1
elif self.controller == Utils.SMOOTHIE:
# <Idle|MPos:68.9980,-49.9240,40.0000,12.3456|WPos:68.9980,-49.9240,40.0000|F:12345.12|S:1.2>
ln= line[1:-1] # strip off < .. >
# split fields
l= ln.split('|')
# strip off status
CNC.vars["state"]= l[0]
# strip of rest into a dict of name: [values,...,]
d= { a: [float(y) for y in b.split(',')] for a, b in [x.split(':') for x in l[1:]] }
CNC.vars["mx"] = float(d['MPos'][0])
CNC.vars["my"] = float(d['MPos'][1])
CNC.vars["mz"] = float(d['MPos'][2])
CNC.vars["wx"] = float(d['WPos'][0])
CNC.vars["wy"] = float(d['WPos'][1])
CNC.vars["wz"] = float(d['WPos'][2])
CNC.vars["wcox"] = CNC.vars["mx"] - CNC.vars["wx"]
CNC.vars["wcoy"] = CNC.vars["my"] - CNC.vars["wy"]
CNC.vars["wcoz"] = CNC.vars["mz"] - CNC.vars["wz"]
if 'F' in d:
CNC.vars["curfeed"] = float(d['F'][0])
self._posUpdate = True
# Machine is Idle buffer is empty
# stop waiting and go on
if wait and not cline and l[0] in ("Idle","Check"):
wait = False
self._gcount += 1
else:
status = False
pat = STATUSPAT.match(line)
if pat:
if not self._alarm:
CNC.vars["state"] = pat.group(1)
CNC.vars["mx"] = float(pat.group(2))
CNC.vars["my"] = float(pat.group(3))
CNC.vars["mz"] = float(pat.group(4))
CNC.vars["wx"] = float(pat.group(5))
CNC.vars["wy"] = float(pat.group(6))
CNC.vars["wz"] = float(pat.group(7))
CNC.vars["wcox"] = CNC.vars["mx"] - CNC.vars["wx"]
CNC.vars["wcoy"] = CNC.vars["my"] - CNC.vars["wy"]
CNC.vars["wcoz"] = CNC.vars["mz"] - CNC.vars["wz"]
self._posUpdate = True
if pat.group(1)[:4] != "Hold" and self._msg:
self._msg = None
# Machine is Idle buffer is empty
# stop waiting and go on
#print "<<< WAIT=",wait,sline,pat.group(1),sum(cline)
#print ">>>", line
if wait and not cline and pat.group(1) in ("Idle","Check"):
#print ">>>",line
wait = False
#print "<<< NO MORE WAIT"
self._gcount += 1
else:
self.log.put((Sender.MSG_RECEIVE, line))
elif line[0]=="[":
self.log.put((Sender.MSG_RECEIVE, line))
if self.controller == Utils.GRBL1:
word = SPLITPAT.split(line[1:-1])
#print word
if word[0] == "PRB":
CNC.vars["prbx"] = float(word[1])
CNC.vars["prby"] = float(word[2])
CNC.vars["prbz"] = float(word[3])
#if self.running:
self.gcode.probe.add(
CNC.vars["prbx"]-CNC.vars["wcox"],
CNC.vars["prby"]-CNC.vars["wcoy"],
CNC.vars["prbz"]-CNC.vars["wcoz"])
self._probeUpdate = True
CNC.vars[word[0]] = word[1:]
elif word[0] == "GC":
CNC.vars["G"] = word[1].split()
CNC.updateG()
self._gUpdate = True
elif word[0] == "TLO":
CNC.vars[word[0]] = word[1]
self._probeUpdate = True
else:
CNC.vars[word[0]] = word[1:]
else:
pat = POSPAT.match(line)
if pat:
if pat.group(1) == "PRB":
CNC.vars["prbx"] = float(pat.group(2))
CNC.vars["prby"] = float(pat.group(3))
CNC.vars["prbz"] = float(pat.group(4))
#if self.running:
self.gcode.probe.add(
CNC.vars["prbx"]
+CNC.vars["wx"]
-CNC.vars["mx"],
CNC.vars["prby"]
+CNC.vars["wy"]
-CNC.vars["my"],
CNC.vars["prbz"]
+CNC.vars["wz"]
-CNC.vars["mz"])
self._probeUpdate = True
CNC.vars[pat.group(1)] = \
[float(pat.group(2)),
float(pat.group(3)),
float(pat.group(4))]
else:
pat = TLOPAT.match(line)
if pat:
CNC.vars[pat.group(1)] = pat.group(2)
self._probeUpdate = True
elif DOLLARPAT.match(line):
CNC.vars["G"] = line[1:-1].split()
CNC.updateG()
self._gUpdate = True
elif "error:" in line or "ALARM:" in line:
self.log.put((Sender.MSG_ERROR, line))
self._gcount += 1
#print "gcount ERROR=",self._gcount
if cline: del cline[0]
if sline: CNC.vars["errline"] = sline.pop(0)
if not self._alarm: self._posUpdate = True
self._alarm = True
CNC.vars["state"] = line
if self.running:
self._stop = True
elif line.find("ok")>=0:
self.log.put((Sender.MSG_OK, line))
self._gcount += 1
if cline: del cline[0]
if sline: del sline[0]
#print "SLINE:",sline
# if self._alarm and not self.running:
# # turn off alarm for connected status once
# # a valid gcode event occurs
# self._alarm = False
elif line[0] == "$":
self.log.put((Sender.MSG_RECEIVE, line))
pat = VARPAT.match(line)
if pat:
CNC.vars["grbl_%s"%(pat.group(1))] = pat.group(2)
elif line[:4]=="Grbl" or line[:13]=="CarbideMotion": # and self.running:
tg = time.time()
self.log.put((Sender.MSG_RECEIVE, line))
self._stop = True
del cline[:] # After reset clear the buffer counters
del sline[:]
CNC.vars["version"] = line.split()[1]
# Detect controller
if self.controller in (Utils.GRBL0, Utils.GRBL1):
self.controller = int(CNC.vars["version"][0])
else:
self.log.put((Sender.MSG_RECEIVE, line))
# Received external message to stop
if self._stop:
self.emptyQueue()
tosend = None
self.log.put((Sender.MSG_CLEAR, ""))
# WARNING if runLines==maxint then it means we are
# still preparing/sending lines from bCNC.run(),
# so don't stop
if self._runLines != sys.maxint:
self._stop = False
#print "tosend='%s'"%(repr(tosend)),"stack=",sline,
# "sum=",sum(cline),"wait=",wait,"pause=",self._pause
if tosend is not None and sum(cline) < RX_BUFFER_SIZE:
self._sumcline = sum(cline)
# if isinstance(tosend, list):
# self.serial.write(str(tosend.pop(0)))
# if not tosend: tosend = None
#print ">S>",repr(tosend),"stack=",sline,"sum=",sum(cline)
if self.controller==Utils.SMOOTHIE: tosend = tosend.upper()
self.serial.write(bytes(tosend))
#self.serial.write(tosend.encode("utf8"))
#self.serial.flush()
self.log.put((Sender.MSG_BUFFER,tosend))
tosend = None
if not self.running and t-tg > G_POLL:
tosend = b"$G\n"
sline.append(tosend)
cline.append(len(tosend))
tg = t
|
gui_app.py
|
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
from scrapy.utils import project
from scrapy import spiderloader
from scrapy.utils.log import configure_logging
from scrapy.crawler import CrawlerRunner
from twisted.internet import reactor
import threading
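# Note: this script assumes it is run from inside a Scrapy project directory,
# since project.get_project_settings() locates the settings via scrapy.cfg /
# the SCRAPY_SETTINGS_MODULE environment variable.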
def get_spiders():
settings = project.get_project_settings()
spider_loader = spiderloader.SpiderLoader.from_settings(settings)
return spider_loader.list()
# Defaults so execute_spider() can validate safely before anything is chosen.
chosen_spider = None
chosen_feed = None
def get_chosen_spider(value):
    global chosen_spider
    chosen_spider = value
    return chosen_spider
def get_chosen_feed(value):
global chosen_feed
chosen_feed = value
return chosen_feed
def browse_btn():
global folder_path
folder_path = filedialog.askdirectory()
folder_path_entry.delete(0, END)
folder_path_entry.insert(0, folder_path)
return folder_path
def execute_spider():
    # Validate that a spider, a feed type, an output folder and a dataset
    # name have all been chosen before starting the crawl.
    folder = folder_path_entry.get()
    if (dataset_entry.get() == '' or folder == ''
            or chosen_feed not in ['CSV', 'JSON'] or chosen_spider is None):
        messagebox.showerror('Error', 'All entries are required!')
        return
    feed_uri = f"file:///{folder}/{dataset_entry.get()}.{chosen_feed.lower()}"
    settings = project.get_project_settings()
    settings.set('FEED_URI', feed_uri)
    settings.set('FEED_FORMAT', chosen_feed.lower())
    configure_logging()
    runner = CrawlerRunner(settings)
    runner.crawl(chosen_spider)
    # Note: the Twisted reactor cannot be restarted, so only the first
    # Execute click in a session can actually run a crawl.
    reactor.run(installSignalHandlers=False)
def start_execute_thread(event):
    # Run the crawl in a daemon thread so the Tk mainloop stays responsive,
    # and poll it with app.after() instead of blocking on join().
    global execute_thread
    execute_thread = threading.Thread(target=execute_spider, daemon=True)
    execute_thread.start()
    app.after(10, check_execute_thread)
def check_execute_thread():
if execute_thread.is_alive():
app.after(10, check_execute_thread)
app = Tk()
# Spiders List
spider_label = Label(app, text="Choose a Spider")
spider_label.grid(row=0, column=0, sticky=W, pady=10, padx=10)
spider_text = StringVar(app)
spider_text.set('Choose a Spider')
spiders = [spider for spider in get_spiders()]
spiders_dropdown = OptionMenu(app, spider_text, *spiders, command=get_chosen_spider)
spiders_dropdown.grid(row=0, column=1, columnspan=2)
#Feed Type
feed_label = Label(app, text="Choose a feed")
feed_label.grid(row=1, column=0, sticky=W, pady=10, padx=10)
feed_text = StringVar(app)
feed_text.set('Choose a feed')
feeds = ['JSON', 'CSV']
feeds_dropdown = OptionMenu(app, feed_text, *feeds, command=get_chosen_feed)
feeds_dropdown.grid(row=1, column=1, columnspan=2)
#Path Entry
folder_path_text = StringVar(app)
folder_path_entry = Entry(app, textvariable=folder_path_text)
folder_path_entry.grid(row=2, column=0, pady=10, padx=10)
#Dataset Entry
dataset_text = StringVar(app)
dataset_entry = Entry(app, textvariable=dataset_text, width=10)
dataset_entry.grid(row=2, column=1, pady=10, padx=10)
# Use a distinct name so the widget does not shadow the browse_btn() callback.
browse_button = Button(app, text='Browse', command=browse_btn)
browse_button.grid(row=2, column=2)
execute_btn = Button(app, text="Execute", command=lambda: start_execute_thread(None))
execute_btn.grid(row=3, column=0, columnspan=3)
app.title("Spider Executer")
app.geometry('300x200')
app.resizable(False, False)
app.mainloop()
|
lab3b.py
|
import multiprocessing as mp
import numpy as np
import time
from functools import partial
class Timer:
def __init__(self):
self.__start = 0.0
def __enter__(self):
self.__start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
print(time.time() - self.__start)
# Task 5
def root(number: float, order: int) -> float:
return number ** (1.0 / (order + 2))
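# For reference: root(x, 0) is the square root of x and root(x, 1) the cube root.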
def sequential(x: float, results: np.ndarray, iterations: int) -> None:
    for i in range(iterations):
        results[i] = root(x, i)
# Task 6
def multiprocess(x: float, results: np.ndarray,
                 iterations: int, pool: mp.Pool) -> None:
    # One pool task per root() call, so inter-process overhead is paid on
    # every element.
    with pool:
        res = pool.starmap(root, ((x, i) for i in range(iterations)))
    results[:] = res
# Task 7
def root_array(number: float, begin: int, size: int) -> np.ndarray:
    # Compute `size` consecutive roots starting at order `begin`.
    results = np.empty(size)
    for i in range(size):
        results[i] = root(number, begin + i)
    return results
def multiprocess_array(x: float, results: np.ndarray,
                       iterations: int, pool: mp.Pool) -> None:
    # Give each worker one contiguous chunk of orders; the last chunk absorbs
    # the remainder when iterations is not evenly divisible by the pool size.
    no_workers = pool._processes  # private attribute, but there is no public getter
    size = iterations // no_workers
    chunks = [(x, i * size, size) for i in range(no_workers)]
    last_begin = (no_workers - 1) * size
    chunks[-1] = (x, last_begin, iterations - last_begin)
    with pool:
        res = pool.starmap(root_array, chunks)
    results[:] = np.concatenate(res)
# Task 8
def task(job_id: int, lock: mp.Lock) -> None:
lock.acquire()
try:
print("Job id: %d" % job_id)
finally:
lock.release()
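# A minimal equivalent sketch (not part of the original lab): the same
# serialization of the print() calls, but using the lock as a context manager.
# The name task_with_context is new and only illustrative.
def task_with_context(job_id: int, lock: mp.Lock) -> None:
    with lock:
        print("Job id: %d" % job_id)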
def main():
# Task 5, 6, 7
iterations = int(1e6)
np.random.seed(237)
x = np.random.uniform(1, 100)
results = np.empty(iterations)
pool = mp.Pool(mp.cpu_count())
pool2 = mp.Pool(mp.cpu_count())
algorithms = [partial(sequential, x, results, iterations),
partial(multiprocess, x, results, iterations, pool),
partial(multiprocess_array, x, results, iterations, pool2)]
for algorithm in algorithms:
print(algorithm.func.__name__)
with Timer():
algorithm()
"""
The best performance can be achieved using multiprocess_array function.
VM with limited amount of cores: sequential is faster than multiprocess.
Host: multiprocess is faster than sequential.
"""
# Task 8
lock = mp.Lock()
workers = [mp.Process(target=task, args=(job, lock)) for job in range(10)]
for worker in workers:
worker.start()
for worker in workers:
worker.join()
if __name__ == "__main__":
main()
|
ccbench.py
|
# -*- coding: utf-8 -*-
# This file should be kept compatible with both Python 2.6 and Python >= 3.0.
from __future__ import division
from __future__ import print_function
"""
ccbench, a Python concurrency benchmark.
"""
import time
import os
import sys
import functools
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
# Compatibility
try:
xrange
except NameError:
xrange = range
try:
map = itertools.imap
except AttributeError:
pass
THROUGHPUT_DURATION = 2.0
LATENCY_PING_INTERVAL = 0.1
LATENCY_DURATION = 2.0
BANDWIDTH_PACKET_SIZE = 1024
BANDWIDTH_DURATION = 2.0
def task_pidigits():
"""Pi calculation (Python)"""
_map = map
_count = itertools.count
_islice = itertools.islice
def calc_ndigits(n):
# From http://shootout.alioth.debian.org/
def gen_x():
return _map(lambda k: (k, 4*k + 2, 0, 2*k + 1), _count(1))
def compose(a, b):
aq, ar, as_, at = a
bq, br, bs, bt = b
return (aq * bq,
aq * br + ar * bt,
as_ * bq + at * bs,
as_ * br + at * bt)
def extract(z, j):
q, r, s, t = z
return (q*j + r) // (s*j + t)
def pi_digits():
z = (1, 0, 0, 1)
x = gen_x()
while 1:
y = extract(z, 3)
while y != extract(z, 4):
z = compose(z, next(x))
y = extract(z, 3)
z = compose((10, -10*y, 0, 1), z)
yield y
return list(_islice(pi_digits(), n))
return calc_ndigits, (50, )
def task_regex():
"""regular expression (C)"""
# XXX this task gives horrendous latency results.
import re
# Taken from the `inspect` module
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)', re.MULTILINE)
with open(__file__, "r") as f:
arg = f.read(2000)
def findall(s):
t = time.time()
try:
return pat.findall(s)
finally:
print(time.time() - t)
return pat.findall, (arg, )
def task_sort():
"""list sorting (C)"""
def list_sort(l):
l = l[::-1]
l.sort()
return list_sort, (list(range(1000)), )
def task_compress_zlib():
"""zlib compression (C)"""
import zlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 3
def compress(s):
zlib.decompress(zlib.compress(s, 5))
return compress, (arg, )
def task_compress_bz2():
"""bz2 compression (C)"""
import bz2
with open(__file__, "rb") as f:
arg = f.read(3000) * 2
def compress(s):
bz2.compress(s)
return compress, (arg, )
def task_hashing():
"""SHA1 hashing (C)"""
import hashlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 30
def compute(s):
hashlib.sha1(s).digest()
return compute, (arg, )
throughput_tasks = [task_pidigits, task_regex]
for mod in 'bz2', 'hashlib':
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
# For whatever reasons, zlib gives irregular results, so we prefer bz2 or
# hashlib if available.
# (NOTE: hashlib releases the GIL from 2.7 and 3.1 onwards)
if bz2 is not None:
throughput_tasks.append(task_compress_bz2)
elif hashlib is not None:
throughput_tasks.append(task_hashing)
else:
throughput_tasks.append(task_compress_zlib)
latency_tasks = throughput_tasks
bandwidth_tasks = [task_pidigits]
class TimedLoop:
def __init__(self, func, args):
self.func = func
self.args = args
def __call__(self, start_time, min_duration, end_event, do_yield=False):
step = 20
niters = 0
duration = 0.0
_time = time.time
_sleep = time.sleep
_func = self.func
_args = self.args
t1 = start_time
while True:
for i in range(step):
_func(*_args)
t2 = _time()
# If another thread terminated, the current measurement is invalid
# => return the previous one.
if end_event:
return niters, duration
niters += step
duration = t2 - start_time
if duration >= min_duration:
end_event.append(None)
return niters, duration
if t2 - t1 < 0.01:
# Minimize interference of measurement on overall runtime
step = step * 3 // 2
elif do_yield:
# OS scheduling of Python threads is sometimes so bad that we
# have to force thread switching ourselves, otherwise we get
# completely useless results.
_sleep(0.0001)
t1 = t2
def run_throughput_test(func, args, nthreads):
assert nthreads >= 1
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
end_event = []
if nthreads == 1:
# Pure single-threaded performance, without any switching or
# synchronization overhead.
start_time = time.time()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=False))
return results
started = False
ready_cond = threading.Condition()
start_cond = threading.Condition()
ready = []
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=True))
threads = []
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# We don't want measurements to include thread startup overhead,
# so we arrange for timing to start after all threads are ready.
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
with start_cond:
start_time = time.time()
started = True
start_cond.notify(nthreads)
for t in threads:
t.join()
return results
def run_throughput_tests(max_threads):
for task in throughput_tasks:
print(task.__doc__)
print()
func, args = task()
nthreads = 1
baseline_speed = None
while nthreads <= max_threads:
results = run_throughput_test(func, args, nthreads)
# Taking the max duration rather than average gives pessimistic
# results rather than optimistic.
speed = sum(r[0] for r in results) / max(r[1] for r in results)
print("threads=%d: %d" % (nthreads, speed), end="")
if baseline_speed is None:
print(" iterations/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
LAT_END = "END"
def _sendto(sock, s, addr):
sock.sendto(s.encode('ascii'), addr)
def _recv(sock, n):
return sock.recv(n).decode('ascii')
def latency_client(addr, nb_pings, interval):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_time = time.time
_sleep = time.sleep
def _ping():
_sendto(sock, "%r\n" % _time(), addr)
# The first ping signals the parent process that we are ready.
_ping()
# We give the parent a bit of time to notice.
_sleep(1.0)
for i in range(nb_pings):
_sleep(interval)
_ping()
_sendto(sock, LAT_END + "\n", addr)
def run_latency_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--latclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_latency_test(func, args, nthreads):
# Create a listening socket to receive the pings. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
interval = LATENCY_PING_INTERVAL
duration = LATENCY_DURATION
nb_pings = int(duration / interval)
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first ping(s) to arrive before
# unblocking the background threads.
chunks = []
process = run_latency_client(addr=sock.getsockname(),
nb_pings=nb_pings, interval=interval)
s = _recv(sock, 4096)
_time = time.time
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
while LAT_END not in s:
s = _recv(sock, 4096)
t = _time()
chunks.append((t, s))
# Tell the background threads to stop.
end_event.append(None)
for t in threads:
t.join()
process.wait()
for recv_time, chunk in chunks:
# NOTE: it is assumed that a line sent by a client wasn't received
# in two chunks because the lines are very small.
for line in chunk.splitlines():
line = line.strip()
if line and line != LAT_END:
send_time = eval(line)
assert isinstance(send_time, float)
results.append((send_time, recv_time))
return results
def run_latency_tests(max_threads):
for task in latency_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
while nthreads <= max_threads:
results = run_latency_test(func, args, nthreads)
n = len(results)
# We print out milliseconds
lats = [1000 * (t2 - t1) for (t1, t2) in results]
#print(list(map(int, lats)))
avg = sum(lats) / n
dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
print()
#print(" [... from %d samples]" % n)
nthreads += 1
print()
BW_END = "END"
def bandwidth_client(addr, packet_size, duration):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
local_addr = sock.getsockname()
_time = time.time
_sleep = time.sleep
def _send_chunk(msg):
_sendto(sock, ("%r#%s\n" % (local_addr, msg)).rjust(packet_size), addr)
# We give the parent some time to be ready.
_sleep(1.0)
try:
start_time = _time()
end_time = start_time + duration * 2.0
i = 0
while _time() < end_time:
_send_chunk(str(i))
s = _recv(sock, packet_size)
assert len(s) == packet_size
i += 1
_send_chunk(BW_END)
finally:
sock.close()
def run_bandwidth_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--bwclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_bandwidth_test(func, args, nthreads):
# Create a listening socket to receive the packets. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
duration = BANDWIDTH_DURATION
packet_size = BANDWIDTH_PACKET_SIZE
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first packet to arrive before
# unblocking the background threads.
process = run_bandwidth_client(addr=addr,
packet_size=packet_size,
duration=duration)
_time = time.time
# This will also wait for the parent to be ready
s = _recv(sock, packet_size)
remote_addr = eval(s.partition('#')[0])
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
n = 0
first_time = None
while not end_event and BW_END not in s:
_sendto(sock, s, remote_addr)
s = _recv(sock, packet_size)
if first_time is None:
first_time = _time()
n += 1
end_time = _time()
end_event.append(None)
for t in threads:
t.join()
process.kill()
return (n - 1) / (end_time - first_time)
def run_bandwidth_tests(max_threads):
for task in bandwidth_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
baseline_speed = None
while nthreads <= max_threads:
results = run_bandwidth_test(func, args, nthreads)
speed = results
#speed = len(results) * 1.0 / results[-1][0]
print("CPU threads=%d: %.1f" % (nthreads, speed), end="")
if baseline_speed is None:
print(" packets/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
def main():
usage = "usage: %prog [-h|--help] [options]"
parser = OptionParser(usage=usage)
parser.add_option("-t", "--throughput",
action="store_true", dest="throughput", default=False,
help="run throughput tests")
parser.add_option("-l", "--latency",
action="store_true", dest="latency", default=False,
help="run latency tests")
parser.add_option("-b", "--bandwidth",
action="store_true", dest="bandwidth", default=False,
help="run I/O bandwidth tests")
parser.add_option("-i", "--interval",
action="store", type="int", dest="check_interval", default=None,
help="sys.setcheckinterval() value")
parser.add_option("-I", "--switch-interval",
action="store", type="float", dest="switch_interval", default=None,
help="sys.setswitchinterval() value")
parser.add_option("-n", "--num-threads",
action="store", type="int", dest="nthreads", default=4,
help="max number of threads in tests")
# Hidden option to run the pinging and bandwidth clients
parser.add_option("", "--latclient",
action="store", dest="latclient", default=None,
help=SUPPRESS_HELP)
parser.add_option("", "--bwclient",
action="store", dest="bwclient", default=None,
help=SUPPRESS_HELP)
options, args = parser.parse_args()
if args:
parser.error("unexpected arguments")
if options.latclient:
kwargs = eval(options.latclient)
latency_client(**kwargs)
return
if options.bwclient:
kwargs = eval(options.bwclient)
bandwidth_client(**kwargs)
return
if not options.throughput and not options.latency and not options.bandwidth:
options.throughput = options.latency = options.bandwidth = True
if options.check_interval:
sys.setcheckinterval(options.check_interval)
if options.switch_interval:
sys.setswitchinterval(options.switch_interval)
print("== %s %s (%s) ==" % (
platform.python_implementation(),
platform.python_version(),
platform.python_build()[0],
))
# Processor identification often has repeated spaces
cpu = ' '.join(platform.processor().split())
print("== %s %s on '%s' ==" % (
platform.machine(),
platform.system(),
cpu,
))
print()
if options.throughput:
print("--- Throughput ---")
print()
run_throughput_tests(options.nthreads)
if options.latency:
print("--- Latency ---")
print()
run_latency_tests(options.nthreads)
if options.bandwidth:
print("--- I/O bandwidth ---")
print()
run_bandwidth_tests(options.nthreads)
if __name__ == "__main__":
main()
|